Index: head/sys/cam/ata/ata_all.c
===================================================================
--- head/sys/cam/ata/ata_all.c (revision 326264)
+++ head/sys/cam/ata/ata_all.c (revision 326265)
@@ -1,1212 +1,1214 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include "opt_scsi.h"

#include <sys/systm.h>
#include <sys/libkern.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#else
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef min
#define min(a,b) (((a)<(b))?(a):(b))
#endif
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt.h>
#include <sys/ata.h>
#include <cam/ata/ata_all.h>
#include <sys/sbuf.h>
#include <sys/endian.h>

int
ata_version(int ver)
{
	int bit;

	if (ver == 0xffff)
		return 0;
	for (bit = 15; bit >= 0; bit--)
		if (ver & (1<<bit))
			return bit;
	return 0;
}

char *
ata_op_string(struct ata_cmd *cmd)
{

	if (cmd->control & 0x04)
		return ("SOFT_RESET");
	switch (cmd->command) {
	case 0x00:
		switch (cmd->features) {
		case 0x00: return ("NOP FLUSHQUEUE");
		case 0x01: return ("NOP AUTOPOLL");
		}
		return ("NOP");
	case 0x03: return ("CFA_REQUEST_EXTENDED_ERROR");
	case 0x06:
		switch (cmd->features) {
		case 0x01: return ("DSM TRIM");
		}
		return "DSM";
	case 0x08: return ("DEVICE_RESET");
	case 0x0b: return ("REQUEST_SENSE_DATA_EXT");
	case 0x20: return ("READ");
	case 0x24: return ("READ48");
	case 0x25: return ("READ_DMA48");
	case 0x26: return ("READ_DMA_QUEUED48");
	case 0x27: return ("READ_NATIVE_MAX_ADDRESS48");
	case 0x29: return ("READ_MUL48");
	case 0x2a: return ("READ_STREAM_DMA48");
	case 0x2b: return ("READ_STREAM48");
	case 0x2f: return ("READ_LOG_EXT");
	case 0x30: return ("WRITE");
	case 0x34: return ("WRITE48");
	case 0x35: return ("WRITE_DMA48");
	case 0x36: return ("WRITE_DMA_QUEUED48");
	case 0x37: return ("SET_MAX_ADDRESS48");
	case 0x39: return ("WRITE_MUL48");
	case 0x3a: return ("WRITE_STREAM_DMA48");
	case 0x3b: return ("WRITE_STREAM48");
	case 0x3d: return ("WRITE_DMA_FUA48");
	case 0x3e: return ("WRITE_DMA_QUEUED_FUA48");
	case 0x3f: return ("WRITE_LOG_EXT");
	case 0x40: return ("READ_VERIFY");
	case 0x42: return ("READ_VERIFY48");
	case 0x44: return ("ZERO_EXT");
	case 0x45:
		switch (cmd->features) {
		case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO");
		case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED");
		}
		return "WRITE_UNCORRECTABLE48";
	case 0x47: return ("READ_LOG_DMA_EXT");
	case 0x4a: return ("ZAC_MANAGEMENT_IN");
	case 0x51:
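	/*
	 * Editorial example (not part of the original commit):
	 * ata_version() above returns the index of the highest bit set in
	 * the IDENTIFY major-version word, so a word of 0x01f0 (bits 4
	 * through 8 set) decodes as 8, which ata_print_ident() below
	 * renders as "ATA8-ACS":
	 *
	 *	int v = ata_version(0x01f0);	(returns 8)
	 */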
return ("CONFIGURE_STREAM"); case 0x57: return ("WRITE_LOG_DMA_EXT"); case 0x5b: return ("TRUSTED_NON_DATA"); case 0x5c: return ("TRUSTED_RECEIVE"); case 0x5d: return ("TRUSTED_RECEIVE_DMA"); case 0x5e: return ("TRUSTED_SEND"); case 0x5f: return ("TRUSTED_SEND_DMA"); case 0x60: return ("READ_FPDMA_QUEUED"); case 0x61: return ("WRITE_FPDMA_QUEUED"); case 0x63: switch (cmd->features & 0xf) { case 0x00: return ("NCQ_NON_DATA ABORT NCQ QUEUE"); case 0x01: return ("NCQ_NON_DATA DEADLINE HANDLING"); case 0x05: return ("NCQ_NON_DATA SET FEATURES"); /* * XXX KDM need common decoding between NCQ and non-NCQ * versions of SET FEATURES. */ case 0x06: return ("NCQ_NON_DATA ZERO EXT"); case 0x07: return ("NCQ_NON_DATA ZAC MANAGEMENT OUT"); } return ("NCQ_NON_DATA"); case 0x64: switch (cmd->sector_count_exp & 0xf) { case 0x00: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT"); case 0x02: return ("SEND_FPDMA_QUEUED WRITE LOG DMA EXT"); case 0x03: return ("SEND_FPDMA_QUEUED ZAC MANAGEMENT OUT"); case 0x04: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT XL"); } return ("SEND_FPDMA_QUEUED"); case 0x65: switch (cmd->sector_count_exp & 0xf) { case 0x01: return ("RECEIVE_FPDMA_QUEUED READ LOG DMA EXT"); case 0x02: return ("RECEIVE_FPDMA_QUEUED ZAC MANAGEMENT IN"); } return ("RECEIVE_FPDMA_QUEUED"); case 0x67: if (cmd->features == 0xec) return ("SEP_ATTN IDENTIFY"); switch (cmd->lba_low) { case 0x00: return ("SEP_ATTN READ BUFFER"); case 0x02: return ("SEP_ATTN RECEIVE DIAGNOSTIC RESULTS"); case 0x80: return ("SEP_ATTN WRITE BUFFER"); case 0x82: return ("SEP_ATTN SEND DIAGNOSTIC"); } return ("SEP_ATTN"); case 0x70: return ("SEEK"); case 0x77: return ("SET_DATE_TIME_EXT"); case 0x78: return ("ACCESSIBLE_MAX_ADDRESS_CONFIGURATION"); case 0x87: return ("CFA_TRANSLATE_SECTOR"); case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC"); case 0x92: return ("DOWNLOAD_MICROCODE"); case 0x93: return ("DOWNLOAD_MICROCODE_DMA"); case 0x9a: return ("ZAC_MANAGEMENT_OUT"); case 0xa0: return ("PACKET"); case 0xa1: return ("ATAPI_IDENTIFY"); case 0xa2: return ("SERVICE"); case 0xb0: switch(cmd->features) { case 0xd0: return ("SMART READ ATTR VALUES"); case 0xd1: return ("SMART READ ATTR THRESHOLDS"); case 0xd3: return ("SMART SAVE ATTR VALUES"); case 0xd4: return ("SMART EXECUTE OFFLINE IMMEDIATE"); case 0xd5: return ("SMART READ LOG DATA"); case 0xd8: return ("SMART ENABLE OPERATION"); case 0xd9: return ("SMART DISABLE OPERATION"); case 0xda: return ("SMART RETURN STATUS"); } return ("SMART"); case 0xb1: return ("DEVICE CONFIGURATION"); case 0xb4: return ("SANITIZE_DEVICE"); case 0xc0: return ("CFA_ERASE"); case 0xc4: return ("READ_MUL"); case 0xc5: return ("WRITE_MUL"); case 0xc6: return ("SET_MULTI"); case 0xc7: return ("READ_DMA_QUEUED"); case 0xc8: return ("READ_DMA"); case 0xca: return ("WRITE_DMA"); case 0xcc: return ("WRITE_DMA_QUEUED"); case 0xcd: return ("CFA_WRITE_MULTIPLE_WITHOUT_ERASE"); case 0xce: return ("WRITE_MUL_FUA48"); case 0xd1: return ("CHECK_MEDIA_CARD_TYPE"); case 0xda: return ("GET_MEDIA_STATUS"); case 0xde: return ("MEDIA_LOCK"); case 0xdf: return ("MEDIA_UNLOCK"); case 0xe0: return ("STANDBY_IMMEDIATE"); case 0xe1: return ("IDLE_IMMEDIATE"); case 0xe2: return ("STANDBY"); case 0xe3: return ("IDLE"); case 0xe4: return ("READ_BUFFER/PM"); case 0xe5: return ("CHECK_POWER_MODE"); case 0xe6: return ("SLEEP"); case 0xe7: return ("FLUSHCACHE"); case 0xe8: return ("WRITE_PM"); case 0xea: return ("FLUSHCACHE48"); case 0xec: return ("ATA_IDENTIFY"); case 0xed: return ("MEDIA_EJECT"); case 0xef: /* * XXX KDM need common 
decoding between NCQ and non-NCQ * versions of SET FEATURES. */ switch (cmd->features) { case 0x02: return ("SETFEATURES ENABLE WCACHE"); case 0x03: return ("SETFEATURES SET TRANSFER MODE"); case 0x04: return ("SETFEATURES ENABLE APM"); case 0x06: return ("SETFEATURES ENABLE PUIS"); case 0x07: return ("SETFEATURES SPIN-UP"); case 0x0b: return ("SETFEATURES ENABLE WRITE READ VERIFY"); case 0x0c: return ("SETFEATURES ENABLE DEVICE LIFE CONTROL"); case 0x10: return ("SETFEATURES ENABLE SATA FEATURE"); case 0x41: return ("SETFEATURES ENABLE FREEFALL CONTROL"); case 0x43: return ("SETFEATURES SET MAX HOST INT SECT TIMES"); case 0x45: return ("SETFEATURES SET RATE BASIS"); case 0x4a: return ("SETFEATURES EXTENDED POWER CONDITIONS"); case 0x55: return ("SETFEATURES DISABLE RCACHE"); case 0x5d: return ("SETFEATURES ENABLE RELIRQ"); case 0x5e: return ("SETFEATURES ENABLE SRVIRQ"); case 0x62: return ("SETFEATURES LONG PHYS SECT ALIGN ERC"); case 0x63: return ("SETFEATURES DSN"); case 0x66: return ("SETFEATURES DISABLE DEFAULTS"); case 0x82: return ("SETFEATURES DISABLE WCACHE"); case 0x85: return ("SETFEATURES DISABLE APM"); case 0x86: return ("SETFEATURES DISABLE PUIS"); case 0x8b: return ("SETFEATURES DISABLE WRITE READ VERIFY"); case 0x8c: return ("SETFEATURES DISABLE DEVICE LIFE CONTROL"); case 0x90: return ("SETFEATURES DISABLE SATA FEATURE"); case 0xaa: return ("SETFEATURES ENABLE RCACHE"); case 0xC1: return ("SETFEATURES DISABLE FREEFALL CONTROL"); case 0xC3: return ("SETFEATURES SENSE DATA REPORTING"); case 0xC4: return ("SETFEATURES NCQ SENSE DATA RETURN"); case 0xCC: return ("SETFEATURES ENABLE DEFAULTS"); case 0xdd: return ("SETFEATURES DISABLE RELIRQ"); case 0xde: return ("SETFEATURES DISABLE SRVIRQ"); } return "SETFEATURES"; case 0xf1: return ("SECURITY_SET_PASSWORD"); case 0xf2: return ("SECURITY_UNLOCK"); case 0xf3: return ("SECURITY_ERASE_PREPARE"); case 0xf4: return ("SECURITY_ERASE_UNIT"); case 0xf5: return ("SECURITY_FREEZE_LOCK"); case 0xf6: return ("SECURITY_DISABLE_PASSWORD"); case 0xf8: return ("READ_NATIVE_MAX_ADDRESS"); case 0xf9: return ("SET_MAX_ADDRESS"); } return "UNKNOWN"; } char * ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len) { struct sbuf sb; int error; if (len == 0) return (""); sbuf_new(&sb, cmd_string, len, SBUF_FIXEDLEN); ata_cmd_sbuf(cmd, &sb); error = sbuf_finish(&sb); if (error != 0 && error != ENOMEM) return (""); return(sbuf_data(&sb)); } void ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb) { sbuf_printf(sb, "%02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x", cmd->command, cmd->features, cmd->lba_low, cmd->lba_mid, cmd->lba_high, cmd->device, cmd->lba_low_exp, cmd->lba_mid_exp, cmd->lba_high_exp, cmd->features_exp, cmd->sector_count, cmd->sector_count_exp); } char * ata_res_string(struct ata_res *res, char *res_string, size_t len) { struct sbuf sb; int error; if (len == 0) return (""); sbuf_new(&sb, res_string, len, SBUF_FIXEDLEN); ata_res_sbuf(res, &sb); error = sbuf_finish(&sb); if (error != 0 && error != ENOMEM) return (""); return(sbuf_data(&sb)); } int ata_res_sbuf(struct ata_res *res, struct sbuf *sb) { sbuf_printf(sb, "%02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x", res->status, res->error, res->lba_low, res->lba_mid, res->lba_high, res->device, res->lba_low_exp, res->lba_mid_exp, res->lba_high_exp, res->sector_count, res->sector_count_exp); return (0); } /* * ata_command_sbuf() returns 0 for success and -1 for failure. 
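 *
 * Editorial example (not part of the original commit): a hypothetical
 * caller combining the decode helpers above, with the 64-byte buffer
 * size an arbitrary choice:
 *
 *	char buf[64];
 *
 *	printf("%s. ACB: %s\n", ata_op_string(&ataio->cmd),
 *	    ata_cmd_string(&ataio->cmd, buf, sizeof(buf)));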
 */
int
ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb)
{

	sbuf_printf(sb, "%s. ACB: ", ata_op_string(&ataio->cmd));
	ata_cmd_sbuf(&ataio->cmd, sb);

	return(0);
}

/*
 * ata_status_sbuf() returns 0 for success and -1 for failure.
 */
int
ata_status_sbuf(struct ccb_ataio *ataio, struct sbuf *sb)
{

	sbuf_printf(sb, "ATA status: %02x (%s%s%s%s%s%s%s%s)",
	    ataio->res.status,
	    (ataio->res.status & 0x80) ? "BSY " : "",
	    (ataio->res.status & 0x40) ? "DRDY " : "",
	    (ataio->res.status & 0x20) ? "DF " : "",
	    (ataio->res.status & 0x10) ? "SERV " : "",
	    (ataio->res.status & 0x08) ? "DRQ " : "",
	    (ataio->res.status & 0x04) ? "CORR " : "",
	    (ataio->res.status & 0x02) ? "IDX " : "",
	    (ataio->res.status & 0x01) ? "ERR" : "");
	if (ataio->res.status & 1) {
		sbuf_printf(sb, ", error: %02x (%s%s%s%s%s%s%s%s)",
		    ataio->res.error,
		    (ataio->res.error & 0x80) ? "ICRC " : "",
		    (ataio->res.error & 0x40) ? "UNC " : "",
		    (ataio->res.error & 0x20) ? "MC " : "",
		    (ataio->res.error & 0x10) ? "IDNF " : "",
		    (ataio->res.error & 0x08) ? "MCR " : "",
		    (ataio->res.error & 0x04) ? "ABRT " : "",
		    (ataio->res.error & 0x02) ? "NM " : "",
		    (ataio->res.error & 0x01) ? "ILI" : "");
	}

	return(0);
}

void
ata_print_ident(struct ata_params *ident_data)
{
	const char *proto;
	char ata[12], sata[12];

	ata_print_ident_short(ident_data);

	proto = (ident_data->config == ATA_PROTO_CFA) ? "CFA" :
	    (ident_data->config & ATA_PROTO_ATAPI) ? "ATAPI" : "ATA";
	if (ata_version(ident_data->version_major) == 0) {
		snprintf(ata, sizeof(ata), "%s", proto);
	} else if (ata_version(ident_data->version_major) <= 7) {
		snprintf(ata, sizeof(ata), "%s-%d", proto,
		    ata_version(ident_data->version_major));
	} else if (ata_version(ident_data->version_major) == 8) {
		snprintf(ata, sizeof(ata), "%s8-ACS", proto);
	} else {
		snprintf(ata, sizeof(ata), "ACS-%d %s",
		    ata_version(ident_data->version_major) - 7, proto);
	}
	if (ident_data->satacapabilities &&
	    ident_data->satacapabilities != 0xffff) {
		if (ident_data->satacapabilities & ATA_SATA_GEN3)
			snprintf(sata, sizeof(sata), " SATA 3.x");
		else if (ident_data->satacapabilities & ATA_SATA_GEN2)
			snprintf(sata, sizeof(sata), " SATA 2.x");
		else if (ident_data->satacapabilities & ATA_SATA_GEN1)
			snprintf(sata, sizeof(sata), " SATA 1.x");
		else
			snprintf(sata, sizeof(sata), " SATA");
	} else
		sata[0] = 0;
	printf(" %s%s device\n", ata, sata);
}

void
ata_print_ident_sbuf(struct ata_params *ident_data, struct sbuf *sb)
{
	const char *proto, *sata;
	int version;

	ata_print_ident_short_sbuf(ident_data, sb);
	sbuf_printf(sb, " ");

	proto = (ident_data->config == ATA_PROTO_CFA) ? "CFA" :
	    (ident_data->config & ATA_PROTO_ATAPI) ?
"ATAPI" : "ATA"; version = ata_version(ident_data->version_major); switch (version) { case 0: sbuf_printf(sb, "%s", proto); break; case 1: case 2: case 3: case 4: case 5: case 6: case 7: sbuf_printf(sb, "%s-%d", proto, version); break; case 8: sbuf_printf(sb, "%s8-ACS", proto); break; default: sbuf_printf(sb, "ACS-%d %s", version - 7, proto); break; } if (ident_data->satacapabilities && ident_data->satacapabilities != 0xffff) { if (ident_data->satacapabilities & ATA_SATA_GEN3) sata = " SATA 3.x"; else if (ident_data->satacapabilities & ATA_SATA_GEN2) sata = " SATA 2.x"; else if (ident_data->satacapabilities & ATA_SATA_GEN1) sata = " SATA 1.x"; else sata = " SATA"; } else sata = ""; sbuf_printf(sb, "%s device\n", sata); } void ata_print_ident_short(struct ata_params *ident_data) { char product[48], revision[16]; cam_strvis(product, ident_data->model, sizeof(ident_data->model), sizeof(product)); cam_strvis(revision, ident_data->revision, sizeof(ident_data->revision), sizeof(revision)); printf("<%s %s>", product, revision); } void ata_print_ident_short_sbuf(struct ata_params *ident_data, struct sbuf *sb) { sbuf_printf(sb, "<"); cam_strvis_sbuf(sb, ident_data->model, sizeof(ident_data->model), 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, ident_data->revision, sizeof(ident_data->revision), 0); sbuf_printf(sb, ">"); } void semb_print_ident(struct sep_identify_data *ident_data) { char in[7], ins[5]; semb_print_ident_short(ident_data); cam_strvis(in, ident_data->interface_id, 6, sizeof(in)); cam_strvis(ins, ident_data->interface_rev, 4, sizeof(ins)); printf(" SEMB %s %s device\n", in, ins); } void semb_print_ident_sbuf(struct sep_identify_data *ident_data, struct sbuf *sb) { semb_print_ident_short_sbuf(ident_data, sb); sbuf_printf(sb, " SEMB "); cam_strvis_sbuf(sb, ident_data->interface_id, 6, 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, ident_data->interface_rev, 4, 0); sbuf_printf(sb, " device\n"); } void semb_print_ident_short(struct sep_identify_data *ident_data) { char vendor[9], product[17], revision[5], fw[5]; cam_strvis(vendor, ident_data->vendor_id, 8, sizeof(vendor)); cam_strvis(product, ident_data->product_id, 16, sizeof(product)); cam_strvis(revision, ident_data->product_rev, 4, sizeof(revision)); cam_strvis(fw, ident_data->firmware_rev, 4, sizeof(fw)); printf("<%s %s %s %s>", vendor, product, revision, fw); } void semb_print_ident_short_sbuf(struct sep_identify_data *ident_data, struct sbuf *sb) { sbuf_printf(sb, "<"); cam_strvis_sbuf(sb, ident_data->vendor_id, 8, 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, ident_data->product_id, 16, 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, ident_data->product_rev, 4, 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, ident_data->firmware_rev, 4, 0); sbuf_printf(sb, ">"); } uint32_t ata_logical_sector_size(struct ata_params *ident_data) { if ((ident_data->pss & ATA_PSS_VALID_MASK) == ATA_PSS_VALID_VALUE && (ident_data->pss & ATA_PSS_LSSABOVE512)) { return (((u_int32_t)ident_data->lss_1 | ((u_int32_t)ident_data->lss_2 << 16)) * 2); } return (512); } uint64_t ata_physical_sector_size(struct ata_params *ident_data) { if ((ident_data->pss & ATA_PSS_VALID_MASK) == ATA_PSS_VALID_VALUE) { if (ident_data->pss & ATA_PSS_MULTLS) { return ((uint64_t)ata_logical_sector_size(ident_data) * (1 << (ident_data->pss & ATA_PSS_LSPPS))); } else { return (uint64_t)ata_logical_sector_size(ident_data); } } return (512); } uint64_t ata_logical_sector_offset(struct ata_params *ident_data) { if ((ident_data->lsalign & 0xc000) == 0x4000) { return 
((uint64_t)ata_logical_sector_size(ident_data) * (ident_data->lsalign & 0x3fff)); } return (0); } void ata_28bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features, uint32_t lba, uint8_t sector_count) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = 0; if (cmd == ATA_READ_DMA || cmd == ATA_READ_DMA_QUEUED || cmd == ATA_WRITE_DMA || cmd == ATA_WRITE_DMA_QUEUED) ataio->cmd.flags |= CAM_ATAIO_DMA; ataio->cmd.command = cmd; ataio->cmd.features = features; ataio->cmd.lba_low = lba; ataio->cmd.lba_mid = lba >> 8; ataio->cmd.lba_high = lba >> 16; ataio->cmd.device = ATA_DEV_LBA | ((lba >> 24) & 0x0f); ataio->cmd.sector_count = sector_count; } void ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features, uint64_t lba, uint16_t sector_count) { ataio->cmd.flags = CAM_ATAIO_48BIT; if (cmd == ATA_READ_DMA48 || cmd == ATA_READ_DMA_QUEUED48 || cmd == ATA_READ_STREAM_DMA48 || cmd == ATA_WRITE_DMA48 || cmd == ATA_WRITE_DMA_FUA48 || cmd == ATA_WRITE_DMA_QUEUED48 || cmd == ATA_WRITE_DMA_QUEUED_FUA48 || cmd == ATA_WRITE_STREAM_DMA48 || cmd == ATA_DATA_SET_MANAGEMENT || cmd == ATA_READ_LOG_DMA_EXT) ataio->cmd.flags |= CAM_ATAIO_DMA; ataio->cmd.command = cmd; ataio->cmd.features = features; ataio->cmd.lba_low = lba; ataio->cmd.lba_mid = lba >> 8; ataio->cmd.lba_high = lba >> 16; ataio->cmd.device = ATA_DEV_LBA; ataio->cmd.lba_low_exp = lba >> 24; ataio->cmd.lba_mid_exp = lba >> 32; ataio->cmd.lba_high_exp = lba >> 40; ataio->cmd.features_exp = features >> 8; ataio->cmd.sector_count = sector_count; ataio->cmd.sector_count_exp = sector_count >> 8; ataio->cmd.control = 0; } void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint64_t lba, uint16_t sector_count) { ataio->cmd.flags = CAM_ATAIO_48BIT | CAM_ATAIO_FPDMA; ataio->cmd.command = cmd; ataio->cmd.features = sector_count; ataio->cmd.lba_low = lba; ataio->cmd.lba_mid = lba >> 8; ataio->cmd.lba_high = lba >> 16; ataio->cmd.device = ATA_DEV_LBA; ataio->cmd.lba_low_exp = lba >> 24; ataio->cmd.lba_mid_exp = lba >> 32; ataio->cmd.lba_high_exp = lba >> 40; ataio->cmd.features_exp = sector_count >> 8; ataio->cmd.sector_count = 0; ataio->cmd.sector_count_exp = 0; ataio->cmd.control = 0; } void ata_reset_cmd(struct ccb_ataio *ataio) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT; ataio->cmd.control = 0x04; } void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_NEEDRESULT; ataio->cmd.command = ATA_READ_PM; ataio->cmd.features = reg; ataio->cmd.device = port & 0x0f; } void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = 0; ataio->cmd.command = ATA_WRITE_PM; ataio->cmd.features = reg; ataio->cmd.sector_count = val; ataio->cmd.lba_low = val >> 8; ataio->cmd.lba_mid = val >> 16; ataio->cmd.lba_high = val >> 24; ataio->cmd.device = port & 0x0f; } void ata_read_log(struct ccb_ataio *ataio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t log_address, uint32_t page_number, uint16_t block_count, uint32_t protocol, uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout) { uint64_t lba; cam_fill_ataio(ataio, /*retries*/ 1, /*cbfcnp*/ cbfcnp, /*flags*/ CAM_DIR_IN, /*tag_action*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ timeout); lba = (((uint64_t)page_number & 0xff00) << 32) | ((page_number & 0x00ff) << 8) | (log_address & 0xff); ata_48bit_cmd(ataio, /*cmd*/ (protocol & 
CAM_ATAIO_DMA) ? ATA_READ_LOG_DMA_EXT : ATA_READ_LOG_EXT, /*features*/ 0, /*lba*/ lba, /*sector_count*/ block_count); } void ata_bswap(int8_t *buf, int len) { u_int16_t *ptr = (u_int16_t*)(buf + len); while (--ptr >= (u_int16_t*)buf) *ptr = be16toh(*ptr); } void ata_btrim(int8_t *buf, int len) { int8_t *ptr; for (ptr = buf; ptr < buf+len; ++ptr) if (!*ptr || *ptr == '_') *ptr = ' '; for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) *ptr = 0; } void ata_bpack(int8_t *src, int8_t *dst, int len) { int i, j, blank; for (i = j = blank = 0 ; i < len; i++) { if (blank && src[i] == ' ') continue; if (blank && src[i] != ' ') { dst[j++] = src[i]; blank = 0; continue; } if (src[i] == ' ') { blank = 1; if (i == 0) continue; } dst[j++] = src[i]; } while (j < len) dst[j++] = 0x00; } int ata_max_pmode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_64_70) { if (ap->apiomodes & 0x02) return ATA_PIO4; if (ap->apiomodes & 0x01) return ATA_PIO3; } if (ap->mwdmamodes & 0x04) return ATA_PIO4; if (ap->mwdmamodes & 0x02) return ATA_PIO3; if (ap->mwdmamodes & 0x01) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) return ATA_PIO1; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) return ATA_PIO0; return ATA_PIO0; } int ata_max_wmode(struct ata_params *ap) { if (ap->mwdmamodes & 0x04) return ATA_WDMA2; if (ap->mwdmamodes & 0x02) return ATA_WDMA1; if (ap->mwdmamodes & 0x01) return ATA_WDMA0; return -1; } int ata_max_umode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_88) { if (ap->udmamodes & 0x40) return ATA_UDMA6; if (ap->udmamodes & 0x20) return ATA_UDMA5; if (ap->udmamodes & 0x10) return ATA_UDMA4; if (ap->udmamodes & 0x08) return ATA_UDMA3; if (ap->udmamodes & 0x04) return ATA_UDMA2; if (ap->udmamodes & 0x02) return ATA_UDMA1; if (ap->udmamodes & 0x01) return ATA_UDMA0; } return -1; } int ata_max_mode(struct ata_params *ap, int maxmode) { if (maxmode == 0) maxmode = ATA_DMA_MAX; if (maxmode >= ATA_UDMA0 && ata_max_umode(ap) > 0) return (min(maxmode, ata_max_umode(ap))); if (maxmode >= ATA_WDMA0 && ata_max_wmode(ap) > 0) return (min(maxmode, ata_max_wmode(ap))); return (min(maxmode, ata_max_pmode(ap))); } char * ata_mode2string(int mode) { switch (mode) { case -1: return "UNSUPPORTED"; case 0: return "NONE"; case ATA_PIO0: return "PIO0"; case ATA_PIO1: return "PIO1"; case ATA_PIO2: return "PIO2"; case ATA_PIO3: return "PIO3"; case ATA_PIO4: return "PIO4"; case ATA_WDMA0: return "WDMA0"; case ATA_WDMA1: return "WDMA1"; case ATA_WDMA2: return "WDMA2"; case ATA_UDMA0: return "UDMA0"; case ATA_UDMA1: return "UDMA1"; case ATA_UDMA2: return "UDMA2"; case ATA_UDMA3: return "UDMA3"; case ATA_UDMA4: return "UDMA4"; case ATA_UDMA5: return "UDMA5"; case ATA_UDMA6: return "UDMA6"; default: if (mode & ATA_DMA_MASK) return "BIOSDMA"; else return "BIOSPIO"; } } int ata_string2mode(char *str) { if (!strcasecmp(str, "PIO0")) return (ATA_PIO0); if (!strcasecmp(str, "PIO1")) return (ATA_PIO1); if (!strcasecmp(str, "PIO2")) return (ATA_PIO2); if (!strcasecmp(str, "PIO3")) return (ATA_PIO3); if (!strcasecmp(str, "PIO4")) return (ATA_PIO4); if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0); if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1); if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2); if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0); if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0); if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1); if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1); if 
(!strcasecmp(str, "UDMA2")) return (ATA_UDMA2); if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2); if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3); if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3); if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4); if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4); if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5); if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5); if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6); if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6); return (-1); } u_int ata_mode2speed(int mode) { switch (mode) { case ATA_PIO0: default: return (3300); case ATA_PIO1: return (5200); case ATA_PIO2: return (8300); case ATA_PIO3: return (11100); case ATA_PIO4: return (16700); case ATA_WDMA0: return (4200); case ATA_WDMA1: return (13300); case ATA_WDMA2: return (16700); case ATA_UDMA0: return (16700); case ATA_UDMA1: return (25000); case ATA_UDMA2: return (33300); case ATA_UDMA3: return (44400); case ATA_UDMA4: return (66700); case ATA_UDMA5: return (100000); case ATA_UDMA6: return (133000); } } u_int ata_revision2speed(int revision) { switch (revision) { case 1: default: return (150000); case 2: return (300000); case 3: return (600000); } } int ata_speed2revision(u_int speed) { switch (speed) { case 0: return (0); case 150000: return (1); case 300000: return (2); case 600000: return (3); default: return (-1); } } int ata_identify_match(caddr_t identbuffer, caddr_t table_entry) { struct scsi_inquiry_pattern *entry; struct ata_params *ident; entry = (struct scsi_inquiry_pattern *)table_entry; ident = (struct ata_params *)identbuffer; if ((cam_strmatch(ident->model, entry->product, sizeof(ident->model)) == 0) && (cam_strmatch(ident->revision, entry->revision, sizeof(ident->revision)) == 0)) { return (0); } return (-1); } int ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry) { struct scsi_static_inquiry_pattern *entry; struct ata_params *ident; entry = (struct scsi_static_inquiry_pattern *)table_entry; ident = (struct ata_params *)identbuffer; if ((cam_strmatch(ident->model, entry->product, sizeof(ident->model)) == 0) && (cam_strmatch(ident->revision, entry->revision, sizeof(ident->revision)) == 0)) { return (0); } return (-1); } void semb_receive_diagnostic_results(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t length, uint32_t timeout) { length = min(length, 1020); length = (length + 3) & ~3; cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, length, timeout); ata_28bit_cmd(ataio, ATA_SEP_ATTN, pcv ? page_code : 0, 0x02, length / 4); } void semb_send_diagnostic(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t *data_ptr, uint16_t length, uint32_t timeout) { length = min(length, 1020); length = (length + 3) & ~3; cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/length ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, length, timeout); ata_28bit_cmd(ataio, ATA_SEP_ATTN, length > 0 ? 
	    data_ptr[0] : 0, 0x82, length / 4);
}

void
semb_read_buffer(struct ccb_ataio *ataio,
    u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*),
    uint8_t tag_action, uint8_t page_code,
    uint8_t *data_ptr, uint16_t length, uint32_t timeout)
{

	length = min(length, 1020);
	length = (length + 3) & ~3;
	cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, data_ptr, length, timeout);
	ata_28bit_cmd(ataio, ATA_SEP_ATTN, page_code, 0x00, length / 4);
}

void
semb_write_buffer(struct ccb_ataio *ataio,
    u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *),
    uint8_t tag_action, uint8_t *data_ptr, uint16_t length, uint32_t timeout)
{

	length = min(length, 1020);
	length = (length + 3) & ~3;
	cam_fill_ataio(ataio, retries, cbfcnp,
	    /*flags*/length ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action, data_ptr, length, timeout);
	ata_28bit_cmd(ataio, ATA_SEP_ATTN, length > 0 ?
	    data_ptr[0] : 0, 0x80, length / 4);
}

void
ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *),
    int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
    uint16_t sector_count, uint8_t *data_ptr, uint32_t dxfer_len,
    uint32_t timeout)
{
	uint8_t command_out, ata_flags;
	uint16_t features_out, sectors_out;
	uint32_t auxiliary;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		if (dxfer_len == 0) {
			ata_flags = 0;
			sectors_out = 0;
		} else {
			ata_flags = CAM_ATAIO_DMA;
			/* XXX KDM use sector count? */
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			sectors_out = 0;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/* Note that we're defaulting to normal priority */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else {
				/*
				 * Yes, the caller can theoretically send a
				 * transfer larger than we can handle.
				 * Anyone using this function needs enough
				 * knowledge to avoid doing that.
				 */
				features_out = ((dxfer_len >> 9) & 0xffff);
			}
		}
		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = CAM_ATAIO_FPDMA;
	}

	cam_fill_ataio(ataio,
	    /*retries*/ retries,
	    /*cbfcnp*/ cbfcnp,
	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
	    /*tag_action*/ 0,
	    /*data_ptr*/ data_ptr,
	    /*dxfer_len*/ dxfer_len,
	    /*timeout*/ timeout);

	ata_48bit_cmd(ataio,
	    /*cmd*/ command_out,
	    /*features*/ features_out,
	    /*lba*/ zone_id,
	    /*sector_count*/ sectors_out);

	ataio->cmd.flags |= ata_flags;
	if (auxiliary != 0) {
		ataio->ata_flags |= ATA_FLAG_AUX;
		ataio->aux = auxiliary;
	}
}

void
ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *),
    int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
    uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout)
{
	uint8_t command_out, ata_flags;
	uint16_t features_out, sectors_out;
	uint32_t auxiliary;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_IN;
		/* XXX KDM put a macro here */
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = CAM_ATAIO_DMA;
		sectors_out = ((dxfer_len >> 9) & 0xffff);
		auxiliary = 0;
	} else {
		command_out = ATA_RECV_FPDMA_QUEUED;
		sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = CAM_ATAIO_FPDMA;
		/*
		 * For RECEIVE FPDMA QUEUED, the transfer length is
		 * encoded in the FEATURE register, and 0 means
		 * that 65536 512 byte blocks are to be transferred.
		 * In practice, it is unlikely we will see a transfer that
		 * large.
		 */
		if (dxfer_len == (65536 * 512)) {
			features_out = 0;
		} else {
			/*
			 * Yes, the caller can theoretically request a
			 * transfer larger than we can handle.
			 * Anyone using this function needs enough
			 * knowledge to avoid doing that.
			 */
			features_out = ((dxfer_len >> 9) & 0xffff);
		}
	}

	cam_fill_ataio(ataio,
	    /*retries*/ retries,
	    /*cbfcnp*/ cbfcnp,
	    /*flags*/ CAM_DIR_IN,
	    /*tag_action*/ 0,
	    /*data_ptr*/ data_ptr,
	    /*dxfer_len*/ dxfer_len,
	    /*timeout*/ timeout);

	ata_48bit_cmd(ataio,
	    /*cmd*/ command_out,
	    /*features*/ features_out,
	    /*lba*/ zone_id,
	    /*sector_count*/ sectors_out);

	ataio->cmd.flags |= ata_flags;
	if (auxiliary != 0) {
		ataio->ata_flags |= ATA_FLAG_AUX;
		ataio->aux = auxiliary;
	}
}
Index: head/sys/cam/ata/ata_all.h
===================================================================
--- head/sys/cam/ata/ata_all.h (revision 326264)
+++ head/sys/cam/ata/ata_all.h (revision 326265)
@@ -1,191 +1,193 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef CAM_ATA_ALL_H
#define CAM_ATA_ALL_H 1

#include <sys/ata.h>

struct ccb_ataio;
struct cam_periph;
union ccb;

#define SID_DMA48 0x01	/* Abuse inq_flags bit to track enabled DMA48. */
#define SID_AEN 0x04	/* Abuse inq_flags bit to track enabled AEN. */
#define SID_DMA 0x10	/* Abuse inq_flags bit to track enabled DMA. */

struct ata_cmd {
	u_int8_t flags;		/* ATA command flags */
#define CAM_ATAIO_48BIT		0x01	/* Command has 48-bit format */
#define CAM_ATAIO_FPDMA		0x02	/* FPDMA command */
#define CAM_ATAIO_CONTROL	0x04	/* Control, not a command */
#define CAM_ATAIO_NEEDRESULT	0x08	/* Request requires result. */
#define CAM_ATAIO_DMA		0x10	/* DMA command */
	u_int8_t command;
	u_int8_t features;
	u_int8_t lba_low;
	u_int8_t lba_mid;
	u_int8_t lba_high;
	u_int8_t device;
	u_int8_t lba_low_exp;
	u_int8_t lba_mid_exp;
	u_int8_t lba_high_exp;
	u_int8_t features_exp;
	u_int8_t sector_count;
	u_int8_t sector_count_exp;
	u_int8_t control;
};

struct ata_res {
	u_int8_t flags;		/* ATA command flags */
#define CAM_ATAIO_48BIT		0x01	/* Command has 48-bit format */
	u_int8_t status;
	u_int8_t error;
	u_int8_t lba_low;
	u_int8_t lba_mid;
	u_int8_t lba_high;
	u_int8_t device;
	u_int8_t lba_low_exp;
	u_int8_t lba_mid_exp;
	u_int8_t lba_high_exp;
	u_int8_t sector_count;
	u_int8_t sector_count_exp;
};

struct sep_identify_data {
	uint8_t length;		/* Enclosure descriptor length */
	uint8_t subenc_id;	/* Sub-enclosure identifier */
	uint8_t logical_id[8];	/* Enclosure logical identifier (WWN) */
	uint8_t vendor_id[8];	/* Vendor identification string */
	uint8_t product_id[16];	/* Product identification string */
	uint8_t product_rev[4];	/* Product revision string */
	uint8_t channel_id;	/* Channel identifier */
	uint8_t firmware_rev[4];/* Firmware revision */
	uint8_t interface_id[6];/* Interface spec ("S-E-S "/"SAF-TE")*/
	uint8_t interface_rev[4];/* Interface spec revision */
	uint8_t vend_spec[11];	/* Vendor specific information */
};

int ata_version(int ver);

char * ata_op_string(struct ata_cmd *cmd);
char * ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len);
void ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb);
char * ata_res_string(struct ata_res *res, char *res_string, size_t len);
int ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
int ata_status_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
int ata_res_sbuf(struct ata_res *res, struct sbuf *sb);

void ata_print_ident(struct ata_params *ident_data);
void ata_print_ident_sbuf(struct ata_params *ident_data, struct sbuf *sb);
void ata_print_ident_short(struct ata_params *ident_data);
void ata_print_ident_short_sbuf(struct ata_params *ident_data, struct sbuf *sb);

uint32_t ata_logical_sector_size(struct ata_params *ident_data);
uint64_t ata_physical_sector_size(struct ata_params *ident_data);
uint64_t ata_logical_sector_offset(struct ata_params *ident_data);

void ata_28bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features,
    uint32_t lba, uint8_t sector_count);
void ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features,
    uint64_t lba, uint16_t sector_count);
void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd,
    uint64_t lba, uint16_t sector_count);
void ata_reset_cmd(struct ccb_ataio *ataio);
void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port);
void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val);
void ata_read_log(struct ccb_ataio *ataio, uint32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *),
    uint32_t log_address, uint32_t page_number, uint16_t block_count,
    uint32_t protocol, uint8_t *data_ptr, uint32_t dxfer_len,
    uint32_t timeout);

void ata_bswap(int8_t *buf, int len);
void ata_btrim(int8_t *buf, int len);
void ata_bpack(int8_t *src, int8_t *dst, int len);

int ata_max_pmode(struct ata_params *ap);
int ata_max_wmode(struct ata_params *ap);
int ata_max_umode(struct ata_params *ap);
int ata_max_mode(struct ata_params *ap, int maxmode);

char * ata_mode2string(int mode);
int ata_string2mode(char *str);
u_int ata_mode2speed(int mode);
u_int ata_revision2speed(int revision);
int ata_speed2revision(u_int speed);

int ata_identify_match(caddr_t identbuffer, caddr_t table_entry);
int ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry);

void semb_print_ident(struct sep_identify_data *ident_data);
void semb_print_ident_sbuf(struct sep_identify_data *ident_data,
    struct sbuf *sb);
void semb_print_ident_short(struct sep_identify_data *ident_data);
void semb_print_ident_short_sbuf(struct sep_identify_data *ident_data,
    struct sbuf *sb);

void semb_receive_diagnostic_results(struct ccb_ataio *ataio,
    u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*),
    uint8_t tag_action, int pcv, uint8_t page_code,
    uint8_t *data_ptr, uint16_t allocation_length, uint32_t timeout);

void semb_send_diagnostic(struct ccb_ataio *ataio,
    u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *),
    uint8_t tag_action, uint8_t *data_ptr, uint16_t param_list_length,
    uint32_t timeout);

void semb_read_buffer(struct ccb_ataio *ataio,
    u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*),
    uint8_t tag_action, uint8_t page_code,
    uint8_t *data_ptr, uint16_t allocation_length, uint32_t timeout);

void semb_write_buffer(struct ccb_ataio *ataio,
    u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *),
    uint8_t tag_action, uint8_t *data_ptr, uint16_t param_list_length,
    uint32_t timeout);

void ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *),
    int use_ncq __unused, uint8_t zm_action, uint64_t zone_id,
    uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr,
    uint32_t dxfer_len, uint32_t timeout);

void ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *),
    int use_ncq __unused, uint8_t zm_action, uint64_t zone_id,
    uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
    uint32_t timeout);

#endif
Index: head/sys/cam/ata/ata_da.c
===================================================================
--- head/sys/cam/ata/ata_da.c (revision 326264)
+++ head/sys/cam/ata/ata_da.c (revision 326265)
@@ -1,3620 +1,3622 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ada.h"

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/cons.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>

#include <cam/ata/ata_all.h>

#include <machine/md_var.h>	/* geometry translation */

#ifdef _KERNEL

#define ATA_MAX_28BIT_LBA	268435455UL

extern int iosched_debug;

typedef enum {
	ADA_STATE_RAHEAD,
	ADA_STATE_WCACHE,
	ADA_STATE_LOGDIR,
	ADA_STATE_IDDIR,
	ADA_STATE_SUP_CAP,
	ADA_STATE_ZONE,
	ADA_STATE_NORMAL
} ada_state;

typedef enum {
	ADA_FLAG_CAN_48BIT	= 0x00000002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x00000004,
	ADA_FLAG_CAN_NCQ	= 0x00000008,
	ADA_FLAG_CAN_DMA	= 0x00000010,
	ADA_FLAG_NEED_OTAG	= 0x00000020,
	ADA_FLAG_WAS_OTAG	= 0x00000040,
	ADA_FLAG_CAN_TRIM	= 0x00000080,
	ADA_FLAG_OPEN		= 0x00000100,
	ADA_FLAG_SCTX_INIT	= 0x00000200,
	ADA_FLAG_CAN_CFA	= 0x00000400,
	ADA_FLAG_CAN_POWERMGT	= 0x00000800,
	ADA_FLAG_CAN_DMA48	= 0x00001000,
	ADA_FLAG_CAN_LOG	= 0x00002000,
	ADA_FLAG_CAN_IDLOG	= 0x00004000,
	ADA_FLAG_CAN_SUPCAP	= 0x00008000,
	ADA_FLAG_CAN_ZONE	= 0x00010000,
	ADA_FLAG_CAN_WCACHE	= 0x00020000,
	ADA_FLAG_CAN_RAHEAD	= 0x00040000,
	ADA_FLAG_PROBED		= 0x00080000,
	ADA_FLAG_ANNOUNCED	= 0x00100000,
	ADA_FLAG_DIRTY		= 0x00200000,
	ADA_FLAG_CAN_NCQ_TRIM	= 0x00400000,	/* CAN_TRIM also set */
	ADA_FLAG_PIM_ATA_EXT	= 0x00800000
} ada_flags;

typedef enum {
	ADA_Q_NONE		= 0x00,
	ADA_Q_4K		= 0x01,
	ADA_Q_NCQ_TRIM_BROKEN	= 0x02,
	ADA_Q_LOG_BROKEN	= 0x04,
	ADA_Q_SMR_DM		= 0x08
} ada_quirks;

#define ADA_Q_BIT_STRING	\
	"\020"			\
	"\0014K"		\
	"\002NCQ_TRIM_BROKEN"	\
	"\003LOG_BROKEN"	\
	"\004SMR_DM"

typedef enum {
	ADA_CCB_RAHEAD		= 0x01,
	ADA_CCB_WCACHE		= 0x02,
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TRIM		= 0x06,
	ADA_CCB_LOGDIR		= 0x07,
	ADA_CCB_IDDIR		= 0x08,
	ADA_CCB_SUP_CAP		= 0x09,
	ADA_CCB_ZONE		= 0x0a,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;

typedef enum {
	ADA_ZONE_NONE		= 0x00,
	ADA_ZONE_DRIVE_MANAGED	= 0x01,
	ADA_ZONE_HOST_AWARE	= 0x02,
	ADA_ZONE_HOST_MANAGED	= 0x03
} ada_zone_mode;

typedef enum {
	ADA_ZONE_FLAG_RZ_SUP		= 0x0001,
	ADA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	ADA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	ADA_ZONE_FLAG_FINISH_SUP	= 0x0008,
	ADA_ZONE_FLAG_RWP_SUP		= 0x0010,
	ADA_ZONE_FLAG_SUP_MASK		= (ADA_ZONE_FLAG_RZ_SUP |
					   ADA_ZONE_FLAG_OPEN_SUP |
					   ADA_ZONE_FLAG_CLOSE_SUP |
					   ADA_ZONE_FLAG_FINISH_SUP |
ADA_ZONE_FLAG_RWP_SUP), ADA_ZONE_FLAG_URSWRZ = 0x0020, ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET | ADA_ZONE_FLAG_OPT_NONSEQ_SET | ADA_ZONE_FLAG_MAX_SEQ_SET) } ada_zone_flags; static struct ada_zone_desc { ada_zone_flags value; const char *desc; } ada_zone_desc_table[] = { {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" }, {ADA_ZONE_FLAG_OPEN_SUP, "Open" }, {ADA_ZONE_FLAG_CLOSE_SUP, "Close" }, {ADA_ZONE_FLAG_FINISH_SUP, "Finish" }, {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 typedef enum { ADA_DELETE_NONE, ADA_DELETE_DISABLE, ADA_DELETE_CFA_ERASE, ADA_DELETE_DSM_TRIM, ADA_DELETE_NCQ_DSM_TRIM, ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE, ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM, } ada_delete_methods; static const char *ada_delete_method_names[] = { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" }; #if 0 static const char *ada_delete_method_desc[] = { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" }; #endif struct disk_params { u_int8_t heads; u_int8_t secs_per_track; u_int32_t cylinders; u_int32_t secsize; /* Number of bytes/logical sector */ u_int64_t sectors; /* Total number sectors */ }; #define TRIM_MAX_BLOCKS 8 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES) struct trim_request { uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE]; TAILQ_HEAD(, bio) bps; }; struct ada_softc { struct cam_iosched_softc *cam_iosched; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ ada_state state; ada_flags flags; ada_zone_mode zone_mode; ada_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; ada_quirks quirks; ada_delete_methods delete_method; int trim_max_ranges; int read_ahead; int write_cache; int unmappedio; int rotating; #ifdef ADA_TEST_FAILURE int force_read_error; int force_write_error; int periodic_read_error; int periodic_read_count; #endif struct disk_params params; struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; struct trim_request trim_req; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int timeouts; u_int errors; u_int invalidations; #endif #define ADA_ANNOUNCETMP_SZ 80 char announce_temp[ADA_ANNOUNCETMP_SZ]; #define ADA_ANNOUNCE_SZ 400 char announce_buffer[ADA_ANNOUNCE_SZ]; }; struct ada_quirk_entry { struct scsi_inquiry_pattern inq_pat; ada_quirks quirks; }; static struct ada_quirk_entry ada_quirk_table[] = { { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* 
Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green/Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????EX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" }, /*quirks*/ADA_Q_4K }, /* SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" }, /*quirks*/ADA_Q_4K }, { 
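	/*
	 * Editorial note (not part of the original commit): this table is
	 * assumed to be scanned first-match-wins (via cam_quirkmatch() at
	 * attach time), which is why the firmware-specific
	 * "Crucial CT*M500*" / "MU07" entry below must come before the
	 * catch-all "Crucial CT*M500*" / "*" entry that flags NCQ TRIM
	 * as broken.
	 */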
/* * Crucial M500 SSDs MU07 firmware * NCQ Trim works */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" }, /*quirks*/0 }, { /* * Crucial M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial M550 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial MX100 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/ADA_Q_4K }, { /* * FCCT M500 SSDs * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Micron M500 SSDs firmware MU07 * NCQ Trim works? 
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" },
	/*quirks*/0 },
	{
	/*
	 * Micron M500 SSDs all other firmware
	 * NCQ Trim doesn't work
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" },
	/*quirks*/ADA_Q_NCQ_TRIM_BROKEN },
	{
	/*
	 * Micron M5[15]0 SSDs
	 * NCQ Trim doesn't work, but only MU01 firmware
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" },
	/*quirks*/ADA_Q_NCQ_TRIM_BROKEN },
	{
	/*
	 * Micron 5100 SSDs
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron 5100 MTFDDAK*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * OCZ Agility 2 SSDs
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * OCZ Agility 3 SSDs
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * OCZ Deneva R Series SSDs
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * OCZ Vertex 2 SSDs (inc pro series)
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * OCZ Vertex 3 SSDs
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * OCZ Vertex 4 SSDs
	 * 4k optimised & trim only works in 4k requests + 4k aligned
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * Samsung 750 SSDs
	 * 4k optimised, NCQ TRIM seems to work
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 750*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * Samsung 830 Series SSDs
	 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" },
	/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN },
	{
	/*
	 * Samsung 840 SSDs
	 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" },
	/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN },
	{
	/*
	 * Samsung 845 SSDs
	 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 845*", "*" },
	/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN },
	{
	/*
	 * Samsung 850 SSDs
	 * 4k optimised, NCQ TRIM broken (normal TRIM fine)
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" },
	/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN },
	{
	/*
	 * Samsung SM863 Series SSDs (MZ7KM*)
	 * 4k optimised, NCQ believed to be working
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" },
	/*quirks*/ADA_Q_4K },
	{
	/*
	 * Samsung 843T Series SSDs (MZ7WD*)
	 * Samsung PM851 Series SSDs (MZ7TE*)
	 * Samsung PM853T Series SSDs (MZ7GE*)
	 * 4k optimised, NCQ believed to be broken since these appear
	 * to be built with the same controllers as the 840/850.
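	 *
	 * Editorial note (not part of the original commit): these model
	 * strings are shell-style patterns (matched by cam_strmatch()
	 * underneath cam_quirkmatch()), where '*' matches any run of
	 * characters and '?' exactly one, so the "SAMSUNG MZ7*" entry
	 * that follows picks up the MZ7WD, MZ7TE and MZ7GE families
	 * listed above.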
*/ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung PM851 Series SSDs Dell OEM * device model "SAMSUNG SSD PM851 mSATA 256GB" * 4k optimised, NCQ broken */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD PM851*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" }, /*quirks*/ADA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung drive that doesn't support READ LOG EXT or * READ LOG DMA EXT, despite reporting that it does in * ATA identify data: * SAMSUNG HD200HJ KF100-06 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD200*", "*" }, /*quirks*/ADA_Q_LOG_BROKEN }, { /* * Samsung drive that doesn't support READ LOG EXT or * READ LOG DMA EXT, despite reporting that it does in * ATA identify data: * SAMSUNG HD501LJ CR100-10 */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD501*", "*" }, /*quirks*/ADA_Q_LOG_BROKEN }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST8000AS000[23]*", "*" }, /*quirks*/ADA_Q_SMR_DM }, { /* Default */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0 }, }; static disk_strategy_t adastrategy; static dumper_t adadump; static periph_init_t adainit; static void adadiskgonecb(struct disk *dp); static periph_oninv_t adaoninvalidate; static periph_dtor_t adacleanup; static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int adazonemodesysctl(SYSCTL_HANDLER_ARGS); static int adazonesupsysctl(SYSCTL_HANDLER_ARGS); static void adasysctlinit(void *context, int pending); static int adagetattr(struct bio *bp); static void adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd); static periph_ctor_t adaregister; static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio); static void ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio); static int ada_zone_bio_to_ata(int disk_zone_cmd); static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb); static periph_start_t adastart; static void adaprobedone(struct cam_periph *periph, union ccb *ccb); static void adazonedone(struct cam_periph *periph, union ccb *ccb); static void adadone(struct cam_periph *periph, union ccb *done_ccb); static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd); static timeout_t adasendorderedtag; static void adashutdown(void *arg, int howto); static void adasuspend(void *arg); static void adaresume(void *arg); #ifndef ADA_DEFAULT_TIMEOUT #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef ADA_DEFAULT_RETRY #define ADA_DEFAULT_RETRY 4 #endif #ifndef ADA_DEFAULT_SEND_ORDERED #define ADA_DEFAULT_SEND_ORDERED 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SHUTDOWN #define ADA_DEFAULT_SPINDOWN_SHUTDOWN 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SUSPEND #define 
ADA_DEFAULT_SPINDOWN_SUSPEND 1 #endif #ifndef ADA_DEFAULT_READ_AHEAD #define ADA_DEFAULT_READ_AHEAD 1 #endif #ifndef ADA_DEFAULT_WRITE_CACHE #define ADA_DEFAULT_WRITE_CACHE 1 #endif #define ADA_RA (softc->read_ahead >= 0 ? \ softc->read_ahead : ada_read_ahead) #define ADA_WC (softc->write_cache >= 0 ? \ softc->write_cache : ada_write_cache) /* * Most platforms map firmware geometry to the actual geometry, but some * don't. If not overridden, default to nothing. */ #ifndef ata_disk_firmware_geom_adjust #define ata_disk_firmware_geom_adjust(disk) #endif static int ada_retry_count = ADA_DEFAULT_RETRY; static int ada_default_timeout = ADA_DEFAULT_TIMEOUT; static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED; static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN; static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND; static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD; static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE; static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN, &ada_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &ada_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &ada_send_ordered, 0, "Send Ordered Tags"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN, &ada_spindown_shutdown, 0, "Spin down upon shutdown"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN, &ada_spindown_suspend, 0, "Spin down upon suspend"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN, &ada_read_ahead, 0, "Enable disk read-ahead"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN, &ada_write_cache, 0, "Enable disk write cache"); /* * ADA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case, where a starved transaction starts during an interval that * passes the "don't send an ordered tag" test, so it can take us two * intervals to determine that a tag must be sent.
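 *
 * As a worked example (illustrative only, not additional driver code):
 * with the default 30 second timeout and ADA_ORDEREDTAG_INTERVAL of 4,
 * the periodic callout fires every (30 * hz) / 4 ticks, i.e. roughly
 * every 7.5 seconds, leaving at least half the timeout for a starved
 * transaction to complete once the ordered tag goes out:
 *
 *	callout_reset(&softc->sendordered_c,
 *	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
 *	    adasendorderedtag, softc);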
*/ #ifndef ADA_ORDEREDTAG_INTERVAL #define ADA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver adadriver = { adainit, "ada", TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0 }; static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS); PERIPHDRIVER_DECLARE(ada, adadriver); static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers"); static int adaopen(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaopen\n")); softc = (struct ada_softc *)periph->softc; softc->flags |= ADA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int adaclose(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaclose\n")); /* We only sync the cache if the drive is capable of it. */ if ((softc->flags & ADA_FLAG_DIRTY) != 0 && (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0 && cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/0, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); softc->flags &= ~ADA_FLAG_DIRTY; xpt_release_ccb(ccb); cam_periph_unhold(periph); } softc->flags &= ~ADA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void adaschedule(struct cam_periph *periph) { struct ada_softc *softc = (struct ada_softc *)periph->softc; if (softc->state != ADA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void adastrategy(struct bio *bp) { struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
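 *
 * (adaschedule() defers to the CAM I/O scheduler and is a no-op until
 * probing finishes; a minimal sketch of what the call below amounts to,
 * shown here for reference only:
 *
 *	if (softc->state == ADA_STATE_NORMAL)
 *		cam_iosched_schedule(softc->cam_iosched, periph);
 * )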
*/ adaschedule(periph); cam_periph_unlock(periph); return; } static int adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct ada_softc *softc; u_int secsize; union ccb ccb; struct disk *dp; uint64_t lba; uint16_t count; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); secsize = softc->params.secsize; lba = offset / secsize; count = length / secsize; if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); return (ENXIO); } memset(&ccb, 0, sizeof(ccb)); if (length > 0) { xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&ccb.ataio, 0, adadone, CAM_DIR_OUT, 0, (u_int8_t *) virtual, length, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count >= 256)) { ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48, 0, lba, count); } else { ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA, 0, lba, count); } xpt_polled_action(&ccb); error = adaerror(&ccb, 0, SF_NO_RECOVERY | SF_NO_RETRY); if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) printf("Aborting dump due to I/O error.\n"); cam_periph_unlock(periph); return (error); } if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) { xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); /* * Tell the drive to flush its internal cache. If we * can't flush in 5s we have big problems. No need to * wait the full default timeout to detect problems. */ ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&ccb.ataio, 0, adadone, CAM_DIR_NONE, 0, NULL, 0, 5*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0); xpt_polled_action(&ccb); error = adaerror(&ccb, 0, SF_NO_RECOVERY | SF_NO_RETRY); if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } cam_periph_unlock(periph); return (error); } static void adainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ada: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (ada_send_ordered) { /* Register our event handlers */ if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("adainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(power_resume, adaresume, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("adainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("adainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void adadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void adaoninvalidate(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; /* * De-register any async callbacks.
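 *
 * (Passing an event mask of 0 to xpt_register_async() below is what
 * removes the callback; it is the mirror image of the registration
 * made in adaregister():
 *
 *	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
 *	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
 *	    adaasync, periph, periph->path);
 * )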
*/ xpt_register_async(0, adaasync, periph, periph->path); #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); disk_gone(softc->disk); } static void adacleanup(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void adasetdeletemethod(struct ada_softc *softc) { if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM; else if (softc->flags & ADA_FLAG_CAN_TRIM) softc->delete_method = ADA_DELETE_DSM_TRIM; else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) softc->delete_method = ADA_DELETE_CFA_ERASE; else softc->delete_method = ADA_DELETE_NONE; } static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_ATA) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(adaregister, adaoninvalidate, adacleanup, adastart, "ada", CAM_PERIPH_BIO, path, adaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("adaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { softc = (struct ada_softc *)periph->softc; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); /* * Set/clear support flags based on the new Identify data. 
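 *
 * (adasetflags() applies one idiom throughout: test a bit in the fresh
 * identify data, then set or clear the matching softc flag, as in this
 * representative excerpt from that function:
 *
 *	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
 *		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
 *	else
 *		softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE;
 * )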
*/ adasetflags(softc, &cgd); cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct ada_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_SENT_BDR: case AC_BUS_RESET: { softc = (struct ada_softc *)periph->softc; cam_periph_async(periph, code, path, arg); if (softc->state != ADA_STATE_NORMAL) break; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) softc->state = ADA_STATE_RAHEAD; else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) softc->state = ADA_STATE_WCACHE; else if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) softc->state = ADA_STATE_LOGDIR; else break; if (cam_periph_acquire(periph) != CAM_REQ_CMP) softc->state = ADA_STATE_NORMAL; else xpt_schedule(periph, CAM_PRIORITY_DEV); } default: cam_periph_async(periph, code, path, arg); break; } } static int adazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct ada_softc *softc; int error; softc = (struct ada_softc *)arg1; switch (softc->zone_mode) { case ADA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case ADA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case ADA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case ADA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int adazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct ada_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct ada_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(ada_zone_desc_table) / sizeof(ada_zone_desc_table[0]); i++) { if (softc->zone_flags & ada_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, ada_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static void adasysctlinit(void *context, int pending) { struct cam_periph *periph; struct ada_softc *softc; char tmpstr[80], tmpstr2[80]; periph = (struct cam_periph *)context; /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } softc = (struct ada_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= ADA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("adasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW, softc, 0, adadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->read_ahead, 0, 
"Enable disk read ahead."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->write_cache, 0, "Enable disk write cache."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->rotating, 0, "Rotating media"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, adazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, adazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); #ifdef ADA_TEST_FAILURE /* * Add a 'door bell' sysctl which allows one to set it from userland * and cause something bad to happen. For the moment, we only allow * whacking the next read or write. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_read_error, 0, "Force a read error for the next N reads."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_write_error, 0, "Force a write error for the next N writes."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->periodic_read_error, 0, "Force a read error every N reads (don't set too low)."); #endif #ifdef CAM_IO_STATS softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->errors, 0, "Transport errors reported by the SIM."); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->invalidations, 0, "Device pack invalidations."); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int adagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = 
bp->bio_length; return ret; } static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct ada_softc *softc; int i, error, value, methods; softc = (struct ada_softc *)arg1; value = softc->delete_method; if (value < 0 || value > ADA_DELETE_MAX) p = "UNKNOWN"; else p = ada_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = 1 << ADA_DELETE_DISABLE; if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) methods |= 1 << ADA_DELETE_CFA_ERASE; if (softc->flags & ADA_FLAG_CAN_TRIM) methods |= 1 << ADA_DELETE_DSM_TRIM; if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM; for (i = 0; i <= ADA_DELETE_MAX; i++) { if (!(methods & (1 << i)) || strcmp(buf, ada_delete_method_names[i]) != 0) continue; softc->delete_method = i; return (0); } return (EINVAL); } static void adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd) { if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) && (cgd->inq_flags & SID_DMA)) softc->flags |= ADA_FLAG_CAN_DMA; else softc->flags &= ~ADA_FLAG_CAN_DMA; if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) { softc->flags |= ADA_FLAG_CAN_48BIT; if (cgd->inq_flags & SID_DMA48) softc->flags |= ADA_FLAG_CAN_DMA48; else softc->flags &= ~ADA_FLAG_CAN_DMA48; } else softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48); if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE) softc->flags |= ADA_FLAG_CAN_FLUSHCACHE; else softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE; if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT) softc->flags |= ADA_FLAG_CAN_POWERMGT; else softc->flags &= ~ADA_FLAG_CAN_POWERMGT; if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) && (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue)) softc->flags |= ADA_FLAG_CAN_NCQ; else softc->flags &= ~ADA_FLAG_CAN_NCQ; if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) && (cgd->inq_flags & SID_DMA)) { softc->flags |= ADA_FLAG_CAN_TRIM; softc->trim_max_ranges = TRIM_MAX_RANGES; if (cgd->ident_data.max_dsm_blocks != 0) { softc->trim_max_ranges = min(cgd->ident_data.max_dsm_blocks * ATA_DSM_BLK_RANGES, softc->trim_max_ranges); } /* * If we can do RCVSND_FPDMA_QUEUED commands, we may be able * to do NCQ trims, if we support trims at all. We also need * support from the SIM to do things properly. Perhaps we * should also check whether log 13 dword 0 bit 0 and dword 1 * bit 0 are set... */ if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 && (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 && (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 && (softc->flags & ADA_FLAG_CAN_TRIM) != 0) softc->flags |= ADA_FLAG_CAN_NCQ_TRIM; else softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; } else softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM); if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA) softc->flags |= ADA_FLAG_CAN_CFA; else softc->flags &= ~ADA_FLAG_CAN_CFA; /* * Now that we've set the appropriate flags, set up the delete * method.
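 *
 * (adasetdeletemethod() picks the first supported method in this
 * order of preference; a sketch of the resulting precedence:
 *
 *	NCQ DSM TRIM -> ADA_DELETE_NCQ_DSM_TRIM
 *	DSM TRIM     -> ADA_DELETE_DSM_TRIM
 *	CFA ERASE    -> ADA_DELETE_CFA_ERASE (28-bit-only CFA devices)
 *	otherwise    -> ADA_DELETE_NONE
 * )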
*/ adasetdeletemethod(softc); if ((cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG) && ((softc->quirks & ADA_Q_LOG_BROKEN) == 0)) softc->flags |= ADA_FLAG_CAN_LOG; else softc->flags &= ~ADA_FLAG_CAN_LOG; if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = ADA_ZONE_HOST_AWARE; else if (((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) || (softc->quirks & ADA_Q_SMR_DM)) softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; else softc->zone_mode = ADA_ZONE_NONE; if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) softc->flags |= ADA_FLAG_CAN_RAHEAD; else softc->flags &= ~ADA_FLAG_CAN_RAHEAD; if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) softc->flags |= ADA_FLAG_CAN_WCACHE; else softc->flags &= ~ADA_FLAG_CAN_WCACHE; } static cam_status adaregister(struct cam_periph *periph, void *arg) { struct ada_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; struct disk_params *dp; struct sbuf sb; char *announce_buf; caddr_t match; u_int maxio; int quirks; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("adaregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("adaregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } announce_buf = softc->announce_temp; bzero(announce_buf, ADA_ANNOUNCETMP_SZ); if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("adaregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->ident_data, (caddr_t)ada_quirk_table, nitems(ada_quirk_table), sizeof(*ada_quirk_table), ata_identify_match); if (match != NULL) softc->quirks = ((struct ada_quirk_entry *)match)->quirks; else softc->quirks = ADA_Q_NONE; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(announce_buf, &quirks); softc->quirks = quirks; softc->read_ahead = -1; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.read_ahead", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead); softc->write_cache = -1; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "kern.cam.ada.%d.write_cache", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->write_cache); /* * Set support flags based on the Identify data and quirks. */ adasetflags(softc, cgd); /* Disable queue sorting for non-rotational media by default. */ if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) { softc->rotating = 0; } else { softc->rotating = 1; } cam_iosched_set_sort_queue(softc->cam_iosched, softc->rotating ? 
-1 : 0); adagetparams(periph, cgd); softc->disk = disk_alloc(); softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate; softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, softc->params.secsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = adaopen; softc->disk->d_close = adaclose; softc->disk->d_strategy = adastrategy; softc->disk->d_getattr = adagetattr; softc->disk->d_dump = adadump; softc->disk->d_gone = adadiskgonecb; softc->disk->d_name = "ada"; softc->disk->d_drv1 = periph; maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > MAXPHYS) maxio = MAXPHYS; /* for safety */ if (softc->flags & ADA_FLAG_CAN_48BIT) maxio = min(maxio, 65536 * softc->params.secsize); else /* 28bit ATA command limit */ maxio = min(maxio, 256 * softc->params.secsize); softc->disk->d_maxsize = maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if (softc->flags & ADA_FLAG_CAN_TRIM) { softc->disk->d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = softc->params.secsize * ATA_DSM_RANGE_MAX * softc->trim_max_ranges; } else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) { softc->disk->d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = 256 * softc->params.secsize; } else softc->disk->d_delmaxsize = maxio; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; softc->unmappedio = 1; } if (cpi.hba_misc & PIM_ATA_EXT) softc->flags |= ADA_FLAG_PIM_ATA_EXT; strlcpy(softc->disk->d_descr, cgd->ident_data.model, MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model))); strlcpy(softc->disk->d_ident, cgd->ident_data.serial, MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial))); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = (off_t)softc->params.sectors * softc->params.secsize; if (ata_physical_sector_size(&cgd->ident_data) != softc->params.secsize) { softc->disk->d_stripesize = ata_physical_sector_size(&cgd->ident_data); softc->disk->d_stripeoffset = (softc->disk->d_stripesize - ata_logical_sector_offset(&cgd->ident_data)) % softc->disk->d_stripesize; } else if (softc->quirks & ADA_Q_4K) { softc->disk->d_stripesize = 4096; softc->disk->d_stripeoffset = 0; } softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; ata_disk_firmware_geom_adjust(softc->disk); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * adadiskgonecb()) telling us that our provider has been freed. 
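 *
 * (Sketch of the reference pairing, for orientation only:
 *
 *	cam_periph_acquire(periph);	-- here, before disk_create()
 *	disk_gone(softc->disk);		-- later, in adaoninvalidate()
 *	adadiskgonecb() -> cam_periph_release(periph);
 * )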
*/ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); dp = &softc->params; snprintf(announce_buf, ADA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); sbuf_new(&sb, softc->announce_buffer, ADA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, ADA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ if (cam_periph_acquire(periph) == CAM_REQ_CMP) taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, adaasync, periph, periph->path); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) { softc->state = ADA_STATE_RAHEAD; } else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) { softc->state = ADA_STATE_WCACHE; } else if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) { softc->state = ADA_STATE_LOGDIR; } else { /* * Nothing to probe, so we can just transition to the * normal state. */ adaprobedone(periph, NULL); return(CAM_REQ_CMP); } xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req) { uint64_t lastlba = (uint64_t)-1; int c, lastcount = 0, off, ranges = 0; bzero(req, sizeof(*req)); TAILQ_INIT(&req->bps); do { uint64_t lba = bp->bio_pblkno; int count = bp->bio_bcount / softc->params.secsize; /* Try to extend the previous range. 
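 *
 * Each range below is an 8-byte DSM entry: a 48-bit LBA in bytes 0-5
 * (little endian) followed by a 16-bit sector count in bytes 6-7, so
 * one entry covers at most ATA_DSM_RANGE_MAX sectors. For example
 * (illustrative values only), trimming 70000 sectors starting at LBA
 * 0x1000 with ATA_DSM_RANGE_MAX == 0xffff yields two entries:
 *
 *	{ lba 0x1000,  count 0xffff }
 *	{ lba 0x10fff, count 0x1171 }	-- 70000 - 65535 = 4465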
*/ if (lba == lastlba) { c = min(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * ATA_DSM_RANGE_SIZE; req->data[off + 6] = lastcount & 0xff; req->data[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = min(count, ATA_DSM_RANGE_MAX); off = ranges * ATA_DSM_RANGE_SIZE; req->data[off + 0] = lba & 0xff; req->data[off + 1] = (lba >> 8) & 0xff; req->data[off + 2] = (lba >> 16) & 0xff; req->data[off + 3] = (lba >> 24) & 0xff; req->data[off + 4] = (lba >> 32) & 0xff; req->data[off + 5] = (lba >> 40) & 0xff; req->data[off + 6] = c & 0xff; req->data[off + 7] = (c >> 8) & 0xff; lba += c; count -= c; lastcount = c; ranges++; /* * It's the caller's responsibility to ensure the * request will fit, so we don't need to check for * overrun here. */ } lastlba = lba; TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue); bp = cam_iosched_next_trim(softc->cam_iosched); if (bp == NULL) break; if (bp->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp); break; } } while (1); return (ranges); } static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; int ranges; ranges = ada_dsmtrim_req_create(softc, bp, req); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0, req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE, ada_default_timeout * 1000); ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT, ATA_DSM_TRIM, 0, howmany(ranges, ATA_DSM_BLK_RANGES)); } static void ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; int ranges; ranges = ada_dsmtrim_req_create(softc, bp, req); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0, req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE, ada_default_timeout * 1000); ata_ncq_cmd(ataio, ATA_SEND_FPDMA_QUEUED, 0, howmany(ranges, ATA_DSM_BLK_RANGES)); ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM; ataio->ata_flags |= ATA_FLAG_AUX; ataio->aux = 1; } static void ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; bzero(req, sizeof(*req)); TAILQ_INIT(&req->bps); TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (count >= 256) count = 0; ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count); } static int ada_zone_bio_to_ata(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ATA_ZM_OPEN_ZONE; case DISK_ZONE_CLOSE: return ATA_ZM_CLOSE_ZONE; case DISK_ZONE_FINISH: return ATA_ZM_FINISH_ZONE; case DISK_ZONE_RWP: return ATA_ZM_RWP; } return -1; } static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct ada_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to ATA\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba =
bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; ata_zac_mgmt_out(&ccb->ataio, /*retries*/ ada_retry_count, /*cbfcnp*/ adadone, /*use_ncq*/ (softc->flags & ADA_FLAG_PIM_ATA_EXT) ? 1 : 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*sector_count*/ 0, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*timeout*/ ada_default_timeout * 1000); *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } ata_zac_mgmt_in(&ccb->ataio, /*retries*/ ada_retry_count, /*cbfcnp*/ adadone, /*use_ncq*/ (softc->flags & ADA_FLAG_PIM_ATA_EXT) ? 1 : 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*timeout*/ ada_default_timeout * 1000); /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated.
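 *
 * Worked example of the mismatch (descriptor sizes here are from the
 * ZAC/ZBC specs and quoted for illustration): with 64 entries
 * allocated, the ATA transfer is a 64-byte header plus 64 * 64 = 4096
 * bytes of descriptors, while bio_length counts 64 entries of the
 * smaller struct disk_zone_rep_entry, so the two byte counts differ
 * for the same logical request.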
*/ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case ADA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case ADA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case ADA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case ADA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void adastart(struct cam_periph *periph, union ccb *start_ccb) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct ccb_ataio *ataio = &start_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n")); switch (softc->state) { case ADA_STATE_NORMAL: { struct bio *bp; u_int8_t tag_code; bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { xpt_release_ccb(start_ccb); break; } if ((bp->bio_flags & BIO_ORDERED) != 0 || (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) { softc->flags &= ~ADA_FLAG_NEED_OTAG; softc->flags |= ADA_FLAG_WAS_OTAG; tag_code = 0; } else { tag_code = 1; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; void *data_ptr; int rw_op; if (bp->bio_cmd == BIO_WRITE) { softc->flags |= ADA_FLAG_DIRTY; rw_op = CAM_DIR_OUT; } else { rw_op = CAM_DIR_IN; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= CAM_DATA_BIO; data_ptr = bp; } #ifdef ADA_TEST_FAILURE int fail = 0; /* * Support the failure sysctls. If the command is a * read, and there are pending forced read errors, or * if a write and pending write errors, then fail this * operation with EIO. This is useful for testing * purposes. Also, support having every Nth read fail. * * This is a rather blunt tool.
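 *
 * Example usage from userland when the driver is built with
 * ADA_TEST_FAILURE (the unit number 0 here is hypothetical):
 *
 *	# sysctl kern.cam.ada.0.force_read_error=1
 *	# sysctl kern.cam.ada.0.periodic_read_error=100
 *
 * The first makes exactly one subsequent read fail with EIO; the
 * second fails every 100th read until it is set back to 0.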
*/ if (bp->bio_cmd == BIO_READ) { if (softc->force_read_error) { softc->force_read_error--; fail = 1; } if (softc->periodic_read_error > 0) { if (++softc->periodic_read_count >= softc->periodic_read_error) { softc->periodic_read_count = 0; fail = 1; } } } else { if (softc->force_write_error) { softc->force_write_error--; fail = 1; } } if (fail) { biofinish(bp, NULL, EIO); xpt_release_ccb(start_ccb); adaschedule(periph); return; } #endif KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_bcount + bp->bio_ma_offset) / PAGE_SIZE == bp->bio_ma_n, ("Short bio %p", bp)); cam_fill_ataio(ataio, ada_retry_count, adadone, rw_op, 0, data_ptr, bp->bio_bcount, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) { if (bp->bio_cmd == BIO_READ) { ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED, lba, count); } else { ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED, lba, count); } } else if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count > 256)) { if (softc->flags & ADA_FLAG_CAN_DMA48) { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_DMA48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_DMA48, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_MUL48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_MUL48, 0, lba, count); } } } else { if (count == 256) count = 0; if (softc->flags & ADA_FLAG_CAN_DMA) { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_DMA, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_DMA, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_MUL, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_MUL, 0, lba, count); } } } break; } case BIO_DELETE: switch (softc->delete_method) { case ADA_DELETE_NCQ_DSM_TRIM: ada_ncq_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_DSM_TRIM: ada_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_CFA_ERASE: ada_cfaerase(softc, bp, ataio); break; default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); adaschedule(periph); return; } start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM; start_ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); goto out; case BIO_FLUSH: cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0); break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ adaschedule(periph); break; } case ADA_STATE_RAHEAD: case ADA_STATE_WCACHE: { cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->state == ADA_STATE_RAHEAD) { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ? ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD; } else { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ? 
ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE; } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); break; } case ADA_STATE_LOGDIR: { struct ata_gp_log_dir *log_dir; if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) { adaprobedone(periph, start_ccb); break; } log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); softc->state = ADA_STATE_NORMAL; xpt_release_ccb(start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/sizeof(*log_dir), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR; xpt_action(start_ccb); break; } case ADA_STATE_IDDIR: { struct ata_identify_log_pages *id_dir; id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR; xpt_action(start_ccb); break; } case ADA_STATE_SUP_CAP: { struct ata_identify_log_sup_cap *sup_cap; sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP; xpt_action(start_ccb); break; } case ADA_STATE_ZONE: { struct ata_zoned_info_log *ata_zone; ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); adaprobedone(periph, start_ccb); break; } ata_read_log(ataio, /*retries*/1, /*cbfcnp*/adadone, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? 
CAM_ATAIO_DMA : 0, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*timeout*/ada_default_timeout*1000); start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE; xpt_action(start_ccb); break; } } } static void adaprobedone(struct cam_periph *periph, union ccb *ccb) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; if (ccb != NULL) xpt_release_ccb(ccb); softc->state = ADA_STATE_NORMAL; softc->flags |= ADA_FLAG_PROBED; adaschedule(periph); if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) { softc->flags |= ADA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else { cam_periph_release_locked(periph); } } static void adazonedone(struct cam_periph *periph, union ccb *ccb) { struct ada_softc *softc; struct bio *bp; softc = periph->softc; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t num_alloced, hdr_len, num_avail; uint32_t num_to_fill, i; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ num_alloced = rep->entries_allocated; hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } hdr_len = le32dec(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = le64dec(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. */ if (num_avail == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; bp->bio_resid = bp->bio_bcount; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. 
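 *
 * For instance (values as defined in ZBC, quoted here for
 * illustration): a zone condition of 0x1 (empty) or 0xe (full) is
 * passed through unchanged, so bio.h consumers see the raw spec
 * encoding rather than a driver-defined enumeration.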
*/ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = le64dec(desc->zone_length); entry->zone_start_lba = le64dec(desc->zone_start_lba); entry->write_pointer_lba = le64dec(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; /* * Note that this residual is accurate from the user's * standpoint, but the amount transferred isn't accurate * from the standpoint of what actually came back from the * drive. */ bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry)); break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->ataio.data_ptr, M_ATADA); } static void adadone(struct cam_periph *periph, union ccb *done_ccb) { struct ada_softc *softc; struct ccb_ataio *ataio; struct cam_path *path; uint32_t priority; int state; softc = (struct ada_softc *)periph->softc; ataio = &done_ccb->ataio; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n")); state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK; switch (state) { case ADA_CCB_BUFFER_IO: case ADA_CCB_TRIM: { struct bio *bp; int error; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = adaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * If we get an error on an NCQ DSM TRIM, fall back * to a non-NCQ DSM TRIM forever. Please note that if * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too. * However, for this one trim, we treat it as advisory * and return success up the stack. */ if (state == ADA_CCB_TRIM && error != 0 && (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) { softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; error = 0; adasetdeletemethod(softc); } } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { if (bp->bio_cmd == BIO_ZONE) adazonedone(periph, done_ccb); else if (state == ADA_CCB_TRIM) bp->bio_resid = 0; else bp->bio_resid = ataio->resid; if ((bp->bio_resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; } softc->outstanding_cmds--; if (softc->outstanding_cmds == 0) softc->flags |= ADA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. */ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == ADA_CCB_TRIM) { TAILQ_HEAD(, bio) queue; struct bio *bp1; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue); /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. 
However, we specifically keep * trim_running set to 0 before the call above to allow * other I/O to progress when many BIO_DELETE requests * are pushed down. We set trim_running to 0 and call * adaschedule again so that we don't stall if there are * no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); adaschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = error; if (error != 0) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { adaschedule(periph); cam_periph_unlock(periph); biodone(bp); } return; } case ADA_CCB_RAHEAD: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the reference on the peripheral. * The peripheral will only go away once the last reference * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); softc->state = ADA_STATE_WCACHE; xpt_schedule(periph, priority); /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } case ADA_CCB_WCACHE: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); if ((softc->flags & ADA_FLAG_CAN_LOG) && (softc->zone_mode != ADA_ZONE_NONE)) { xpt_release_ccb(done_ccb); softc->state = ADA_STATE_LOGDIR; xpt_schedule(periph, priority); } else { adaprobedone(periph, done_ccb); } return; } case ADA_CCB_LOGDIR: { int error; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = ataio->dxfer_len - ataio->resid; if (softc->valid_logdir_len > 0) bcopy(ataio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. */ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)) { softc->flags |= ADA_FLAG_CAN_IDLOG; } else { softc->flags &= ~ADA_FLAG_CAN_IDLOG; } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data.
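 *
 * (Sketch of the layout parsed above: the log directory begins with a
 * 16-bit version word, followed by one 16-bit page count per log
 * address, so the count for log N sits at byte offset N * 2; hence
 * the decode used above:
 *
 *	version   = le16dec(softc->ata_logdir.header);
 *	num_pages = le16dec(&softc->ata_logdir.num_pages[
 *	    (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) -
 *	    sizeof(uint16_t)]);
 * )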
*/ softc->flags &= ~(ADA_FLAG_CAN_LOG | ADA_FLAG_CAN_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_IDLOG)) { softc->state = ADA_STATE_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_IDDIR: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(ADA_FLAG_CAN_SUPCAP | ADA_FLAG_CAN_ZONE); softc->valid_iddir_len = ataio->dxfer_len - ataio->resid; if (softc->valid_iddir_len > 0) bcopy(ataio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages, entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= ADA_FLAG_CAN_SUPCAP; else if (softc->ata_iddir.entries[i] == ATA_IDL_ZDI) softc->flags |= ADA_FLAG_CAN_ZONE; if ((softc->flags & ADA_FLAG_CAN_SUPCAP) && (softc->flags & ADA_FLAG_CAN_ZONE)) break; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * lists a non-zero number of pages present * for this log. */ softc->flags &= ~ADA_FLAG_CAN_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_SUPCAP)) { softc->state = ADA_STATE_SUP_CAP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_SUP_CAP: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *) ataio->data_ptr; valid_len = ataio->dxfer_len - ataio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data.
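 *
 * (For comparison, the identify-data path in adasetflags() that
 * normally sets the mode first:
 *
 *	if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
 *	    ATA_SUPPORT_ZONE_HOST_AWARE)
 *		softc->zone_mode = ADA_ZONE_HOST_AWARE;
 * )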
*/ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = ADA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= ADA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= ADA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= ADA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= ADA_ZONE_FLAG_SUP_MASK; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~ADA_FLAG_CAN_SUPCAP; /* * And clear zone capabilities. */ softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); if ((error == 0) && (softc->flags & ADA_FLAG_CAN_ZONE)) { softc->state = ADA_STATE_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); } else adaprobedone(periph, done_ccb); return; } case ADA_CCB_ZONE: { int error; if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)ataio->data_ptr; valid_len = ataio->dxfer_len - ataio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= ADA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~ADA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar =le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= ADA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~ADA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = adaerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~ADA_FLAG_CAN_ZONE; softc->flags &= ~ADA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, 
/*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ataio->data_ptr, M_ATADA); adaprobedone(periph, done_ccb); return; } case ADA_CCB_DUMP: /* No-op. We're polling */ return; default: break; } xpt_release_ccb(done_ccb); } static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { #ifdef CAM_IO_STATS struct ada_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ada_softc *)periph->softc; switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_ATA_STATUS_ERROR: softc->errors++; break; default: break; } #endif return(cam_periph_error(ccb, cam_flags, sense_flags, NULL)); } static void adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct disk_params *dp = &softc->params; u_int64_t lbasize48; u_int32_t lbasize; dp->secsize = ata_logical_sector_size(&cgd->ident_data); if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) && cgd->ident_data.current_heads && cgd->ident_data.current_sectors) { dp->heads = cgd->ident_data.current_heads; dp->secs_per_track = cgd->ident_data.current_sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 | ((u_int32_t)cgd->ident_data.current_size_2 << 16); } else { dp->heads = cgd->ident_data.heads; dp->secs_per_track = cgd->ident_data.sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track; } lbasize = (u_int32_t)cgd->ident_data.lba_size_1 | ((u_int32_t)cgd->ident_data.lba_size_2 << 16); /* use the 28bit LBA size if valid or bigger than the CHS mapping */ if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize) dp->sectors = lbasize; /* use the 48bit LBA size if valid */ lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) | ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) | ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) | ((u_int64_t)cgd->ident_data.lba_size48_4 << 48); if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) && lbasize48 > ATA_MAX_28BIT_LBA) dp->sectors = lbasize48; } static void adasendorderedtag(void *arg) { struct ada_softc *softc = arg; if (ada_send_ordered) { if (softc->outstanding_cmds > 0) { if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0) softc->flags |= ADA_FLAG_NEED_OTAG; softc->flags &= ~ADA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); } /* * Step through all ADA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void adaflush(void) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { softc = (struct ada_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & ADA_FLAG_OPEN)) { adadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. 
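 *
 * (ADA_FLAG_CAN_FLUSHCACHE was derived from the IDENTIFY data
 * when the drive attached.  Drives that also advertise 48-bit
 * addressing get FLUSH CACHE EXT below; everything else gets
 * the 28-bit FLUSH CACHE.)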
*/ if (((softc->flags & ADA_FLAG_OPEN) == 0) || (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 0, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void adaspindown(uint8_t cmd, int flags) { struct cam_periph *periph; struct ada_softc *softc; struct ccb_ataio local_ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { /* If we paniced with lock held - not recurse here. */ if (cam_periph_owned(periph)) continue; cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin-down the drive if it is capable of it.. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "spin-down\n"); memset(&local_ccb, 0, sizeof(local_ccb)); xpt_setup_ccb(&local_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); local_ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&local_ccb, 0, adadone, CAM_DIR_NONE | flags, 0, NULL, 0, ada_default_timeout*1000); ata_28bit_cmd(&local_ccb, cmd, 0, 0, 0); if (!SCHEDULER_STOPPED()) { /* * Not panicing, can just do the normal runccb * XXX should make cam_periph_runccb work while * XXX panicing... later */ error = cam_periph_runccb((union ccb *)&local_ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); } else { /* * Panicing, so we have to do this by hand: do * xpt_polled_action to run the request through the SIM, * extract the error, and if the queue was frozen, * unfreeze it. cam_periph_runccb takes care of these * details, but xpt_polled_action doesn't. */ xpt_polled_action((union ccb *)&local_ccb); error = adaerror((union ccb *)&local_ccb, 0, SF_NO_RECOVERY | SF_NO_RETRY); if ((local_ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(local_ccb.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } if (error != 0) xpt_print(periph->path, "Spin-down disk failed\n"); cam_periph_unlock(periph); } } static void adashutdown(void *arg, int howto) { int how; adaflush(); /* * STANDBY IMMEDIATE saves any volatile data to the drive. It also spins * down hard drives. IDLE IMMEDIATE also saves the volatile data without * a spindown. We send the former when we expect to lose power soon. For * a warm boot, we send the latter to avoid a thundering herd of spinups * just after the kernel loads while probing. We have to do something to * flush the data because the BIOS in many systems resets the HBA * causing a COMINIT/COMRESET negotiation, which some drives interpret * as license to toss the volatile data, and others count as unclean * shutdown when in the Active PM state in SMART attributes. * * adaspindown will ensure that we don't send this to a drive that * doesn't support it. */ if (ada_spindown_shutdown != 0) { how = (howto & (RB_HALT | RB_POWEROFF | RB_POWERCYCLE)) ? 
ATA_STANDBY_IMMEDIATE : ATA_IDLE_IMMEDIATE; adaspindown(how, 0); } } static void adasuspend(void *arg) { adaflush(); /* * SLEEP also flushes any volatile data, like STANDBY IMMEDIATE, * so we don't need to send it as well. */ if (ada_spindown_suspend != 0) adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE); } static void adaresume(void *arg) { struct cam_periph *periph; struct ada_softc *softc; if (ada_spindown_suspend == 0) return; CAM_PERIPH_FOREACH(periph, &adadriver) { cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin-down the drive if it is capable of it. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "resume\n"); /* * Drop freeze taken due to CAM_DEV_QFREEZE flag set on * sleep request. */ cam_release_devq(periph->path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); cam_periph_unlock(periph); } } #endif /* _KERNEL */ Index: head/sys/cam/ata/ata_pmp.c =================================================================== --- head/sys/cam/ata/ata_pmp.c (revision 326264) +++ head/sys/cam/ata/ata_pmp.c (revision 326265) @@ -1,861 +1,863 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #ifdef _KERNEL typedef enum { PMP_STATE_NORMAL, PMP_STATE_PORTS, PMP_STATE_PM_QUIRKS_1, PMP_STATE_PM_QUIRKS_2, PMP_STATE_PM_QUIRKS_3, PMP_STATE_PRECONFIG, PMP_STATE_RESET, PMP_STATE_CONNECT, PMP_STATE_CHECK, PMP_STATE_CLEAR, PMP_STATE_CONFIG, PMP_STATE_SCAN } pmp_state; typedef enum { PMP_FLAG_SCTX_INIT = 0x200 } pmp_flags; typedef enum { PMP_CCB_PROBE = 0x01, } pmp_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct pmp_softc { SLIST_ENTRY(pmp_softc) links; pmp_state state; pmp_flags flags; uint32_t pm_pid; uint32_t pm_prv; int pm_ports; int pm_step; int pm_try; int found; int reset; int frozen; int restart; int events; #define PMP_EV_RESET 1 #define PMP_EV_RESCAN 2 u_int caps; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; static periph_init_t pmpinit; static void pmpasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void pmpsysctlinit(void *context, int pending); static periph_ctor_t pmpregister; static periph_dtor_t pmpcleanup; static periph_start_t pmpstart; static periph_oninv_t pmponinvalidate; static void pmpdone(struct cam_periph *periph, union ccb *done_ccb); #ifndef PMP_DEFAULT_TIMEOUT #define PMP_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef PMP_DEFAULT_RETRY #define PMP_DEFAULT_RETRY 1 #endif #ifndef PMP_DEFAULT_HIDE_SPECIAL #define PMP_DEFAULT_HIDE_SPECIAL 1 #endif static int pmp_retry_count = PMP_DEFAULT_RETRY; static int pmp_default_timeout = PMP_DEFAULT_TIMEOUT; static int pmp_hide_special = PMP_DEFAULT_HIDE_SPECIAL; static SYSCTL_NODE(_kern_cam, OID_AUTO, pmp, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RWTUN, &pmp_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &pmp_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RWTUN, &pmp_hide_special, 0, "Hide extra ports"); static struct periph_driver pmpdriver = { pmpinit, "pmp", TAILQ_HEAD_INITIALIZER(pmpdriver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(pmp, pmpdriver); static void pmpinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". 
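 *
 * When AC_FOUND_DEVICE fires for a PROTO_SATAPM device,
 * pmpasync() below allocates the pmp periph, which then walks
 * the PMP_STATE_* machine to count, reset and probe the
 * fan-out ports.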
*/ status = xpt_register_async(AC_FOUND_DEVICE, pmpasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pmp: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void pmpfreeze(struct cam_periph *periph, int mask) { struct pmp_softc *softc = (struct pmp_softc *)periph->softc; struct cam_path *dpath; int i; mask &= ~softc->frozen; for (i = 0; i < 15; i++) { if ((mask & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) == CAM_REQ_CMP) { softc->frozen |= (1 << i); xpt_acquire_device(dpath->device); cam_freeze_devq(dpath); xpt_free_path(dpath); } } } static void pmprelease(struct cam_periph *periph, int mask) { struct pmp_softc *softc = (struct pmp_softc *)periph->softc; struct cam_path *dpath; int i; mask &= softc->frozen; for (i = 0; i < 15; i++) { if ((mask & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) == CAM_REQ_CMP) { softc->frozen &= ~(1 << i); cam_release_devq(dpath, 0, 0, 0, FALSE); xpt_release_device(dpath->device); xpt_free_path(dpath); } } } static void pmponinvalidate(struct cam_periph *periph) { struct cam_path *dpath; int i; /* * De-register any async callbacks. */ xpt_register_async(0, pmpasync, periph, periph->path); for (i = 0; i < 15; i++) { if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, dpath, NULL); xpt_free_path(dpath); } } pmprelease(periph, -1); } static void pmpcleanup(struct cam_periph *periph) { struct pmp_softc *softc; softc = (struct pmp_softc *)periph->softc; cam_periph_unlock(periph); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & PMP_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { xpt_print(periph->path, "can't remove sysctl context\n"); } free(softc, M_DEVBUF); cam_periph_lock(periph); } static void pmpasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct pmp_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SATAPM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(pmpregister, pmponinvalidate, pmpcleanup, pmpstart, "pmp", CAM_PERIPH_BIO, path, pmpasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("pmpasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_SCSI_AEN: case AC_SENT_BDR: case AC_BUS_RESET: softc = (struct pmp_softc *)periph->softc; cam_periph_async(periph, code, path, arg); if (code == AC_SCSI_AEN) softc->events |= PMP_EV_RESCAN; else softc->events |= PMP_EV_RESET; if (code == AC_SCSI_AEN && softc->state != PMP_STATE_NORMAL) break; xpt_hold_boot(); pmpfreeze(periph, softc->found); if (code == AC_SENT_BDR || code == AC_BUS_RESET) softc->found = 0; /* We have to reset everything. 
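 *
 * Clearing the found mask here, together with the pmpfreeze()
 * above, means every fan-out port stays frozen and is
 * re-probed from scratch once the state machine is
 * rescheduled below.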
*/ if (softc->state == PMP_STATE_NORMAL) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = PMP_STATE_PM_QUIRKS_1; else softc->state = PMP_STATE_PRECONFIG; xpt_schedule(periph, CAM_PRIORITY_DEV); } else { pmprelease(periph, softc->found); xpt_release_boot(); } } else softc->restart = 1; break; default: cam_periph_async(periph, code, path, arg); break; } } static void pmpsysctlinit(void *context, int pending) { struct cam_periph *periph; struct pmp_softc *softc; char tmpstr[80], tmpstr2[80]; periph = (struct cam_periph *)context; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return; softc = (struct pmp_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM PMP unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= PMP_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_pmp), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("pmpsysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } cam_periph_release(periph); } static cam_status pmpregister(struct cam_periph *periph, void *arg) { struct pmp_softc *softc; struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("pmpregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pmp_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("pmpregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } periph->softc = softc; softc->pm_pid = ((uint32_t *)&cgd->ident_data)[0]; softc->pm_prv = ((uint32_t *)&cgd->ident_data)[1]; TASK_INIT(&softc->sysctl_task, 0, pmpsysctlinit, periph); xpt_announce_periph(periph, NULL); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_SCSI_AEN, pmpasync, periph, periph->path); /* * Take an exclusive refcount on the periph while pmpstart is called * to finish the probe. The reference will be dropped in pmpdone at * the end of probe. */ (void)cam_periph_acquire(periph); xpt_hold_boot(); softc->state = PMP_STATE_PORTS; softc->events = PMP_EV_RESCAN; xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static void pmpstart(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_trans_settings cts; struct ccb_ataio *ataio; struct pmp_softc *softc; struct cam_path *dpath; int revision = 0; softc = (struct pmp_softc *)periph->softc; ataio = &start_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("pmpstart\n")); if (softc->restart) { softc->restart = 0; if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = min(softc->state, PMP_STATE_PM_QUIRKS_1); else softc->state = min(softc->state, PMP_STATE_PRECONFIG); } /* Fetch user wanted device speed. 
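 *
 * The revision collected here is written into each port's
 * SControl register (PSCR 2) in the RESET/CONNECT states
 * below: SPD (bits 7:4) caps the negotiated speed, and DET
 * (bits 3:0) set to 1 forces an interface reset on ports that
 * were not previously found, roughly
 *
 *	scontrol = (revision << 4) | (was_found ? 0 : 1);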
*/ if (softc->state == PMP_STATE_RESET || softc->state == PMP_STATE_CONNECT) { if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), softc->pm_step, 0) == CAM_REQ_CMP) { bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, dpath, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_REVISION) revision = cts.xport_specific.sata.revision; xpt_free_path(dpath); } } switch (softc->state) { case PMP_STATE_PORTS: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_read_cmd(ataio, 2, 15); break; case PMP_STATE_PM_QUIRKS_1: case PMP_STATE_PM_QUIRKS_3: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_read_cmd(ataio, 129, 15); break; case PMP_STATE_PM_QUIRKS_2: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 129, 15, softc->caps & ~0x1); break; case PMP_STATE_PRECONFIG: /* Get/update host SATA capabilities. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) softc->caps = cts.xport_specific.sata.caps; else softc->caps = 0; cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 0x60, 15, 0x0); break; case PMP_STATE_RESET: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 2, softc->pm_step, (revision << 4) | ((softc->found & (1 << softc->pm_step)) ? 0 : 1)); break; case PMP_STATE_CONNECT: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 2, softc->pm_step, (revision << 4)); break; case PMP_STATE_CHECK: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_read_cmd(ataio, 0, softc->pm_step); break; case PMP_STATE_CLEAR: softc->reset = 0; cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 1, softc->pm_step, 0xFFFFFFFF); break; case PMP_STATE_CONFIG: cam_fill_ataio(ataio, pmp_retry_count, pmpdone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, pmp_default_timeout * 1000); ata_pm_write_cmd(ataio, 0x60, 15, 0x07 | ((softc->caps & CTS_SATA_CAPS_H_AN) ? 
0x08 : 0)); break; default: break; } xpt_action(start_ccb); } static void pmpdone(struct cam_periph *periph, union ccb *done_ccb) { struct ccb_trans_settings cts; struct pmp_softc *softc; struct ccb_ataio *ataio; struct cam_path *dpath; u_int32_t priority, res; int i; softc = (struct pmp_softc *)periph->softc; ataio = &done_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("pmpdone\n")); priority = done_ccb->ccb_h.pinfo.priority; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, 0, NULL) == ERESTART) { return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } goto done; } if (softc->restart) { softc->restart = 0; xpt_release_ccb(done_ccb); if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = min(softc->state, PMP_STATE_PM_QUIRKS_1); else softc->state = min(softc->state, PMP_STATE_PRECONFIG); xpt_schedule(periph, priority); return; } switch (softc->state) { case PMP_STATE_PORTS: softc->pm_ports = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; if (pmp_hide_special) { /* * This PMP declares 6 ports, while only 5 of them * are real. Port 5 is a SEMB port, probing which * causes timeouts if external SEP is not connected * to PMP over I2C. */ if ((softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) && softc->pm_ports == 6) softc->pm_ports = 5; /* * This PMP declares 7 ports, while only 5 of them * are real. Port 5 is a fake "Config Disk" with * 640 sectors size. Port 6 is a SEMB port. */ if (softc->pm_pid == 0x47261095 && softc->pm_ports == 7) softc->pm_ports = 5; /* * These PMPs have extra configuration port. 
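 *
 * (In all of these quirks pm_ports started as the 32-bit
 *  GSCR 2 value, reassembled above from the ATA result
 *  registers:
 *
 *	(lba_high << 24) + (lba_mid << 16) +
 *	    (lba_low << 8) + sector_count
 *
 *  the same pattern this driver uses for every PM register
 *  read.)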
*/ if (softc->pm_pid == 0x57231095 || softc->pm_pid == 0x57331095 || softc->pm_pid == 0x57341095 || softc->pm_pid == 0x57441095) softc->pm_ports--; } printf("%s%d: %d fan-out ports\n", periph->periph_name, periph->unit_number, softc->pm_ports); if (softc->pm_pid == 0x37261095 || softc->pm_pid == 0x38261095) softc->state = PMP_STATE_PM_QUIRKS_1; else softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PM_QUIRKS_1: softc->caps = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; if (softc->caps & 0x1) softc->state = PMP_STATE_PM_QUIRKS_2; else softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PM_QUIRKS_2: if (bootverbose) softc->state = PMP_STATE_PM_QUIRKS_3; else softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PM_QUIRKS_3: res = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; printf("%s%d: Disabling SiI3x26 R_OK in GSCR_POLL: %x->%x\n", periph->periph_name, periph->unit_number, softc->caps, res); softc->state = PMP_STATE_PRECONFIG; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_PRECONFIG: softc->pm_step = 0; softc->state = PMP_STATE_RESET; softc->reset |= ~softc->found; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_RESET: softc->pm_step++; if (softc->pm_step >= softc->pm_ports) { softc->pm_step = 0; cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/5, /*getcount_only*/0); softc->state = PMP_STATE_CONNECT; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CONNECT: softc->pm_step++; if (softc->pm_step >= softc->pm_ports) { softc->pm_step = 0; softc->pm_try = 0; cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/10, /*getcount_only*/0); softc->state = PMP_STATE_CHECK; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CHECK: res = (ataio->res.lba_high << 24) + (ataio->res.lba_mid << 16) + (ataio->res.lba_low << 8) + ataio->res.sector_count; if (((res & 0xf0f) == 0x103 && (res & 0x0f0) != 0) || (res & 0x600) != 0) { if (bootverbose) { printf("%s%d: port %d status: %08x\n", periph->periph_name, periph->unit_number, softc->pm_step, res); } /* Report device speed if it is online. 
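 *
 * "Online" means the port's SStatus shows DET = 3 (device
 * present and PHY communication established) and IPM = 1
 * (active), i.e. (res & 0xf0f) == 0x103.  The negotiated speed
 * is the SPD field, (res & 0x0f0) >> 4, which is what gets
 * handed to the transport below.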
*/ if ((res & 0xf0f) == 0x103 && xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), softc->pm_step, 0) == CAM_REQ_CMP) { bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, dpath, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.revision = (res & 0x0f0) >> 4; cts.xport_specific.sata.valid = CTS_SATA_VALID_REVISION; cts.xport_specific.sata.caps = softc->caps & (CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN); cts.xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; xpt_action((union ccb *)&cts); xpt_free_path(dpath); } softc->found |= (1 << softc->pm_step); softc->pm_step++; } else { if (softc->pm_try < 10) { cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/10, /*getcount_only*/0); softc->pm_try++; } else { if (bootverbose) { printf("%s%d: port %d status: %08x\n", periph->periph_name, periph->unit_number, softc->pm_step, res); } softc->found &= ~(1 << softc->pm_step); if (xpt_create_path(&dpath, periph, done_ccb->ccb_h.path_id, softc->pm_step, 0) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, dpath, NULL); xpt_free_path(dpath); } softc->pm_step++; } } if (softc->pm_step >= softc->pm_ports) { if (softc->reset & softc->found) { cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/1000, /*getcount_only*/0); } softc->state = PMP_STATE_CLEAR; softc->pm_step = 0; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CLEAR: softc->pm_step++; if (softc->pm_step >= softc->pm_ports) { softc->state = PMP_STATE_CONFIG; softc->pm_step = 0; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; case PMP_STATE_CONFIG: for (i = 0; i < softc->pm_ports; i++) { union ccb *ccb; if ((softc->found & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, periph, xpt_path_path_id(periph->path), i, 0) != CAM_REQ_CMP) { printf("pmpdone: xpt_create_path failed\n"); continue; } /* If we did hard reset to this device, inform XPT. */ if ((softc->reset & softc->found & (1 << i)) != 0) xpt_async(AC_SENT_BDR, dpath, NULL); /* If rescan requested, scan this device. */ if (softc->events & PMP_EV_RESCAN) { ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { xpt_free_path(dpath); goto done; } xpt_setup_ccb(&ccb->ccb_h, dpath, CAM_PRIORITY_XPT); xpt_rescan(ccb); } else xpt_free_path(dpath); } break; default: break; } done: xpt_release_ccb(done_ccb); softc->state = PMP_STATE_NORMAL; softc->events = 0; xpt_release_boot(); pmprelease(periph, -1); cam_periph_release_locked(periph); } #endif /* _KERNEL */ Index: head/sys/cam/ata/ata_xpt.c =================================================================== --- head/sys/cam/ata/ata_xpt.c (revision 326264) +++ head/sys/cam/ata/ata_xpt.c (revision 326265) @@ -1,2295 +1,2297 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct ata_quirk_entry { struct scsi_inquiry_pattern inq_pat; u_int8_t quirks; #define CAM_QUIRK_MAXTAGS 0x01 u_int mintags; u_int maxtags; }; static periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "aprobe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(aprobe, probe_driver); typedef enum { PROBE_RESET, PROBE_IDENTIFY, PROBE_SPINUP, PROBE_SETMODE, PROBE_SETPM, PROBE_SETAPST, PROBE_SETDMAAA, PROBE_SETAN, PROBE_SET_MULTI, PROBE_INQUIRY, PROBE_FULL_INQUIRY, PROBE_PM_PID, PROBE_PM_PRV, PROBE_IDENTIFY_SES, PROBE_IDENTIFY_SAFTE, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_RESET", "PROBE_IDENTIFY", "PROBE_SPINUP", "PROBE_SETMODE", "PROBE_SETPM", "PROBE_SETAPST", "PROBE_SETDMAAA", "PROBE_SETAN", "PROBE_SET_MULTI", "PROBE_INQUIRY", "PROBE_FULL_INQUIRY", "PROBE_PM_PID", "PROBE_PM_PRV", "PROBE_IDENTIFY_SES", "PROBE_IDENTIFY_SAFTE", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { PROBE_NO_ANNOUNCE = 0x04 } probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; struct ata_params ident_data; probe_action action; probe_flags flags; uint32_t pm_pid; uint32_t pm_prv; int restart; int spinup; int faults; u_int caps; struct cam_periph *periph; } probe_softc; static struct ata_quirk_entry ata_quirk_table[] = { { /* Default tagged queuing parameters for all devices */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, }; static cam_status proberegister(struct cam_periph *periph, void *arg); static void probeschedule(struct cam_periph *probe_periph); static void probestart(struct cam_periph *periph, union ccb *start_ccb); static void proberequestdefaultnegotiation(struct cam_periph *periph); static void probedone(struct cam_periph *periph, union ccb *done_ccb); static void probecleanup(struct cam_periph *periph); static void ata_find_quirk(struct 
cam_ed *device); static void ata_scan_bus(struct cam_periph *periph, union ccb *ccb); static void ata_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); static struct cam_ed * ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void ata_device_transport(struct cam_path *path); static void ata_get_transfer_settings(struct ccb_trans_settings *cts); static void ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update); static void ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void ata_action(union ccb *start_ccb); static void ata_announce_periph(struct cam_periph *periph); static void ata_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void ata_proto_announce(struct cam_ed *device); static void ata_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void ata_proto_denounce(struct cam_ed *device); static void ata_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static void ata_proto_debug_out(union ccb *ccb); static void semb_proto_announce(struct cam_ed *device); static void semb_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void semb_proto_denounce(struct cam_ed *device); static void semb_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static int ata_dma = 1; static int atapi_dma = 1; TUNABLE_INT("hw.ata.ata_dma", &ata_dma); TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); static struct xpt_xport_ops ata_xport_ops = { .alloc_device = ata_alloc_device, .action = ata_action, .async = ata_dev_async, .announce = ata_announce_periph, .announce_sbuf = ata_announce_periph_sbuf, }; #define ATA_XPT_XPORT(x, X) \ static struct xpt_xport ata_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &ata_xport_ops, \ }; \ CAM_XPT_XPORT(ata_xport_ ## x); ATA_XPT_XPORT(ata, ATA); ATA_XPT_XPORT(sata, SATA); #undef ATA_XPORT_XPORT static struct xpt_proto_ops ata_proto_ops_ata = { .announce = ata_proto_announce, .announce_sbuf = ata_proto_announce_sbuf, .denounce = ata_proto_denounce, .denounce_sbuf = ata_proto_denounce_sbuf, .debug_out = ata_proto_debug_out, }; static struct xpt_proto ata_proto_ata = { .proto = PROTO_ATA, .name = "ata", .ops = &ata_proto_ops_ata, }; static struct xpt_proto_ops ata_proto_ops_satapm = { .announce = ata_proto_announce, .announce_sbuf = ata_proto_announce_sbuf, .denounce = ata_proto_denounce, .denounce_sbuf = ata_proto_denounce_sbuf, .debug_out = ata_proto_debug_out, }; static struct xpt_proto ata_proto_satapm = { .proto = PROTO_SATAPM, .name = "satapm", .ops = &ata_proto_ops_satapm, }; static struct xpt_proto_ops ata_proto_ops_semb = { .announce = semb_proto_announce, .announce_sbuf = semb_proto_announce_sbuf, .denounce = semb_proto_denounce, .denounce_sbuf = semb_proto_denounce_sbuf, .debug_out = ata_proto_debug_out, }; static struct xpt_proto ata_proto_semb = { .proto = PROTO_SEMB, .name = "semb", .ops = &ata_proto_ops_semb, }; CAM_XPT_PROTO(ata_proto_ata); CAM_XPT_PROTO(ata_proto_satapm); CAM_XPT_PROTO(ata_proto_semb); static void probe_periph_init() { } static cam_status proberegister(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ cam_status status; probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("proberegister: no probe CCB, 
" "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; status = cam_periph_acquire(periph); if (status != CAM_REQ_CMP) { return (status); } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); ata_device_transport(periph->path); probeschedule(periph); return(CAM_REQ_CMP); } static void probeschedule(struct cam_periph *periph) { union ccb *ccb; probe_softc *softc; softc = (probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) || periph->path->device->protocol == PROTO_SATAPM || periph->path->device->protocol == PROTO_SEMB) PROBE_SET_ACTION(softc, PROBE_RESET); else PROBE_SET_ACTION(softc, PROBE_IDENTIFY); if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= PROBE_NO_ANNOUNCE; else softc->flags &= ~PROBE_NO_ANNOUNCE; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void probestart(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_trans_settings cts; struct ccb_ataio *ataio; struct ccb_scsiio *csio; probe_softc *softc; struct cam_path *path; struct ata_params *ident_buf; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); softc = (probe_softc *)periph->softc; path = start_ccb->ccb_h.path; ataio = &start_ccb->ataio; csio = &start_ccb->csio; ident_buf = &periph->path->device->ident_data; if (softc->restart) { softc->restart = 0; if ((path->device->flags & CAM_DEV_UNCONFIGURED) || path->device->protocol == PROTO_SATAPM || path->device->protocol == PROTO_SEMB) softc->action = PROBE_RESET; else softc->action = PROBE_IDENTIFY; } switch (softc->action) { case PROBE_RESET: cam_fill_ataio(ataio, 0, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 15 * 1000); ata_reset_cmd(ataio); break; case PROBE_IDENTIFY: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_IN, 0, /*data_ptr*/(u_int8_t *)&softc->ident_data, /*dxfer_len*/sizeof(softc->ident_data), 30 * 1000); if (periph->path->device->protocol == PROTO_ATA) ata_28bit_cmd(ataio, ATA_ATA_IDENTIFY, 0, 0, 0); else ata_28bit_cmd(ataio, ATA_ATAPI_IDENTIFY, 0, 0, 0); break; case PROBE_SPINUP: if (bootverbose) xpt_print(path, "Spinning up device\n"); cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE | CAM_HIGH_POWER, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 30 * 1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_PUIS_SPINUP, 0, 0); break; case PROBE_SETMODE: { int mode, wantmode; mode = 0; /* Fetch user modes from SIM. 
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_MODE) mode = cts.xport_specific.ata.mode; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_MODE) mode = cts.xport_specific.sata.mode; } if (periph->path->device->protocol == PROTO_ATA) { if (ata_dma == 0 && (mode == 0 || mode > ATA_PIO_MAX)) mode = ATA_PIO_MAX; } else { if (atapi_dma == 0 && (mode == 0 || mode > ATA_PIO_MAX)) mode = ATA_PIO_MAX; } negotiate: /* Honor device capabilities. */ wantmode = mode = ata_max_mode(ident_buf, mode); /* Report modes to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.mode = mode; cts.xport_specific.ata.valid = CTS_ATA_VALID_MODE; } else { cts.xport_specific.sata.mode = mode; cts.xport_specific.sata.valid = CTS_SATA_VALID_MODE; } xpt_action((union ccb *)&cts); /* Fetch current modes from SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_MODE) mode = cts.xport_specific.ata.mode; } else { if (cts.xport_specific.ata.valid & CTS_SATA_VALID_MODE) mode = cts.xport_specific.sata.mode; } /* If SIM disagree - renegotiate. */ if (mode != wantmode) goto negotiate; /* Remember what transport thinks about DMA. */ if (mode < ATA_DMA) path->device->inq_flags &= ~SID_DMA; else path->device->inq_flags |= SID_DMA; xpt_async(AC_GETDEV_CHANGED, path, NULL); cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 30 * 1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); break; } case PROBE_SETPM: cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_PMREQ) ? 0x10 : 0x90, 0, 0x03); break; case PROBE_SETAPST: cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_APST) ? 0x10 : 0x90, 0, 0x07); break; case PROBE_SETDMAAA: cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_DMAAA) ? 0x10 : 0x90, 0, 0x02); break; case PROBE_SETAN: /* Remember what transport thinks about AEN. */ if (softc->caps & CTS_SATA_CAPS_H_AN) path->device->inq_flags |= SID_AEN; else path->device->inq_flags &= ~SID_AEN; xpt_async(AC_GETDEV_CHANGED, path, NULL); cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->caps & CTS_SATA_CAPS_H_AN) ? 0x10 : 0x90, 0, 0x05); break; case PROBE_SET_MULTI: { u_int sectors, bytecount; bytecount = 8192; /* SATA maximum */ /* Fetch user bytecount from SIM. 
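 *
 * The multi-sector count computed below is
 *
 *	sectors = max(1, min(sectors_intr & 0xff,
 *	    bytecount / logical_sector_size));
 *
 * For example, a drive advertising 16 sectors per interrupt
 * with 512-byte sectors under the 8192-byte SATA ceiling
 * yields min(16, 8192 / 512) = 16.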
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.ata.bytecount; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.sata.bytecount; } /* Honor device capabilities. */ sectors = max(1, min(ident_buf->sectors_intr & 0xff, bytecount / ata_logical_sector_size(ident_buf))); /* Report bytecount to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.bytecount = sectors * ata_logical_sector_size(ident_buf); cts.xport_specific.ata.valid = CTS_ATA_VALID_BYTECOUNT; } else { cts.xport_specific.sata.bytecount = sectors * ata_logical_sector_size(ident_buf); cts.xport_specific.sata.valid = CTS_SATA_VALID_BYTECOUNT; } xpt_action((union ccb *)&cts); /* Fetch current bytecount from SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.ata.bytecount; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.sata.bytecount; } sectors = bytecount / ata_logical_sector_size(ident_buf); cam_fill_ataio(ataio, 1, probedone, CAM_DIR_NONE, 0, NULL, 0, 30*1000); ata_28bit_cmd(ataio, ATA_SET_MULTI, 0, 0, sectors); break; } case PROBE_INQUIRY: { u_int bytecount; bytecount = 8192; /* SATA maximum */ /* Fetch user bytecount from SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_ATA) { if (cts.xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.ata.bytecount; } else { if (cts.xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) bytecount = cts.xport_specific.sata.bytecount; } /* Honor device capabilities. */ bytecount &= ~1; bytecount = max(2, min(65534, bytecount)); if (ident_buf->satacapabilities != 0x0000 && ident_buf->satacapabilities != 0xffff) { bytecount = min(8192, bytecount); } /* Report bytecount to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.bytecount = bytecount; cts.xport_specific.ata.valid = CTS_ATA_VALID_BYTECOUNT; } else { cts.xport_specific.sata.bytecount = bytecount; cts.xport_specific.sata.valid = CTS_SATA_VALID_BYTECOUNT; } xpt_action((union ccb *)&cts); /* FALLTHROUGH */ } case PROBE_FULL_INQUIRY: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf = &periph->path->device->inq_data; if (softc->action == PROBE_INQUIRY) inquiry_len = SHORT_INQUIRY_LENGTH; else inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); /* * Some parallel SCSI devices fail to send an * ignore wide residue message when dealing with * odd length inquiry requests. Round up to be * safe. 
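 *
 * (roundup2(len, 2) leaves even lengths alone and bumps odd
 *  ones, so 36 stays 36 and 37 becomes 38.)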
*/ inquiry_len = roundup2(inquiry_len, 2); scsi_inquiry(csio, /*retries*/1, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } case PROBE_PM_PID: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 10 * 1000); ata_pm_read_cmd(ataio, 0, 15); break; case PROBE_PM_PRV: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_NONE, 0, /*data_ptr*/NULL, /*dxfer_len*/0, 10 * 1000); ata_pm_read_cmd(ataio, 1, 15); break; case PROBE_IDENTIFY_SES: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_IN, 0, /*data_ptr*/(u_int8_t *)&softc->ident_data, /*dxfer_len*/sizeof(softc->ident_data), 30 * 1000); ata_28bit_cmd(ataio, ATA_SEP_ATTN, 0xEC, 0x02, sizeof(softc->ident_data) / 4); break; case PROBE_IDENTIFY_SAFTE: cam_fill_ataio(ataio, 1, probedone, /*flags*/CAM_DIR_IN, 0, /*data_ptr*/(u_int8_t *)&softc->ident_data, /*dxfer_len*/sizeof(softc->ident_data), 30 * 1000); ata_28bit_cmd(ataio, ATA_SEP_ATTN, 0xEC, 0x00, sizeof(softc->ident_data) / 4); break; default: panic("probestart: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); } static void proberequestdefaultnegotiation(struct cam_periph *periph) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; cts.xport_specific.valid = 0; cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); } static void probedone(struct cam_periph *periph, union ccb *done_ccb) { struct ccb_trans_settings cts; struct ata_params *ident_buf; struct scsi_inquiry_data *inq_buf; probe_softc *softc; struct cam_path *path; cam_status status; u_int32_t priority; u_int caps; int changed = 1, found = 1; static const uint8_t fake_device_id_hdr[8] = {0, SVPD_DEVICE_ID, 0, 12, SVPD_ID_CODESET_BINARY, SVPD_ID_TYPE_NAA, 0, 8}; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); softc = (probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; ident_buf = &path->device->ident_data; inq_buf = &path->device->inq_data; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0, NULL) == ERESTART) { out: /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); } status = done_ccb->ccb_h.status & CAM_STATUS_MASK; if (softc->restart) { softc->faults++; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) softc->faults += 4; if (softc->faults < 10) goto done; else softc->restart = 0; /* Old PIO2 devices may not support mode setting. */ } else if (softc->action == PROBE_SETMODE && status == CAM_ATA_STATUS_ERROR && ata_max_pmode(ident_buf) <= ATA_PIO2 && (ident_buf->capabilities1 & ATA_SUPPORT_IORDY) == 0) { goto noerror; /* * Some old WD SATA disks report supported and enabled * device-initiated interface power management, but return * ABORT on attempt to disable it. 
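 *
 * Each of these quirk branches turns a known-benign SET
 * FEATURES failure into a jump to noerror, so the probe keeps
 * going instead of declaring the device lost.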
*/ } else if (softc->action == PROBE_SETPM && status == CAM_ATA_STATUS_ERROR) { goto noerror; /* * Some old WD SATA disks have broken SPINUP handling. * If we really fail to spin up the disk, then there will be * some media access errors later on, but at least we will * have a device to interact with for recovery attempts. */ } else if (softc->action == PROBE_SPINUP && status == CAM_ATA_STATUS_ERROR) { goto noerror; /* * Some HP SATA disks report supported DMA Auto-Activation, * but return ABORT on attempt to enable it. */ } else if (softc->action == PROBE_SETDMAAA && status == CAM_ATA_STATUS_ERROR) { goto noerror; /* * SES and SAF-TE SEPs have different IDENTIFY commands, * but SATA specification doesn't tell how to identify them. * Until better way found, just try another if first fail. */ } else if (softc->action == PROBE_IDENTIFY_SES && status == CAM_ATA_STATUS_ERROR) { PROBE_SET_ACTION(softc, PROBE_IDENTIFY_SAFTE); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) xpt_async(AC_LOST_DEVICE, path, NULL); PROBE_SET_ACTION(softc, PROBE_INVALID); found = 0; goto done; } noerror: if (softc->restart) goto done; switch (softc->action) { case PROBE_RESET: { int sign = (done_ccb->ataio.res.lba_high << 8) + done_ccb->ataio.res.lba_mid; CAM_DEBUG(path, CAM_DEBUG_PROBE, ("SIGNATURE: %04x\n", sign)); if (sign == 0x0000 && done_ccb->ccb_h.target_id != 15) { path->device->protocol = PROTO_ATA; PROBE_SET_ACTION(softc, PROBE_IDENTIFY); } else if (sign == 0x9669 && done_ccb->ccb_h.target_id == 15) { /* Report SIM that PM is present. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.pm_present = 1; cts.xport_specific.sata.valid = CTS_SATA_VALID_PM; xpt_action((union ccb *)&cts); path->device->protocol = PROTO_SATAPM; PROBE_SET_ACTION(softc, PROBE_PM_PID); } else if (sign == 0xc33c && done_ccb->ccb_h.target_id != 15) { path->device->protocol = PROTO_SEMB; PROBE_SET_ACTION(softc, PROBE_IDENTIFY_SES); } else if (sign == 0xeb14 && done_ccb->ccb_h.target_id != 15) { path->device->protocol = PROTO_SCSI; PROBE_SET_ACTION(softc, PROBE_IDENTIFY); } else { if (done_ccb->ccb_h.target_id != 15) { xpt_print(path, "Unexpected signature 0x%04x\n", sign); } goto device_fail; } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } case PROBE_IDENTIFY: { struct ccb_pathinq cpi; int16_t *ptr; int veto = 0; ident_buf = &softc->ident_data; for (ptr = (int16_t *)ident_buf; ptr < (int16_t *)ident_buf + sizeof(struct ata_params)/2; ptr++) { *ptr = le16toh(*ptr); } /* * Allow others to veto this ATA disk attachment. This * is mainly used by VMs, whose disk controllers may * share the disks with the simulated ATA controllers. 
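 *
 * A handler registered on the ada_probe_veto eventhandler
 * (e.g. a paravirtualized driver that wants the disk for
 * itself) sets *veto non-zero, and the device is then dropped
 * via device_fail below.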
*/ EVENTHANDLER_INVOKE(ada_probe_veto, path, ident_buf, &veto); if (veto) { goto device_fail; } if (strncmp(ident_buf->model, "FX", 2) && strncmp(ident_buf->model, "NEC", 3) && strncmp(ident_buf->model, "Pioneer", 7) && strncmp(ident_buf->model, "SHARP", 5)) { ata_bswap(ident_buf->model, sizeof(ident_buf->model)); ata_bswap(ident_buf->revision, sizeof(ident_buf->revision)); ata_bswap(ident_buf->serial, sizeof(ident_buf->serial)); } ata_btrim(ident_buf->model, sizeof(ident_buf->model)); ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model)); ata_btrim(ident_buf->revision, sizeof(ident_buf->revision)); ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision)); ata_btrim(ident_buf->serial, sizeof(ident_buf->serial)); ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial)); /* Device may need spin-up before IDENTIFY become valid. */ if ((ident_buf->specconf == 0x37c8 || ident_buf->specconf == 0x738c) && ((ident_buf->config & ATA_RESP_INCOMPLETE) || softc->spinup == 0)) { PROBE_SET_ACTION(softc, PROBE_SPINUP); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } ident_buf = &path->device->ident_data; if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { /* Check that it is the same device. */ if (bcmp(softc->ident_data.model, ident_buf->model, sizeof(ident_buf->model)) || bcmp(softc->ident_data.revision, ident_buf->revision, sizeof(ident_buf->revision)) || bcmp(softc->ident_data.serial, ident_buf->serial, sizeof(ident_buf->serial))) { /* Device changed. */ xpt_async(AC_LOST_DEVICE, path, NULL); } else { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); changed = 0; } } if (changed) { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); /* Clean up from previous instance of this device */ if (path->device->serial_num != NULL) { free(path->device->serial_num, M_CAMXPT); path->device->serial_num = NULL; path->device->serial_num_len = 0; } if (path->device->device_id != NULL) { free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; path->device->device_id_len = 0; } path->device->serial_num = (u_int8_t *)malloc((sizeof(ident_buf->serial) + 1), M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { bcopy(ident_buf->serial, path->device->serial_num, sizeof(ident_buf->serial)); path->device->serial_num[sizeof(ident_buf->serial)] = '\0'; path->device->serial_num_len = strlen(path->device->serial_num); } if (ident_buf->enabled.extension & ATA_SUPPORT_64BITWWN) { path->device->device_id = malloc(16, M_CAMXPT, M_NOWAIT); if (path->device->device_id != NULL) { path->device->device_id_len = 16; bcopy(&fake_device_id_hdr, path->device->device_id, 8); bcopy(ident_buf->wwn, path->device->device_id + 8, 8); ata_bswap(path->device->device_id + 8, 8); } } path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID; xpt_async(AC_GETDEV_CHANGED, path, NULL); } if (ident_buf->satacapabilities & ATA_SUPPORT_NCQ) { path->device->mintags = 2; path->device->maxtags = ATA_QUEUE_LEN(ident_buf->queue) + 1; } ata_find_quirk(path->device); if (path->device->mintags != 0 && path->bus->sim->max_tagged_dev_openings != 0) { /* Check if the SIM does not want queued commands. */ bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_inquiry & PI_TAG_ABLE)) { /* Report SIM which tags are allowed. 
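 *
 * maxtags was computed above as ATA_QUEUE_LEN(queue) + 1; the
 * IDENTIFY queue depth field is zero-based, so a raw value of
 * 31 encodes the full NCQ depth of 32.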
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.tags = path->device->maxtags; cts.xport_specific.sata.valid = CTS_SATA_VALID_TAGS; xpt_action((union ccb *)&cts); } } ata_device_transport(path); if (changed) proberequestdefaultnegotiation(periph); PROBE_SET_ACTION(softc, PROBE_SETMODE); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } case PROBE_SPINUP: if (bootverbose) xpt_print(path, "Spin-up done\n"); softc->spinup = 1; PROBE_SET_ACTION(softc, PROBE_IDENTIFY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case PROBE_SETMODE: /* Set supported bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_SATA && cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps = cts.xport_specific.sata.caps & CTS_SATA_CAPS_H; else if (path->device->transport == XPORT_ATA && cts.xport_specific.ata.valid & CTS_ATA_VALID_CAPS) caps = cts.xport_specific.ata.caps & CTS_ATA_CAPS_H; else caps = 0; if (path->device->transport == XPORT_SATA && ident_buf->satacapabilities != 0xffff) { if (ident_buf->satacapabilities & ATA_SUPPORT_IFPWRMNGTRCV) caps |= CTS_SATA_CAPS_D_PMREQ; if (ident_buf->satacapabilities & ATA_SUPPORT_HAPST) caps |= CTS_SATA_CAPS_D_APST; } /* Mask unwanted bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (path->device->transport == XPORT_SATA && cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps &= cts.xport_specific.sata.caps; else if (path->device->transport == XPORT_ATA && cts.xport_specific.ata.valid & CTS_ATA_VALID_CAPS) caps &= cts.xport_specific.ata.caps; else caps = 0; /* * Remember what transport thinks about 48-bit DMA. If * capability information is not provided or transport is * SATA, we take support for granted. */ if (!(path->device->inq_flags & SID_DMA) || (path->device->transport == XPORT_ATA && (cts.xport_specific.ata.valid & CTS_ATA_VALID_CAPS) && !(caps & CTS_ATA_CAPS_H_DMA48))) path->device->inq_flags &= ~SID_DMA48; else path->device->inq_flags |= SID_DMA48; /* Store result to SIM. 
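The maxtags computation relies on IDENTIFY word 75, whose low five bits encode the NCQ queue depth minus one, which is why the probe adds one to ATA_QUEUE_LEN(). A small sketch of the decoding; the macro is redefined locally, and the NCQ bit tested (word 76, bit 8) matches the ATA_SUPPORT_NCQ value used above.

#include <stdint.h>
#include <stdio.h>

/* Word 75, bits 4:0 = maximum queue depth - 1 (ATA8-ACS). */
#define QUEUE_LEN(w75)  ((w75) & 0x1f)

static unsigned
ncq_depth(uint16_t word75, uint16_t word76)
{
    /* Bit 8 of word 76 advertises NCQ; without it, depth is 1. */
    if ((word76 & (1 << 8)) == 0)
        return (1);
    return (QUEUE_LEN(word75) + 1);
}

int
main(void)
{
    printf("%u\n", ncq_depth(0x001f, 0x0100));  /* 32 tags */
    return (0);
}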
*/ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; if (path->device->transport == XPORT_SATA) { cts.xport_specific.sata.caps = caps; cts.xport_specific.sata.valid = CTS_SATA_VALID_CAPS; } else { cts.xport_specific.ata.caps = caps; cts.xport_specific.ata.valid = CTS_ATA_VALID_CAPS; } xpt_action((union ccb *)&cts); softc->caps = caps; if (path->device->transport != XPORT_SATA) goto notsata; if ((ident_buf->satasupport & ATA_SUPPORT_IFPWRMNGT) && (!(softc->caps & CTS_SATA_CAPS_H_PMREQ)) != (!(ident_buf->sataenabled & ATA_SUPPORT_IFPWRMNGT))) { PROBE_SET_ACTION(softc, PROBE_SETPM); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETPM: if (ident_buf->satacapabilities != 0xffff && (ident_buf->satacapabilities & ATA_SUPPORT_DAPST) && (!(softc->caps & CTS_SATA_CAPS_H_APST)) != (!(ident_buf->sataenabled & ATA_ENABLED_DAPST))) { PROBE_SET_ACTION(softc, PROBE_SETAPST); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETAPST: if ((ident_buf->satasupport & ATA_SUPPORT_AUTOACTIVATE) && (!(softc->caps & CTS_SATA_CAPS_H_DMAAA)) != (!(ident_buf->sataenabled & ATA_SUPPORT_AUTOACTIVATE))) { PROBE_SET_ACTION(softc, PROBE_SETDMAAA); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETDMAAA: if (path->device->protocol != PROTO_ATA && (ident_buf->satasupport & ATA_SUPPORT_ASYNCNOTIF) && (!(softc->caps & CTS_SATA_CAPS_H_AN)) != (!(ident_buf->sataenabled & ATA_SUPPORT_ASYNCNOTIF))) { PROBE_SET_ACTION(softc, PROBE_SETAN); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } /* FALLTHROUGH */ case PROBE_SETAN: notsata: if (path->device->protocol == PROTO_ATA) { PROBE_SET_ACTION(softc, PROBE_SET_MULTI); } else { PROBE_SET_ACTION(softc, PROBE_INQUIRY); } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case PROBE_SET_MULTI: if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { u_int8_t periph_qual, len; path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; periph_qual = SID_QUAL(inq_buf); if (periph_qual != SID_QUAL_LU_CONNECTED && periph_qual != SID_QUAL_LU_OFFLINE) break; /* * We conservatively request only * SHORT_INQUIRY_LEN bytes of inquiry * information during our first try * at sending an INQUIRY. If the device * has more information to give, * perform a second request specifying * the amount of information the device * is willing to give. 
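Each arm of the SETPM/SETAPST/SETDMAAA/SETAN fall-through chain fires only when the host capability and the device's enabled state disagree; since the two flags occupy different bit positions, the code normalizes each with ! and compares, a logical XOR. The test in isolation (the bit values below are arbitrary):

#include <stdbool.h>
#include <stdio.h>

/* True when a SET FEATURES toggle is needed: host capability and
 * device "enabled" bits disagree after boolean normalization. */
static bool
needs_toggle(unsigned host_caps, unsigned cap_bit,
    unsigned dev_enabled, unsigned ena_bit)
{
    return ((!(host_caps & cap_bit)) != (!(dev_enabled & ena_bit)));
}

int
main(void)
{
    /* Host wants the feature, device currently has it off: toggle. */
    printf("%d\n", needs_toggle(0x4, 0x4, 0x0, 0x2));   /* 1 */
    /* Both agree it is on: nothing to do. */
    printf("%d\n", needs_toggle(0x4, 0x4, 0x2, 0x2));   /* 0 */
    return (0);
}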
*/ len = inq_buf->additional_length + offsetof(struct scsi_inquiry_data, additional_length) + 1; if (softc->action == PROBE_INQUIRY && len > SHORT_INQUIRY_LENGTH) { PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } ata_device_transport(path); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; } case PROBE_PM_PID: if ((path->device->flags & CAM_DEV_IDENTIFY_DATA_VALID) == 0) bzero(ident_buf, sizeof(*ident_buf)); softc->pm_pid = (done_ccb->ataio.res.lba_high << 24) + (done_ccb->ataio.res.lba_mid << 16) + (done_ccb->ataio.res.lba_low << 8) + done_ccb->ataio.res.sector_count; ((uint32_t *)ident_buf)[0] = softc->pm_pid; snprintf(ident_buf->model, sizeof(ident_buf->model), "Port Multiplier %08x", softc->pm_pid); PROBE_SET_ACTION(softc, PROBE_PM_PRV); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case PROBE_PM_PRV: softc->pm_prv = (done_ccb->ataio.res.lba_high << 24) + (done_ccb->ataio.res.lba_mid << 16) + (done_ccb->ataio.res.lba_low << 8) + done_ccb->ataio.res.sector_count; ((uint32_t *)ident_buf)[1] = softc->pm_prv; snprintf(ident_buf->revision, sizeof(ident_buf->revision), "%04x", softc->pm_prv); path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID; ata_device_transport(path); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) proberequestdefaultnegotiation(periph); /* Set supported bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps = cts.xport_specific.sata.caps & CTS_SATA_CAPS_H; else caps = 0; /* All PMPs must support PM requests. */ caps |= CTS_SATA_CAPS_D_PMREQ; /* Mask unwanted bits. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS) caps &= cts.xport_specific.sata.caps; else caps = 0; /* Remember what transport thinks about AEN. */ if ((caps & CTS_SATA_CAPS_H_AN) && path->device->protocol != PROTO_ATA) path->device->inq_flags |= SID_AEN; else path->device->inq_flags &= ~SID_AEN; /* Store result to SIM. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.caps = caps; cts.xport_specific.sata.valid = CTS_SATA_VALID_CAPS; xpt_action((union ccb *)&cts); softc->caps = caps; xpt_async(AC_GETDEV_CHANGED, path, NULL); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } else { done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_SCSI_AEN, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; case PROBE_IDENTIFY_SES: case PROBE_IDENTIFY_SAFTE: if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { /* Check that it is the same device. 
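PROBE_PM_PID and PROBE_PM_PRV rebuild a 32-bit port-multiplier register from the four 8-bit task-file result registers, most significant byte in lba_high. The splice on its own; the sample register value is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Rebuild the 32-bit PM register the way PROBE_PM_PID does:
 * lba_high:lba_mid:lba_low:sector_count, most significant first. */
static uint32_t
pm_reg32(uint8_t lba_high, uint8_t lba_mid, uint8_t lba_low,
    uint8_t sector_count)
{
    return (((uint32_t)lba_high << 24) | ((uint32_t)lba_mid << 16) |
        ((uint32_t)lba_low << 8) | sector_count);
}

int
main(void)
{
    /* Hypothetical product-ID bytes as returned in the registers. */
    printf("Port Multiplier %08x\n", pm_reg32(0x37, 0x26, 0x10, 0x95));
    return (0);
}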
*/ if (bcmp(&softc->ident_data, ident_buf, 53)) { /* Device changed. */ xpt_async(AC_LOST_DEVICE, path, NULL); } else { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); changed = 0; } } if (changed) { bcopy(&softc->ident_data, ident_buf, sizeof(struct ata_params)); /* Clean up from previous instance of this device */ if (path->device->device_id != NULL) { free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; path->device->device_id_len = 0; } path->device->device_id = malloc(16, M_CAMXPT, M_NOWAIT); if (path->device->device_id != NULL) { path->device->device_id_len = 16; bcopy(&fake_device_id_hdr, path->device->device_id, 8); bcopy(((uint8_t*)ident_buf) + 2, path->device->device_id + 8, 8); } path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID; } ata_device_transport(path); if (changed) proberequestdefaultnegotiation(periph); if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); break; default: panic("probedone: invalid action state 0x%x\n", softc->action); } done: if (softc->restart) { softc->restart = 0; xpt_release_ccb(done_ccb); probeschedule(periph); goto out; } xpt_release_ccb(done_ccb); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) { TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR; xpt_done(done_ccb); } /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } static void probecleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void ata_find_quirk(struct cam_ed *device) { struct ata_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->ident_data, (caddr_t)ata_quirk_table, nitems(ata_quirk_table), sizeof(*ata_quirk_table), ata_identify_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct ata_quirk_entry *)match; device->quirk = quirk; if (quirk->quirks & CAM_QUIRK_MAXTAGS) { device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } } typedef struct { union ccb *request_ccb; struct ccb_pathinq *cpi; int counter; } ata_scan_bus_info; /* * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. * As the scan progresses, xpt_scan_bus is used as the * callback on completion function. 
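ata_find_quirk() may panic on a failed match because the quirk table is expected to end in a catch-all wildcard row, the same row ata_alloc_device() installs as the default. A reduced sketch of that contract; the table contents are invented, and the glob match is simplified to a prefix test rather than the full cam_strmatch() rules.

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct quirk {
    const char  *model_glob;    /* "*" = wildcard, must stay last */
    int          maxtags;
};

static const struct quirk quirk_table[] = {
    { "Hypothetical 120G*", 0 },    /* example: broken NCQ, no tags */
    { "*",                  255 },  /* catch-all default entry */
};

static const struct quirk *
find_quirk(const char *model)
{
    size_t i, n;

    for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++) {
        const char *g = quirk_table[i].model_glob;

        /* Prefix-only simplification of cam_strmatch(). */
        n = strlen(g) - (g[strlen(g) - 1] == '*');
        if (strncmp(model, g, n) == 0)
            return (&quirk_table[i]);
    }
    assert(0);  /* unreachable while the wildcard row stays last */
    return (NULL);
}

int
main(void)
{
    printf("%d\n", find_quirk("Some Fine Disk")->maxtags); /* 255 */
    return (0);
}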
*/ static void ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { struct cam_path *path; ata_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; struct mtx *mtx; cam_status status; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: /* Find out the characteristics of the bus */ work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); return; } xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_PATH_INQ; xpt_action(work_ccb); if (work_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = work_ccb->ccb_h.status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } /* We may need to reset bus first, if we haven't done it yet. */ if ((work_ccb->cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && !timevalisset(&request_ccb->ccb_h.path->bus->last_reset)) { reset_ccb = xpt_alloc_ccb_nowait(); if (reset_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, CAM_PRIORITY_NONE); reset_ccb->ccb_h.func_code = XPT_RESET_BUS; xpt_action(reset_ccb); if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = reset_ccb->ccb_h.status; xpt_free_ccb(reset_ccb); xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_free_ccb(reset_ccb); } /* Save some state for use while we probe for devices */ scan_info = (ata_scan_bus_info *) malloc(sizeof(ata_scan_bus_info), M_CAMXPT, M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } scan_info->request_ccb = request_ccb; scan_info->cpi = &work_ccb->cpi; /* If PM supported, probe it first. */ if (scan_info->cpi->hba_inquiry & PI_SATAPM) scan_info->counter = scan_info->cpi->max_target; else scan_info->counter = 0; work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); break; } mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); goto scan_next; case XPT_SCAN_LUN: work_ccb = request_ccb; /* Reuse the same CCB to query if a device was really found */ scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0; mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_lock(mtx); /* If there is PMP... */ if ((scan_info->cpi->hba_inquiry & PI_SATAPM) && (scan_info->counter == scan_info->cpi->max_target)) { if (work_ccb->ccb_h.status == CAM_REQ_CMP) { /* everything else will be probed by it */ /* Free the current request path- we're done with it. */ xpt_free_path(work_ccb->ccb_h.path); goto done; } else { struct ccb_trans_settings cts; /* Report SIM that PM is absent. */ bzero(&cts, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, work_ccb->ccb_h.path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.xport_specific.sata.pm_present = 0; cts.xport_specific.sata.valid = CTS_SATA_VALID_PM; xpt_action((union ccb *)&cts); } } /* Free the current request path- we're done with it. */ xpt_free_path(work_ccb->ccb_h.path); if (scan_info->counter == ((scan_info->cpi->hba_inquiry & PI_SATAPM) ? 
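When the SIM advertises PI_SATAPM, ata_scan_bus() starts the scan at the port-multiplier slot (target equal to max_target) and then wraps to target 0 with a single modulo step. A toy loop showing the resulting visiting order, assuming an AHCI-like max_target of 15; the real scan terminates on state, not on a fixed count.

#include <stdio.h>

int
main(void)
{
    int max_target = 15, pm_supported = 1;
    /* If a PM may exist, probe its slot first, else start at 0. */
    int counter = pm_supported ? max_target : 0;
    int visited;

    for (visited = 0; visited <= max_target; visited++) {
        printf("probe target %d\n", counter);
        /* Take next device; wrap from max (PMP) to 0. */
        counter = (counter + 1) % (max_target + 1);
    }
    return (0);     /* order: 15, 0, 1, ..., 14 */
}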
0 : scan_info->cpi->max_target)) { done: mtx_unlock(mtx); xpt_free_ccb(work_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); break; } /* Take next device. Wrap from max (PMP) to 0. */ scan_info->counter = (scan_info->counter + 1 ) % (scan_info->cpi->max_target + 1); scan_next: status = xpt_create_path(&path, NULL, scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN) mtx_unlock(mtx); printf("xpt_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); xpt_free_ccb(work_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_done(request_ccb); break; } xpt_setup_ccb(&work_ccb->ccb_h, path, scan_info->request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = ata_scan_bus; work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; mtx_unlock(mtx); if (request_ccb->ccb_h.func_code == XPT_SCAN_LUN) mtx = NULL; xpt_action(work_ccb); if (mtx != NULL) mtx_lock(mtx); break; default: break; } } static void ata_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_scan_lun\n")); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if (request_ccb == NULL) { request_ccb = xpt_alloc_ccb_nowait(); if (request_ccb == NULL) { xpt_print(path, "xpt_scan_lun: can't allocate CCB, " "can't continue\n"); return; } status = xpt_create_path(&new_path, NULL, path->bus->path_id, path->target->target_id, path->device->lun_id); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: can't create path, " "can't continue\n"); xpt_free_ccb(request_ccb); return; } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->crcn.flags = flags; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; softc = (probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->restart = 1; } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { status = cam_periph_alloc(proberegister, NULL, probecleanup, probestart, "aprobe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static void xptscandone(struct cam_periph *periph, union ccb *done_ccb) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } static struct cam_ed * ata_alloc_device(struct 
cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct ata_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data and can determine a better quirk to use. */ quirk = &ata_quirk_table[nitems(ata_quirk_table) - 1]; device->quirk = (void *)quirk; device->mintags = 0; device->maxtags = 0; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; return (device); } static void ata_device_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct scsi_inquiry_data *inq_buf = NULL; struct ata_params *ident_buf = NULL; /* Get transport information from the SIM */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); path->device->transport = cpi.transport; if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) inq_buf = &path->device->inq_data; if ((path->device->flags & CAM_DEV_IDENTIFY_DATA_VALID) != 0) ident_buf = &path->device->ident_data; if (path->device->protocol == PROTO_ATA) { path->device->protocol_version = ident_buf ? ata_version(ident_buf->version_major) : cpi.protocol_version; } else if (path->device->protocol == PROTO_SCSI) { path->device->protocol_version = inq_buf ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; } path->device->transport_version = ident_buf ? ata_version(ident_buf->version_major) : cpi.transport_version; /* Tell the controller what we think */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; if (ident_buf) { if (path->device->transport == XPORT_ATA) { cts.xport_specific.ata.atapi = (ident_buf->config == ATA_PROTO_CFA) ? 0 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_16) ? 16 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12) ? 12 : 0; cts.xport_specific.ata.valid = CTS_ATA_VALID_ATAPI; } else { cts.xport_specific.sata.atapi = (ident_buf->config == ATA_PROTO_CFA) ? 0 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_16) ? 16 : ((ident_buf->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12) ? 
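The nested ternaries that set the atapi field condense a lookup on IDENTIFY word 0: CFA devices never speak ATAPI, otherwise bit 15 plus the low bits select a 12- or 16-byte packet. The same lookup as a plain function; the constants are redefined locally to the values the ATA and CompactFlash specifications assign, so treat them as assumptions rather than the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PROTO_MASK      0x8003  /* bit 15: ATAPI; bits 1:0: CDB size */
#define PROTO_ATAPI_12  0x8000
#define PROTO_ATAPI_16  0x8001
#define PROTO_CFA       0x848a  /* CompactFlash: never ATAPI */

/* ATAPI packet (CDB) length in bytes, 0 for non-packet devices. */
static int
atapi_cdb_len(uint16_t config)
{
    if (config == PROTO_CFA)
        return (0);
    switch (config & PROTO_MASK) {
    case PROTO_ATAPI_16:
        return (16);
    case PROTO_ATAPI_12:
        return (12);
    default:
        return (0);
    }
}

int
main(void)
{
    printf("%d\n", atapi_cdb_len(0x8580));  /* e.g. a CD-ROM: 12 */
    return (0);
}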
12 : 0; cts.xport_specific.sata.valid = CTS_SATA_VALID_ATAPI; } } else cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void ata_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) free(device->physpath, M_CAMXPT); device->physpath_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void ata_action(union ccb *start_ccb) { switch (start_ccb->ccb_h.func_code) { case XPT_SET_TRAN_SETTINGS: { ata_set_transfer_settings(&start_ccb->cts, start_ccb->ccb_h.path, /*async_update*/FALSE); break; } case XPT_SCAN_BUS: case XPT_SCAN_TGT: ata_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); break; case XPT_SCAN_LUN: ata_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_GET_TRAN_SETTINGS: { ata_get_transfer_settings(&start_ccb->cts); break; } case XPT_SCSI_IO: { struct cam_ed *device; u_int maxlen = 0; device = start_ccb->ccb_h.path->device; if (device->protocol == PROTO_SCSI && (device->flags & CAM_DEV_IDENTIFY_DATA_VALID)) { uint16_t p = device->ident_data.config & ATA_PROTO_MASK; maxlen = (device->ident_data.config == ATA_PROTO_CFA) ? 0 : (p == ATA_PROTO_ATAPI_16) ? 16 : (p == ATA_PROTO_ATAPI_12) ? 
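Every read-side branch of ata_dev_advinfo() follows one contract: provsiz always reports the full length so the caller can retry with a larger buffer, while the memcpy() is clamped to what fits. The contract in isolation; the request struct is a stand-in, not the real ccb_dev_advinfo.

#include <stdio.h>
#include <string.h>

/* Stand-in for the advinfo exchange: caller supplies buf/bufsiz,
 * provider fills provsiz with the authoritative length. */
struct advinfo_req {
    char    *buf;
    size_t   bufsiz;
    size_t   provsiz;
};

static void
fetch_serial(struct advinfo_req *req, const char *serial, size_t serial_len)
{
    size_t amt;

    req->provsiz = serial_len;      /* always the full size */
    if (serial_len == 0)
        return;
    amt = serial_len;
    if (req->provsiz > req->bufsiz) /* truncate the copy, not provsiz */
        amt = req->bufsiz;
    memcpy(req->buf, serial, amt);
}

int
main(void)
{
    char small[4];
    struct advinfo_req req = { small, sizeof(small), 0 };

    fetch_serial(&req, "WD-WCC4N123456", 14);
    printf("copied %zu of %zu\n",
        req.provsiz > req.bufsiz ? req.bufsiz : req.provsiz,
        req.provsiz);               /* copied 4 of 14 */
    return (0);
}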
12 : 0; } if (start_ccb->csio.cdb_len > maxlen) { start_ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(start_ccb); break; } xpt_action_default(start_ccb); break; } case XPT_DEV_ADVINFO: { ata_dev_advinfo(start_ccb); break; } default: xpt_action_default(start_ccb); break; } } static void ata_get_transfer_settings(struct ccb_trans_settings *cts) { struct ccb_trans_settings_ata *ata; struct ccb_trans_settings_scsi *scsi; struct cam_ed *device; device = cts->ccb_h.path->device; xpt_action_default((union ccb *)cts); if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol == PROTO_ATA) { ata = &cts->proto_specific.ata; if ((ata->valid & CTS_ATA_VALID_TQ) == 0) { ata->valid |= CTS_ATA_VALID_TQ; if (cts->type == CTS_TYPE_USER_SETTINGS || (device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) ata->flags |= CTS_ATA_FLAGS_TAG_ENB; } } if (cts->protocol == PROTO_SCSI) { scsi = &cts->proto_specific.scsi; if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) { scsi->valid |= CTS_SCSI_VALID_TQ; if (cts->type == CTS_TYPE_USER_SETTINGS || (device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } } static void ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings_ata *ata; struct ccb_trans_settings_scsi *scsi; struct ata_params *ident_data; struct scsi_inquiry_data *inq_data; struct cam_ed *device; if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; } if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol_version == PROTO_VERSION_UNKNOWN || cts->protocol_version == PROTO_VERSION_UNSPECIFIED) cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } cts->protocol_version = device->protocol_version; } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } if (cts->transport_version == XPORT_VERSION_UNKNOWN || cts->transport_version == XPORT_VERSION_UNSPECIFIED) cts->transport_version = device->transport_version; if (cts->transport != device->transport) { xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } ident_data = &device->ident_data; inq_data = &device->inq_data; if (cts->protocol == PROTO_ATA) ata = &cts->proto_specific.ata; else ata = NULL; if 
(cts->protocol == PROTO_SCSI) scsi = &cts->proto_specific.scsi; else scsi = NULL; xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* Sanity checking */ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 || (ata && (ident_data->satacapabilities & ATA_SUPPORT_NCQ) == 0) || (scsi && (INQ_DATA_TQ_ENABLED(inq_data)) == 0) || (device->queue_flags & SCP_QUEUE_DQUE) != 0 || (device->mintags == 0)) { /* * Can't tag on hardware that doesn't support tags, * doesn't have it enabled, or has broken tag support. */ if (ata) ata->flags &= ~CTS_ATA_FLAGS_TAG_ENB; if (scsi) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } /* Start/stop tags use. */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS && ((ata && (ata->valid & CTS_ATA_VALID_TQ) != 0) || (scsi && (scsi->valid & CTS_SCSI_VALID_TQ) != 0))) { int nowt, newt = 0; nowt = ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0); if (ata) newt = (ata->flags & CTS_ATA_FLAGS_TAG_ENB) != 0; if (scsi) newt = (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0; if (newt && !nowt) { /* * Delay change to use tags until after a * few commands have gone to this device so * the controller has time to perform transfer * negotiations without tagged messages getting * in the way. */ device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else if (nowt && !newt) xpt_stop_tags(path); } if (async_update == FALSE) xpt_action_default((union ccb *)cts); } /* * Handle any per-device event notifications that require action by the XPT. */ static void ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { cam_status status; struct cam_path newpath; /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; /* * We need our own path with wildcards expanded to * handle certain types of events. */ if ((async_code == AC_SENT_BDR) || (async_code == AC_BUS_RESET) || (async_code == AC_INQ_CHANGED)) status = xpt_compile_path(&newpath, NULL, bus->path_id, target->target_id, device->lun_id); else status = CAM_REQ_CMP_ERR; if (status == CAM_REQ_CMP) { if (async_code == AC_INQ_CHANGED) { /* * We've sent a start unit command, or * something similar to a device that * may have caused its inquiry data to * change. So we re-scan the device to * refresh the inquiry data for it. */ ata_scan_lun(newpath.periph, &newpath, CAM_EXPECT_INQ_CHANGE, NULL); } else { /* We need to reinitialize device after reset. 
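The tag start/stop logic above is deliberately asymmetric: enabling tags is deferred for CAM_TAG_DELAY_COUNT commands so transfer negotiation can finish untagged, while disabling takes effect immediately via xpt_stop_tags(). A self-contained model of that transition; the struct and counter handling are illustrative, not the CAM bookkeeping.

#include <stdbool.h>
#include <stdio.h>

#define TAG_DELAY_COUNT 5   /* commands to run before tags kick in */

struct dev_state {
    bool    tags_on;        /* tags currently in use */
    bool    tags_pending;   /* enable armed, waiting out the delay */
    int     delay;
};

static void
set_tags(struct dev_state *d, bool want)
{
    bool now = d->tags_on || d->tags_pending;

    if (want && !now) {         /* off -> on: arm the delay */
        d->tags_pending = true;
        d->delay = TAG_DELAY_COUNT;
    } else if (now && !want) {  /* on -> off: immediate */
        d->tags_on = d->tags_pending = false;
    }
}

/* Per completed command; promotes pending -> on after the delay. */
static void
command_done(struct dev_state *d)
{
    if (d->tags_pending && --d->delay == 0) {
        d->tags_pending = false;
        d->tags_on = true;
    }
}

int
main(void)
{
    struct dev_state d = { false, false, 0 };
    int i;

    set_tags(&d, true);
    for (i = 0; i < TAG_DELAY_COUNT; i++)
        command_done(&d);
    printf("tags_on=%d\n", d.tags_on);  /* 1 */
    return (0);
}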
*/ ata_scan_lun(newpath.periph, &newpath, 0, NULL); } xpt_release_path(&newpath); } else if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; xpt_compile_path(&path, NULL, bus->path_id, target->target_id, device->lun_id); ata_set_transfer_settings(settings, &path, /*async_update*/TRUE); xpt_release_path(&path); } } static void _ata_announce_periph(struct cam_periph *periph, struct ccb_trans_settings *cts, u_int *speed) { struct ccb_pathinq cpi; struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL); cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts->type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)cts); if ((cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; /* Ask the SIM for its base transfer speed */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* Report connection speed */ *speed = cpi.base_transfer_speed; if (cts->transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts->xport_specific.ata; if (pata->valid & CTS_ATA_VALID_MODE) *speed = ata_mode2speed(pata->mode); } if (cts->transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts->xport_specific.sata; if (sata->valid & CTS_SATA_VALID_REVISION) *speed = ata_revision2speed(sata->revision); } } static void ata_announce_periph(struct cam_periph *periph) { struct ccb_trans_settings cts; u_int speed, mb; _ata_announce_periph(periph, &cts, &speed); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) printf("%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else printf("%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about connection */ if (cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts.xport_specific.ata; printf(" ("); if (pata->valid & CTS_ATA_VALID_MODE) printf("%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) printf("ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) printf("PIO %dbytes", pata->bytecount); printf(")"); } if (cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts.xport_specific.sata; printf(" ("); if (sata->valid & CTS_SATA_VALID_REVISION) printf("SATA %d.x, ", sata->revision); else printf("SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) printf("%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_ATA_VALID_ATAPI) && sata->atapi != 0) printf("ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) printf("PIO %dbytes", sata->bytecount); printf(")"); } printf("\n"); } static void ata_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct ccb_trans_settings cts; u_int speed, mb; _ata_announce_periph(periph, &cts, &speed); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about connection */ if 
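Both announce variants format the KB/s figure from the transport as megabytes with three decimals using only integer division and remainder. The split on its own, with a SATA-3-style 600000 KB/s as the sample input:

#include <stdio.h>

static void
print_speed(unsigned speed_kb)
{
    unsigned mb = speed_kb / 1000;

    if (mb > 0)
        printf("%u.%03uMB/s transfers\n", mb, speed_kb % 1000);
    else
        printf("%uKB/s transfers\n", speed_kb);
}

int
main(void)
{
    print_speed(600000);    /* 600.000MB/s transfers */
    print_speed(300);       /* 300KB/s transfers */
    return (0);
}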
(cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts.xport_specific.ata; sbuf_printf(sb, " ("); if (pata->valid & CTS_ATA_VALID_MODE) sbuf_printf(sb, "%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) sbuf_printf(sb, "ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) sbuf_printf(sb, "PIO %dbytes", pata->bytecount); sbuf_printf(sb, ")"); } if (cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts.xport_specific.sata; sbuf_printf(sb, " ("); if (sata->valid & CTS_SATA_VALID_REVISION) sbuf_printf(sb, "SATA %d.x, ", sata->revision); else sbuf_printf(sb, "SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) sbuf_printf(sb, "%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_ATA_VALID_ATAPI) && sata->atapi != 0) sbuf_printf(sb, "ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) sbuf_printf(sb, "PIO %dbytes", sata->bytecount); sbuf_printf(sb, ")"); } sbuf_printf(sb, "\n"); } static void ata_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { ata_print_ident_sbuf(&device->ident_data, sb); } static void ata_proto_announce(struct cam_ed *device) { ata_print_ident(&device->ident_data); } static void ata_proto_denounce(struct cam_ed *device) { ata_print_ident_short(&device->ident_data); } static void ata_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { ata_print_ident_short_sbuf(&device->ident_data, sb); } static void semb_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { semb_print_ident_sbuf((struct sep_identify_data *)&device->ident_data, sb); } static void semb_proto_announce(struct cam_ed *device) { semb_print_ident((struct sep_identify_data *)&device->ident_data); } static void semb_proto_denounce(struct cam_ed *device) { semb_print_ident_short((struct sep_identify_data *)&device->ident_data); } static void semb_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { semb_print_ident_short_sbuf((struct sep_identify_data *)&device->ident_data, sb); } static void ata_proto_debug_out(union ccb *ccb) { char cdb_str[(sizeof(struct ata_cmd) * 3) + 1]; if (ccb->ccb_h.func_code != XPT_ATA_IO) return; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. ACB: %s\n", ata_op_string(&ccb->ataio.cmd), ata_cmd_string(&ccb->ataio.cmd, cdb_str, sizeof(cdb_str)))); } Index: head/sys/cam/cam.c =================================================================== --- head/sys/cam/cam.c (revision 326264) +++ head/sys/cam/cam.c (revision 326265) @@ -1,588 +1,590 @@ /*- * Generic utility routines for the Common Access Method layer. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #else /* _KERNEL */ #include #include #include #include #endif /* _KERNEL */ #include #include #include #include #include #ifdef _KERNEL #include #include #include FEATURE(scbus, "SCSI devices support"); #endif static int camstatusentrycomp(const void *key, const void *member); const struct cam_status_entry cam_status_table[] = { { CAM_REQ_INPROG, "CCB request is in progress" }, { CAM_REQ_CMP, "CCB request completed without error" }, { CAM_REQ_ABORTED, "CCB request aborted by the host" }, { CAM_UA_ABORT, "Unable to abort CCB request" }, { CAM_REQ_CMP_ERR, "CCB request completed with an error" }, { CAM_BUSY, "CAM subsystem is busy" }, { CAM_REQ_INVALID, "CCB request was invalid" }, { CAM_PATH_INVALID, "Supplied Path ID is invalid" }, { CAM_DEV_NOT_THERE, "Device Not Present" }, { CAM_UA_TERMIO, "Unable to terminate I/O CCB request" }, { CAM_SEL_TIMEOUT, "Selection Timeout" }, { CAM_CMD_TIMEOUT, "Command timeout" }, { CAM_SCSI_STATUS_ERROR, "SCSI Status Error" }, { CAM_MSG_REJECT_REC, "Message Reject Reveived" }, { CAM_SCSI_BUS_RESET, "SCSI Bus Reset Sent/Received" }, { CAM_UNCOR_PARITY, "Uncorrectable parity/CRC error" }, { CAM_AUTOSENSE_FAIL, "Auto-Sense Retrieval Failed" }, { CAM_NO_HBA, "No HBA Detected" }, { CAM_DATA_RUN_ERR, "Data Overrun error" }, { CAM_UNEXP_BUSFREE, "Unexpected Bus Free" }, { CAM_SEQUENCE_FAIL, "Target Bus Phase Sequence Failure" }, { CAM_CCB_LEN_ERR, "CCB length supplied is inadequate" }, { CAM_PROVIDE_FAIL, "Unable to provide requested capability" }, { CAM_BDR_SENT, "SCSI BDR Message Sent" }, { CAM_REQ_TERMIO, "CCB request terminated by the host" }, { CAM_UNREC_HBA_ERROR, "Unrecoverable Host Bus Adapter Error" }, { CAM_REQ_TOO_BIG, "The request was too large for this host" }, { CAM_REQUEUE_REQ, "Unconditionally Re-queue Request", }, { CAM_ATA_STATUS_ERROR, "ATA Status Error" }, { CAM_SCSI_IT_NEXUS_LOST,"Initiator/Target Nexus Lost" }, { CAM_SMP_STATUS_ERROR, "SMP Status Error" }, { CAM_IDE, "Initiator Detected Error Message Received" }, { CAM_RESRC_UNAVAIL, "Resource Unavailable" }, { CAM_UNACKED_EVENT, "Unacknowledged Event by Host" }, { CAM_MESSAGE_RECV, "Message Received in Host Target Mode" }, { CAM_INVALID_CDB, "Invalid CDB received in Host Target Mode" }, { CAM_LUN_INVALID, "Invalid Lun" }, { CAM_TID_INVALID, "Invalid Target ID" }, { CAM_FUNC_NOTAVAIL, "Function Not Available" }, { CAM_NO_NEXUS, "Nexus Not Established" }, { CAM_IID_INVALID, "Invalid Initiator ID" }, { CAM_CDB_RECVD, "CDB Received" }, { CAM_LUN_ALRDY_ENA, "LUN Already Enabled for Target Mode" }, { CAM_SCSI_BUSY, "SCSI Bus Busy" }, }; #ifdef _KERNEL SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem"); #ifndef CAM_DEFAULT_SORT_IO_QUEUES #define CAM_DEFAULT_SORT_IO_QUEUES 1 #endif int cam_sort_io_queues = CAM_DEFAULT_SORT_IO_QUEUES; SYSCTL_INT(_kern_cam, OID_AUTO, sort_io_queues, CTLFLAG_RWTUN, &cam_sort_io_queues, 0, "Sort IO queues to try and optimise disk access patterns"); #endif void 
cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen) { /* Trim leading/trailing spaces, nulls. */ while (srclen > 0 && src[0] == ' ') src++, srclen--; while (srclen > 0 && (src[srclen-1] == ' ' || src[srclen-1] == '\0')) srclen--; while (srclen > 0 && dstlen > 1) { u_int8_t *cur_pos = dst; if (*src < 0x20 || *src >= 0x80) { /* SCSI-II Specifies that these should never occur. */ /* non-printable character */ if (dstlen > 4) { *cur_pos++ = '\\'; *cur_pos++ = ((*src & 0300) >> 6) + '0'; *cur_pos++ = ((*src & 0070) >> 3) + '0'; *cur_pos++ = ((*src & 0007) >> 0) + '0'; } else { *cur_pos++ = '?'; } } else { /* normal character */ *cur_pos++ = *src; } src++; srclen--; dstlen -= cur_pos - dst; dst = cur_pos; } *dst = '\0'; } void cam_strvis_sbuf(struct sbuf *sb, const u_int8_t *src, int srclen, uint32_t flags) { /* Trim leading/trailing spaces, nulls. */ while (srclen > 0 && src[0] == ' ') src++, srclen--; while (srclen > 0 && (src[srclen-1] == ' ' || src[srclen-1] == '\0')) srclen--; while (srclen > 0) { if (*src < 0x20 || *src >= 0x80) { /* SCSI-II Specifies that these should never occur. */ /* non-printable character */ switch (flags & CAM_STRVIS_FLAG_NONASCII_MASK) { case CAM_STRVIS_FLAG_NONASCII_ESC: sbuf_printf(sb, "\\%c%c%c", ((*src & 0300) >> 6) + '0', ((*src & 0070) >> 3) + '0', ((*src & 0007) >> 0) + '0'); break; case CAM_STRVIS_FLAG_NONASCII_RAW: /* * If we run into a NUL, just transform it * into a space. */ if (*src != 0x00) sbuf_putc(sb, *src); else sbuf_putc(sb, ' '); break; case CAM_STRVIS_FLAG_NONASCII_SPC: sbuf_putc(sb, ' '); break; case CAM_STRVIS_FLAG_NONASCII_TRIM: default: break; } } else { /* normal character */ sbuf_putc(sb, *src); } src++; srclen--; } } /* * Compare string with pattern, returning 0 on match. * Short pattern matches trailing blanks in name, * Shell globbing rules apply: * matches 0 or more characters, * ? matchces one character, [...] denotes a set to match one char, * [^...] denotes a complimented set to match one character. * Spaces in str used to match anything in the pattern string * but was removed because it's a bug. No current patterns require * it, as far as I know, but it's impossible to know what drives * returned. * * Each '*' generates recursion, so keep the number of * in check. */ int cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len) { while (*pattern != '\0' && str_len > 0) { if (*pattern == '*') { pattern++; if (*pattern == '\0') return (0); do { if (cam_strmatch(str, pattern, str_len) == 0) return (0); str++; str_len--; } while (str_len > 0); return (1); } else if (*pattern == '[') { int negate_range, ok; uint8_t pc = UCHAR_MAX; uint8_t sc; ok = 0; sc = *str++; str_len--; pattern++; if ((negate_range = (*pattern == '^')) != 0) pattern++; while ((*pattern != ']') && *pattern != '\0') { if (*pattern == '-') { if (pattern[1] == '\0') /* Bad pattern */ return (1); if (sc >= pc && sc <= pattern[1]) ok = 1; pattern++; } else if (*pattern == sc) ok = 1; pc = *pattern; pattern++; } if (ok == negate_range) return (1); pattern++; } else if (*pattern == '?') { /* * NB: || *str == ' ' of the old code is a bug and was * removed. If you add it back, keep this the last if * before the naked else */ pattern++; str++; str_len--; } else { if (*str != *pattern) return (1); pattern++; str++; str_len--; } } /* '*' is allowed to match nothing, so gobble it */ while (*pattern == '*') pattern++; if ( *pattern != '\0') { /* Pattern not fully consumed. 
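cam_strvis() renders any byte outside 0x20..0x7f as a backslash plus three octal digits when four output bytes fit, a vis(3)-style escape built from the three octal digit groups. Just the escaping rule, as a user-space sketch:

#include <stdio.h>

/* Print src, escaping bytes outside 0x20..0x7f as \NNN octal,
 * the same rendering cam_strvis() uses for inquiry strings. */
static void
strvis_print(const unsigned char *src, int srclen)
{
    int i;

    for (i = 0; i < srclen; i++) {
        if (src[i] < 0x20 || src[i] >= 0x80)
            printf("\\%c%c%c",
                ((src[i] & 0300) >> 6) + '0',
                ((src[i] & 0070) >> 3) + '0',
                ((src[i] & 0007) >> 0) + '0');
        else
            putchar(src[i]);
    }
    putchar('\n');
}

int
main(void)
{
    unsigned char id[] = { 'd', 'a', '0', 0x01, 0xff };

    strvis_print(id, sizeof(id));   /* prints "da0\001\377" */
    return (0);
}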
Not a match */ return (1); } /* Eat trailing spaces, which get added by SAT */ while (str_len > 0 && *str == ' ') { str++; str_len--; } return (str_len); } caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries, int entry_size, cam_quirkmatch_t *comp_func) { for (; num_entries > 0; num_entries--, quirk_table += entry_size) { if ((*comp_func)(target, quirk_table) == 0) return (quirk_table); } return (NULL); } const struct cam_status_entry* cam_fetch_status_entry(cam_status status) { status &= CAM_STATUS_MASK; return (bsearch(&status, &cam_status_table, nitems(cam_status_table), sizeof(*cam_status_table), camstatusentrycomp)); } static int camstatusentrycomp(const void *key, const void *member) { cam_status status; const struct cam_status_entry *table_entry; status = *(const cam_status *)key; table_entry = (const struct cam_status_entry *)member; return (status - table_entry->status_code); } #ifdef _KERNEL char * cam_error_string(union ccb *ccb, char *str, int str_len, cam_error_string_flags flags, cam_error_proto_flags proto_flags) #else /* !_KERNEL */ char * cam_error_string(struct cam_device *device, union ccb *ccb, char *str, int str_len, cam_error_string_flags flags, cam_error_proto_flags proto_flags) #endif /* _KERNEL/!_KERNEL */ { char path_str[64]; struct sbuf sb; if ((ccb == NULL) || (str == NULL) || (str_len <= 0)) return(NULL); if (flags == CAM_ESF_NONE) return(NULL); switch (ccb->ccb_h.func_code) { case XPT_ATA_IO: switch (proto_flags & CAM_EPF_LEVEL_MASK) { case CAM_EPF_NONE: break; case CAM_EPF_ALL: case CAM_EPF_NORMAL: proto_flags |= CAM_EAF_PRINT_RESULT; /* FALLTHROUGH */ case CAM_EPF_MINIMAL: proto_flags |= CAM_EAF_PRINT_STATUS; /* FALLTHROUGH */ default: break; } break; case XPT_SCSI_IO: switch (proto_flags & CAM_EPF_LEVEL_MASK) { case CAM_EPF_NONE: break; case CAM_EPF_ALL: case CAM_EPF_NORMAL: proto_flags |= CAM_ESF_PRINT_SENSE; /* FALLTHROUGH */ case CAM_EPF_MINIMAL: proto_flags |= CAM_ESF_PRINT_STATUS; /* FALLTHROUGH */ default: break; } break; case XPT_SMP_IO: switch (proto_flags & CAM_EPF_LEVEL_MASK) { case CAM_EPF_NONE: break; case CAM_EPF_ALL: proto_flags |= CAM_ESMF_PRINT_FULL_CMD; /* FALLTHROUGH */ case CAM_EPF_NORMAL: case CAM_EPF_MINIMAL: proto_flags |= CAM_ESMF_PRINT_STATUS; /* FALLTHROUGH */ default: break; } break; default: break; } #ifdef _KERNEL xpt_path_string(ccb->csio.ccb_h.path, path_str, sizeof(path_str)); #else /* !_KERNEL */ cam_path_string(device, path_str, sizeof(path_str)); #endif /* _KERNEL/!_KERNEL */ sbuf_new(&sb, str, str_len, 0); if (flags & CAM_ESF_COMMAND) { sbuf_cat(&sb, path_str); switch (ccb->ccb_h.func_code) { case XPT_ATA_IO: ata_command_sbuf(&ccb->ataio, &sb); sbuf_printf(&sb, "\n"); break; case XPT_SCSI_IO: #ifdef _KERNEL scsi_command_string(&ccb->csio, &sb); #else /* !_KERNEL */ scsi_command_string(device, &ccb->csio, &sb); #endif /* _KERNEL/!_KERNEL */ sbuf_printf(&sb, "\n"); break; case XPT_SMP_IO: smp_command_sbuf(&ccb->smpio, &sb, path_str, 79 - strlen(path_str), (proto_flags & CAM_ESMF_PRINT_FULL_CMD) ? 
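cam_fetch_status_entry() strips the flag bits with CAM_STATUS_MASK and binary-searches cam_status_table, which therefore must stay sorted by status code. The same shape over a four-row excerpt of the table:

#include <stdio.h>
#include <stdlib.h>

struct status_entry {
    int          code;
    const char  *text;
};

/* Must be sorted by code for bsearch(3), as cam_status_table is. */
static const struct status_entry table[] = {
    { 0x00, "CCB request is in progress" },
    { 0x01, "CCB request completed without error" },
    { 0x0a, "Selection Timeout" },
    { 0x1c, "ATA Status Error" },
};

static int
entry_comp(const void *key, const void *member)
{
    return (*(const int *)key -
        ((const struct status_entry *)member)->code);
}

int
main(void)
{
    int status = 0x1c | 0x40;   /* status with a CAM_DEV_QFRZN-style flag */
    int masked = status & 0x3f; /* CAM_STATUS_MASK: drop the flag bits */
    const struct status_entry *e;

    e = bsearch(&masked, table, sizeof(table) / sizeof(table[0]),
        sizeof(table[0]), entry_comp);
    printf("%s\n", e != NULL ? e->text : "Unknown");    /* ATA Status Error */
    return (0);
}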
79 : 0); sbuf_printf(&sb, "\n"); break; default: break; } } if (flags & CAM_ESF_CAM_STATUS) { cam_status status; const struct cam_status_entry *entry; sbuf_cat(&sb, path_str); status = ccb->ccb_h.status & CAM_STATUS_MASK; entry = cam_fetch_status_entry(status); if (entry == NULL) sbuf_printf(&sb, "CAM status: Unknown (%#x)\n", ccb->ccb_h.status); else sbuf_printf(&sb, "CAM status: %s\n", entry->status_text); } if (flags & CAM_ESF_PROTO_STATUS) { switch (ccb->ccb_h.func_code) { case XPT_ATA_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_ATA_STATUS_ERROR) break; if (proto_flags & CAM_EAF_PRINT_STATUS) { sbuf_cat(&sb, path_str); ata_status_sbuf(&ccb->ataio, &sb); sbuf_printf(&sb, "\n"); } if (proto_flags & CAM_EAF_PRINT_RESULT) { sbuf_cat(&sb, path_str); sbuf_printf(&sb, "RES: "); ata_res_sbuf(&ccb->ataio.res, &sb); sbuf_printf(&sb, "\n"); } break; case XPT_SCSI_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SCSI_STATUS_ERROR) break; if (proto_flags & CAM_ESF_PRINT_STATUS) { sbuf_cat(&sb, path_str); sbuf_printf(&sb, "SCSI status: %s\n", scsi_status_string(&ccb->csio)); } if ((proto_flags & CAM_ESF_PRINT_SENSE) && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)) { #ifdef _KERNEL scsi_sense_sbuf(&ccb->csio, &sb, SSS_FLAG_NONE); #else /* !_KERNEL */ scsi_sense_sbuf(device, &ccb->csio, &sb, SSS_FLAG_NONE); #endif /* _KERNEL/!_KERNEL */ } break; case XPT_SMP_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SMP_STATUS_ERROR) break; if (proto_flags & CAM_ESF_PRINT_STATUS) { sbuf_cat(&sb, path_str); sbuf_printf(&sb, "SMP status: %s (%#x)\n", smp_error_desc(ccb->smpio.smp_response[2]), ccb->smpio.smp_response[2]); } /* There is no SMP equivalent to SCSI sense. */ break; default: break; } } sbuf_finish(&sb); return(sbuf_data(&sb)); } #ifdef _KERNEL void cam_error_print(union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags) { char str[512]; printf("%s", cam_error_string(ccb, str, sizeof(str), flags, proto_flags)); } #else /* !_KERNEL */ void cam_error_print(struct cam_device *device, union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags, FILE *ofile) { char str[512]; if ((device == NULL) || (ccb == NULL) || (ofile == NULL)) return; fprintf(ofile, "%s", cam_error_string(device, ccb, str, sizeof(str), flags, proto_flags)); } #endif /* _KERNEL/!_KERNEL */ /* * Common calculate geometry fuction * * Caller should set ccg->volume_size and block_size. * The extended parameter should be zero if extended translation * should not be used. */ void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended) { uint32_t size_mb, secs_per_cylinder; if (ccg->block_size == 0) { ccg->ccb_h.status = CAM_REQ_CMP_ERR; return; } size_mb = (1024L * 1024L) / ccg->block_size; if (size_mb == 0) { ccg->ccb_h.status = CAM_REQ_CMP_ERR; return; } size_mb = ccg->volume_size / size_mb; if (size_mb > 1024 && extended) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; if (secs_per_cylinder == 0) { ccg->ccb_h.status = CAM_REQ_CMP_ERR; return; } ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccg->ccb_h.status = CAM_REQ_CMP; } Index: head/sys/cam/cam.h =================================================================== --- head/sys/cam/cam.h (revision 326264) +++ head/sys/cam/cam.h (revision 326265) @@ -1,413 +1,415 @@ /*- * Data structures and definitions for the CAM system. 
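With 512-byte sectors the divisor in cam_calc_geometry() comes to 2048 sectors per megabyte, so any volume over 1024 MB gets the 255-head, 63-sector extended translation. A user-space rerun of the arithmetic for a hypothetical 4 GB disk:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t block_size = 512;
    uint64_t volume_size = 8388608; /* sectors: 4 GB at 512 B each */
    int extended = 1;
    uint32_t heads, secs, cyls, size_mb;

    size_mb = volume_size / ((1024L * 1024L) / block_size);
    if (size_mb > 1024 && extended) {   /* > 1 GB: extended CHS */
        heads = 255;
        secs = 63;
    } else {
        heads = 64;
        secs = 32;
    }
    cyls = volume_size / (heads * secs);
    printf("%u MB -> C/H/S %u/%u/%u\n", size_mb, cyls, heads, secs);
    return (0);     /* 4096 MB -> C/H/S 522/255/63 */
}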
* + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_H #define _CAM_CAM_H 1 #ifdef _KERNEL #include "opt_cam.h" #endif #include typedef u_int path_id_t; typedef u_int target_id_t; typedef u_int64_t lun_id_t; #define CAM_XPT_PATH_ID ((path_id_t)~0) #define CAM_BUS_WILDCARD ((path_id_t)~0) #define CAM_TARGET_WILDCARD ((target_id_t)~0) #define CAM_LUN_WILDCARD (~(u_int)0) #define CAM_EXTLUN_BYTE_SWIZZLE(lun) ( \ ((((u_int64_t)lun) & 0xffff000000000000L) >> 48) | \ ((((u_int64_t)lun) & 0x0000ffff00000000L) >> 16) | \ ((((u_int64_t)lun) & 0x00000000ffff0000L) << 16) | \ ((((u_int64_t)lun) & 0x000000000000ffffL) << 48)) /* * Maximum length for a CAM CDB. */ #define CAM_MAX_CDBLEN 16 /* * Definition of a CAM peripheral driver entry. Peripheral drivers instantiate * one of these for each device they wish to communicate with and pass it into * the xpt layer when they wish to schedule work on that device via the * xpt_schedule API. */ struct cam_periph; /* * Priority information for a CAM structure. */ typedef enum { CAM_RL_HOST, CAM_RL_BUS, CAM_RL_XPT, CAM_RL_DEV, CAM_RL_NORMAL, CAM_RL_VALUES } cam_rl; /* * The generation number is incremented every time a new entry is entered into * the queue giving round robin per priority level scheduling. */ typedef struct { u_int32_t priority; #define CAM_PRIORITY_HOST ((CAM_RL_HOST << 8) + 0x80) #define CAM_PRIORITY_BUS ((CAM_RL_BUS << 8) + 0x80) #define CAM_PRIORITY_XPT ((CAM_RL_XPT << 8) + 0x80) #define CAM_PRIORITY_DEV ((CAM_RL_DEV << 8) + 0x80) #define CAM_PRIORITY_OOB (CAM_RL_DEV << 8) #define CAM_PRIORITY_NORMAL ((CAM_RL_NORMAL << 8) + 0x80) #define CAM_PRIORITY_NONE (u_int32_t)-1 u_int32_t generation; int index; #define CAM_UNQUEUED_INDEX -1 #define CAM_ACTIVE_INDEX -2 #define CAM_DONEQ_INDEX -3 #define CAM_EXTRAQ_INDEX INT_MAX } cam_pinfo; /* * Macro to compare two generation numbers. It is used like this: * * if (GENERATIONCMP(a, >=, b)) * ...; * * GERERATIONCMP uses modular arithmetic to guard against wraps * wraps in the generation number. */ #define GENERATIONCMP(x, op, y) ((int32_t)((x) - (y)) op 0) /* CAM flags XXX Move to cam_periph.h ??? 
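The GENERATIONCMP() macro stays correct across counter wraparound because the unsigned subtraction followed by a cast to int32_t yields a signed distance, valid as long as the two generations differ by less than 2^31. A quick demonstration:

#include <stdint.h>
#include <stdio.h>

#define GENCMP(x, op, y)    ((int32_t)((x) - (y)) op 0)

int
main(void)
{
    uint32_t newer = 5;             /* wrapped past 2^32 */
    uint32_t older = 0xfffffff0u;   /* just before the wrap */

    /* A naive comparison gets this backwards; the modular one does not. */
    printf("naive: %d  modular: %d\n",
        newer >= older, GENCMP(newer, >=, older));  /* 0  1 */
    return (0);
}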
*/ typedef enum { CAM_FLAG_NONE = 0x00, CAM_EXPECT_INQ_CHANGE = 0x01, CAM_RETRY_SELTO = 0x02 /* Retry Selection Timeouts */ } cam_flags; enum { SF_RETRY_UA = 0x01, /* Retry UNIT ATTENTION conditions. */ SF_NO_PRINT = 0x02, /* Never print error status. */ SF_QUIET_IR = 0x04, /* Be quiet about Illegal Request responses */ SF_PRINT_ALWAYS = 0x08, /* Always print error status. */ SF_NO_RECOVERY = 0x10, /* Don't do active error recovery. */ SF_NO_RETRY = 0x20, /* Don't do any retries. */ SF_RETRY_BUSY = 0x40 /* Retry BUSY status. */ }; /* CAM Status field values */ typedef enum { /* CCB request is in progress */ CAM_REQ_INPROG = 0x00, /* CCB request completed without error */ CAM_REQ_CMP = 0x01, /* CCB request aborted by the host */ CAM_REQ_ABORTED = 0x02, /* Unable to abort CCB request */ CAM_UA_ABORT = 0x03, /* CCB request completed with an error */ CAM_REQ_CMP_ERR = 0x04, /* CAM subsystem is busy */ CAM_BUSY = 0x05, /* CCB request was invalid */ CAM_REQ_INVALID = 0x06, /* Supplied Path ID is invalid */ CAM_PATH_INVALID = 0x07, /* SCSI Device Not Installed/there */ CAM_DEV_NOT_THERE = 0x08, /* Unable to terminate I/O CCB request */ CAM_UA_TERMIO = 0x09, /* Target Selection Timeout */ CAM_SEL_TIMEOUT = 0x0a, /* Command timeout */ CAM_CMD_TIMEOUT = 0x0b, /* SCSI error, look at error code in CCB */ CAM_SCSI_STATUS_ERROR = 0x0c, /* Message Reject Received */ CAM_MSG_REJECT_REC = 0x0d, /* SCSI Bus Reset Sent/Received */ CAM_SCSI_BUS_RESET = 0x0e, /* Uncorrectable parity error occurred */ CAM_UNCOR_PARITY = 0x0f, /* Autosense: request sense cmd fail */ CAM_AUTOSENSE_FAIL = 0x10, /* No HBA Detected error */ CAM_NO_HBA = 0x11, /* Data Overrun error */ CAM_DATA_RUN_ERR = 0x12, /* Unexpected Bus Free */ CAM_UNEXP_BUSFREE = 0x13, /* Target Bus Phase Sequence Failure */ CAM_SEQUENCE_FAIL = 0x14, /* CCB length supplied is inadequate */ CAM_CCB_LEN_ERR = 0x15, /* Unable to provide requested capability*/ CAM_PROVIDE_FAIL = 0x16, /* A SCSI BDR msg was sent to target */ CAM_BDR_SENT = 0x17, /* CCB request terminated by the host */ CAM_REQ_TERMIO = 0x18, /* Unrecoverable Host Bus Adapter Error */ CAM_UNREC_HBA_ERROR = 0x19, /* Request was too large for this host */ CAM_REQ_TOO_BIG = 0x1a, /* * This request should be requeued to preserve * transaction ordering. This typically occurs * when the SIM recognizes an error that should * freeze the queue and must place additional * requests for the target at the sim level * back into the XPT queue. */ CAM_REQUEUE_REQ = 0x1b, /* ATA error, look at error code in CCB */ CAM_ATA_STATUS_ERROR = 0x1c, /* Initiator/Target Nexus lost. */ CAM_SCSI_IT_NEXUS_LOST = 0x1d, /* SMP error, look at error code in CCB */ CAM_SMP_STATUS_ERROR = 0x1e, /* * Command completed without error but exceeded the soft * timeout threshold. 
*/ CAM_REQ_SOFTTIMEOUT = 0x1f, /* * 0x20 - 0x32 are unassigned */ /* Initiator Detected Error */ CAM_IDE = 0x33, /* Resource Unavailable */ CAM_RESRC_UNAVAIL = 0x34, /* Unacknowledged Event by Host */ CAM_UNACKED_EVENT = 0x35, /* Message Received in Host Target Mode */ CAM_MESSAGE_RECV = 0x36, /* Invalid CDB received in Host Target Mode */ CAM_INVALID_CDB = 0x37, /* Lun supplied is invalid */ CAM_LUN_INVALID = 0x38, /* Target ID supplied is invalid */ CAM_TID_INVALID = 0x39, /* The requested function is not available */ CAM_FUNC_NOTAVAIL = 0x3a, /* Nexus is not established */ CAM_NO_NEXUS = 0x3b, /* The initiator ID is invalid */ CAM_IID_INVALID = 0x3c, /* The SCSI CDB has been received */ CAM_CDB_RECVD = 0x3d, /* The LUN is already enabled for target mode */ CAM_LUN_ALRDY_ENA = 0x3e, /* SCSI Bus Busy */ CAM_SCSI_BUSY = 0x3f, /* * Flags */ /* The DEV queue is frozen w/this err */ CAM_DEV_QFRZN = 0x40, /* Autosense data valid for target */ CAM_AUTOSNS_VALID = 0x80, /* SIM ready to take more commands */ CAM_RELEASE_SIMQ = 0x100, /* SIM has this command in its queue */ CAM_SIM_QUEUED = 0x200, /* Quality of service data is valid */ CAM_QOS_VALID = 0x400, /* Mask bits for just the status # */ CAM_STATUS_MASK = 0x3F, /* * Target Specific Adjunct Status */ /* sent sense with status */ CAM_SENT_SENSE = 0x40000000 } cam_status; typedef enum { CAM_ESF_NONE = 0x00, CAM_ESF_COMMAND = 0x01, CAM_ESF_CAM_STATUS = 0x02, CAM_ESF_PROTO_STATUS = 0x04, CAM_ESF_ALL = 0xff } cam_error_string_flags; typedef enum { CAM_EPF_NONE = 0x00, CAM_EPF_MINIMAL = 0x01, CAM_EPF_NORMAL = 0x02, CAM_EPF_ALL = 0x03, CAM_EPF_LEVEL_MASK = 0x0f /* All bits above bit 3 are protocol-specific */ } cam_error_proto_flags; typedef enum { CAM_ESF_PRINT_NONE = 0x00, CAM_ESF_PRINT_STATUS = 0x10, CAM_ESF_PRINT_SENSE = 0x20 } cam_error_scsi_flags; typedef enum { CAM_ESMF_PRINT_NONE = 0x00, CAM_ESMF_PRINT_STATUS = 0x10, CAM_ESMF_PRINT_FULL_CMD = 0x20, } cam_error_smp_flags; typedef enum { CAM_EAF_PRINT_NONE = 0x00, CAM_EAF_PRINT_STATUS = 0x10, CAM_EAF_PRINT_RESULT = 0x20 } cam_error_ata_flags; typedef enum { CAM_STRVIS_FLAG_NONE = 0x00, CAM_STRVIS_FLAG_NONASCII_MASK = 0x03, CAM_STRVIS_FLAG_NONASCII_TRIM = 0x00, CAM_STRVIS_FLAG_NONASCII_RAW = 0x01, CAM_STRVIS_FLAG_NONASCII_SPC = 0x02, CAM_STRVIS_FLAG_NONASCII_ESC = 0x03 } cam_strvis_flags; struct cam_status_entry { cam_status status_code; const char *status_text; }; extern const struct cam_status_entry cam_status_table[]; extern const int num_cam_status_entries; #ifdef _KERNEL extern int cam_sort_io_queues; #endif union ccb; struct sbuf; #ifdef SYSCTL_DECL /* from sysctl.h */ SYSCTL_DECL(_kern_cam); #endif __BEGIN_DECLS typedef int (cam_quirkmatch_t)(caddr_t, caddr_t); caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries, int entry_size, cam_quirkmatch_t *comp_func); void cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen); void cam_strvis_sbuf(struct sbuf *sb, const u_int8_t *src, int srclen, uint32_t flags); int cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len); const struct cam_status_entry* cam_fetch_status_entry(cam_status status); #ifdef _KERNEL char * cam_error_string(union ccb *ccb, char *str, int str_len, cam_error_string_flags flags, cam_error_proto_flags proto_flags); void cam_error_print(union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags); #else /* _KERNEL */ struct cam_device; char * cam_error_string(struct cam_device *device, union ccb *ccb, char *str, int str_len, cam_error_string_flags 
flags, cam_error_proto_flags proto_flags); void cam_error_print(struct cam_device *device, union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags, FILE *ofile); #endif /* _KERNEL */ __END_DECLS #ifdef _KERNEL static __inline void cam_init_pinfo(cam_pinfo *pinfo); static __inline void cam_init_pinfo(cam_pinfo *pinfo) { pinfo->priority = CAM_PRIORITY_NONE; pinfo->index = CAM_UNQUEUED_INDEX; } #endif #endif /* _CAM_CAM_H */ Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 326264) +++ head/sys/cam/cam_ccb.h (revision 326265) @@ -1,1543 +1,1545 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. 
*/ CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */ CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */ CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */ CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */ CAM_DATA_MASK = 0x00240010,/* Data type mask */ CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */ CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */ CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/ CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/ CAM_CDB_PHYS = 0x00400000,/* CDB pointer is physical */ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */ /* Phase cognizant mode flags */ CAM_DIS_AUTOSRP = 0x01000000,/* Disable autosave/restore ptrs */ CAM_DIS_AUTODISC = 0x02000000,/* Disable auto disconnect */ CAM_TGT_CCB_AVAIL = 0x04000000,/* Target CCB available */ CAM_TGT_PHASE_MODE = 0x08000000,/* The SIM runs in phase mode */ CAM_MSGB_VALID = 0x10000000,/* Message buffer valid */ CAM_STATUS_VALID = 0x20000000,/* Status buffer valid */ CAM_DATAB_VALID = 0x40000000,/* Data buffer valid */ /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. */ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */ CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ CAM_UNLOCKED = 0x80000000 /* Call callback without lock.
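
One CAM_DIR_* value and one CAM_DATA_* value are OR'd into the same flags word and later recovered with CAM_DIR_MASK and CAM_DATA_MASK. A minimal sketch of a DATA IN fill into a kernel virtual buffer follows; it is not part of this change, and mydone, buf, and len are hypothetical (cam_fill_csio() is defined near the end of this header):

cam_fill_csio(&ccb->csio,
    /*retries*/ 1,
    /*cbfcnp*/ mydone,                      /* hypothetical completion handler */
    /*flags*/ CAM_DIR_IN | CAM_DATA_VADDR,  /* one direction bit, one data-location bit */
    /*tag_action*/ MSG_SIMPLE_Q_TAG,
    /*data_ptr*/ buf,
    /*dxfer_len*/ len,
    /*sense_len*/ SSD_FULL_SIZE,
    /*cdb_len*/ 10,
    /*timeout*/ 30 * 1000);
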
*/ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device given the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values.
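
/*
 * The low byte of each value is the opcode proper; the XPT_FC_* bits
 * above are OR'd on top of it, which is why XPT_SCSI_IO (0x01 |
 * XPT_FC_DEV_QUEUED) also tests true against XPT_FC_QUEUED.  The
 * XPT_FC_IS_QUEUED()/XPT_FC_IS_DEV_QUEUED() macros defined after this
 * enum encode exactly that test.
 */
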
*/ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe I/O operation */ XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED, /* Placeholder for MMC / SD / SDIO I/O stuff */ XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe Admin operation */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Query device capacity and notify GEOM */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ PROTO_NVME, /* NVME */ PROTO_MMCSD, /* MMC, SD, SDIO */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ XPORT_NVME, /* NVMe over PCIe */ XPORT_MMCSD, /* MMC, SD, SDIO card */ } cam_xport; #define XPORT_IS_NVME(t) ((t) == XPORT_NVME) #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? 
DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; void *padding[2]; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { 
path_id_t path_id; target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; struct mmc_params mmc_ident_data; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_nvme { uint32_t nsid; /* Namespace ID for this path */ }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int16_t target_sprt; /* Flags for target mode support */ u_int32_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; struct ccb_pathinq_settings_nvme nvme; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible.
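
A hypothetical SMP exchange through this CCB, using the cam_fill_smpio() helper declared later in this header; the helper asserts CAM_DIR_BOTH plus valid, non-empty request and response buffers. The req and rsp frames are assumed, and the completion callback and error handling are omitted:

uint8_t req[8], rsp[32];    /* hypothetical SMP request/response frames */

cam_fill_smpio(&ccb->smpio,
    /*retries*/ 1,
    /*cbfcnp*/ NULL,
    /*flags*/ CAM_DIR_BOTH,
    req, sizeof(req),
    rsp, sizeof(rsp),
    /*timeout*/ 5 * 1000);
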
*/ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB to send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initiator (target mode) */ u_int init_id; /* initiator id of who selected */ #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct bio *bio; /* Associated bio */ #endif }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. */ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 uint32_t aux; uint32_t unused; }; /* * MMC I/O Request CCB used for the XPT_MMC_IO function code. */ struct ccb_mmcio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct mmc_command cmd; struct mmc_command stop; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ u_int tag_id; /* tag id from initiator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; static __inline uint8_t * atio_cdb_ptr(struct ccb_accept_tio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes. */ struct ccb_nvmeio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct nvme_command cmd; /* NVME command, per NVME standard */ struct nvme_completion cpl; /* NVME completion, per NVME standard */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint16_t sglist_cnt; /* Number of SG list entries */ uint16_t unused; /* padding for removed uint32_t */ }; /* * Definitions for the asynchronous callback CCB fields. */ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advanced info might have changed */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed back which are then interpreted based on a * per-system contract number.
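
A sketch (the handler name is hypothetical) of an ac_callback_t consumer for the AC_CONTRACT code, matching the ac_contract layout defined just below; such a handler would be registered through the ccb_setasync CCB (event_enable, callback, callback_arg) later in this header:

static void
example_async_cb(void *softc, u_int32_t code, struct cam_path *path,
    void *args)
{
	if (code == AC_CONTRACT) {
		struct ac_contract *ct = (struct ac_contract *)args;

		if (ct->contract_number == AC_CONTRACT_DEV_CHG) {
			/* contract_data embeds a struct ac_device_changed */
		}
	}
}
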
*/ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. 
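
The caps word is split between the two ends of the link: host capabilities occupy the low 16 bits (CTS_SATA_CAPS_H) and device capabilities the high 16 bits (CTS_SATA_CAPS_D), as the masks just below show, so a feature is usable only when both matching bits are set. A minimal sketch:

int use_pm = (caps & CTS_SATA_CAPS_H_PMREQ) != 0 &&
    (caps & CTS_SATA_CAPS_D_PMREQ) != 0;	/* both ends advertise PM requests */
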
*/ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; struct ccb_trans_settings_nvme { u_int valid; /* Which fields to honor */ #define CTS_NVME_VALID_SPEC 0x01 #define CTS_NVME_VALID_CAPS 0x02 #define CTS_NVME_VALID_LINK 0x04 uint32_t spec; /* NVMe spec implemented -- same as vs register */ uint32_t max_xfer; /* Max transfer size (0 -> unlimited) */ uint32_t caps; uint8_t lanes; /* Number of PCIe lanes */ uint8_t speed; /* PCIe generation for each lane */ uint8_t max_lanes; /* Number of PCIe lanes */ uint8_t max_speed; /* PCIe generation for each lane */ }; #include struct ccb_trans_settings_mmc { struct mmc_ios ios; #define MMC_CLK (1 << 1) #define MMC_VDD (1 << 2) #define MMC_CS (1 << 3) #define MMC_BW (1 << 4) #define MMC_PM (1 << 5) #define MMC_BT (1 << 6) #define MMC_BM (1 << 7) uint32_t ios_valid; /* The following is used only for GET_TRAN_SETTINGS */ uint32_t host_ocr; int host_f_min; int host_f_max; #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */ #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */ #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */ uint32_t host_caps; }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; struct ccb_trans_settings_nvme nvme; struct ccb_trans_settings_mmc mmc; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; struct ccb_trans_settings_nvme nvme; } xport_specific; }; /* * Calculate the geometry parameters for a device * given the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun.
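
As a usage note for the transfer-settings CCB defined above, a sketch of how it is typically driven, assuming the CCB's path and priority were already initialized (e.g. via xpt_setup_ccb(), not shown):

struct ccb_trans_settings cts;

bzero(&cts, sizeof(cts));
cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
cts.type = CTS_TYPE_CURRENT_SETTINGS;
xpt_action((union ccb *)&cts);
if (cts.transport == XPORT_SATA &&
    (cts.xport_specific.sata.valid & CTS_SATA_VALID_REVISION) != 0) {
	/* cts.xport_specific.sata.revision holds the negotiated revision */
}
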
*/ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. */ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Response information */ /* * Lower byte of arg is one of RESPONSE CODE values defined below * (subset of response codes from SPL-4 and FCP-4 specifications), * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION. */ #define CAM_RSP_TMF_COMPLETE 0x00 #define CAM_RSP_TMF_REJECTED 0x04 #define CAM_RSP_TMF_FAILED 0x05 #define CAM_RSP_TMF_SUCCEEDED 0x08 #define CAM_RSP_TMF_INCORRECT_LUN 0x09 }; /* HBA engine structures. */ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE.
Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz. */ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 #define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */ #define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */ off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; struct ccb_nvmeio nvmeio; struct ccb_mmcio mmcio; }; #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \ bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \ sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h)) __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout); static __inline void cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, 
union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout); static __inline void cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags, struct mmc_data *mmc_d, uint32_t timeout); static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) csio->bio = NULL; #endif } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action __unused, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags, struct mmc_data *mmc_d, uint32_t timeout) { mmcio->ccb_h.func_code = XPT_MMC_IO; mmcio->ccb_h.flags = flags; mmcio->ccb_h.retry_count = retries; mmcio->ccb_h.cbfcnp = cbfcnp; mmcio->ccb_h.timeout = timeout; mmcio->cmd.opcode = mmc_opcode; mmcio->cmd.arg = mmc_arg; 
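
/*
 * Everything from here down is initialized explicitly so that a
 * recycled CCB cannot carry a stale stop command, data pointer, or
 * response words into the next request.
 */
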
mmcio->cmd.flags = mmc_flags; mmcio->stop.opcode = 0; mmcio->stop.arg = 0; mmcio->stop.flags = 0; if (mmc_d != NULL) { mmcio->cmd.data = mmc_d; } else mmcio->cmd.data = NULL; mmcio->cmd.resp[0] = 0; mmcio->cmd.resp[1] = 0; mmcio->cmd.resp[2] = 0; mmcio->cmd.resp[3] = 0; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); static __inline void cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_IO; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } static __inline void cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_ADMIN; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: head/sys/cam/cam_compat.c =================================================================== --- head/sys/cam/cam_compat.c (revision 326264) +++ head/sys/cam/cam_compat.c (revision 326265) @@ -1,421 +1,423 @@ /*- * CAM ioctl compatibility shims * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2013 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_cam.h" static int cam_compat_handle_0x17(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td, d_ioctl_t *cbfnp); static int cam_compat_handle_0x18(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td, d_ioctl_t *cbfnp); static int cam_compat_translate_dev_match_0x18(union ccb *ccb); int cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td, d_ioctl_t *cbfnp) { int error; switch (cmd) { case CAMIOCOMMAND_0x16: { struct ccb_hdr_0x17 *hdr17; hdr17 = (struct ccb_hdr_0x17 *)addr; if (hdr17->flags & CAM_SG_LIST_PHYS_0x16) { hdr17->flags &= ~CAM_SG_LIST_PHYS_0x16; hdr17->flags |= CAM_DATA_SG_PADDR; } if (hdr17->flags & CAM_DATA_PHYS_0x16) { hdr17->flags &= ~CAM_DATA_PHYS_0x16; hdr17->flags |= CAM_DATA_PADDR; } if (hdr17->flags & CAM_SCATTER_VALID_0x16) { hdr17->flags &= ~CAM_SCATTER_VALID_0x16; hdr17->flags |= CAM_DATA_SG; } cmd = CAMIOCOMMAND; error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp); break; } case CAMGETPASSTHRU_0x16: cmd = CAMGETPASSTHRU; error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp); break; case CAMIOCOMMAND_0x17: cmd = CAMIOCOMMAND; error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp); break; case CAMGETPASSTHRU_0x17: cmd = CAMGETPASSTHRU; error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp); break; case CAMIOCOMMAND_0x18: cmd = CAMIOCOMMAND; error = cam_compat_handle_0x18(dev, cmd, addr, flag, td, cbfnp); break; case CAMGETPASSTHRU_0x18: cmd = CAMGETPASSTHRU; error = cam_compat_handle_0x18(dev, cmd, addr, flag, td, cbfnp); break; default: error = ENOTTY; } return (error); } static int cam_compat_handle_0x17(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td, d_ioctl_t *cbfnp) { union ccb *ccb; struct ccb_hdr *hdr; struct ccb_hdr_0x17 *hdr17; uint8_t *ccbb, *ccbb17; u_int error; hdr17 = (struct ccb_hdr_0x17 *)addr; ccb = xpt_alloc_ccb(); hdr = &ccb->ccb_h; hdr->pinfo = hdr17->pinfo; hdr->xpt_links = hdr17->xpt_links; hdr->sim_links = hdr17->sim_links; hdr->periph_links = hdr17->periph_links; hdr->retry_count = hdr17->retry_count; hdr->cbfcnp = hdr17->cbfcnp; hdr->func_code = hdr17->func_code; hdr->status = hdr17->status; hdr->path = hdr17->path; hdr->path_id = hdr17->path_id; hdr->target_id = hdr17->target_id; hdr->target_lun = hdr17->target_lun; hdr->flags = hdr17->flags; hdr->xflags = 0; hdr->periph_priv = hdr17->periph_priv; hdr->sim_priv = hdr17->sim_priv; hdr->timeout = hdr17->timeout; hdr->softtimeout.tv_sec = 0; hdr->softtimeout.tv_usec = 0; ccbb = (uint8_t *)&hdr[1]; ccbb17 = (uint8_t *)&hdr17[1]; if (ccb->ccb_h.func_code == XPT_SET_TRAN_SETTINGS) { struct ccb_trans_settings *cts; struct ccb_trans_settings_0x17 *cts17; cts = &ccb->cts; cts17 = (struct ccb_trans_settings_0x17 *)hdr17; cts->type = cts17->type; cts->protocol = cts17->protocol; cts->protocol_version = cts17->protocol_version; cts->transport = cts17->transport; cts->transport_version = cts17->transport_version; bcopy(&cts17->proto_specific, &cts->proto_specific, sizeof(cts17->proto_specific)); bcopy(&cts17->xport_specific, &cts->xport_specific, sizeof(cts17->xport_specific)); } else { bcopy(ccbb17, ccbb, CAM_0X17_DATA_LEN); } error = (cbfnp)(dev, cmd, (caddr_t)ccb, flag, td); /* Copy the possibly-updated header back out to the user's 0x17 CCB. */ hdr17->pinfo = hdr->pinfo; hdr17->xpt_links = hdr->xpt_links; hdr17->sim_links = hdr->sim_links;
hdr17->periph_links = hdr->periph_links; hdr17->retry_count = hdr->retry_count; hdr17->cbfcnp = hdr->cbfcnp; hdr17->func_code = hdr->func_code; hdr17->status = hdr->status; hdr17->path = hdr->path; hdr17->path_id = hdr->path_id; hdr17->target_id = hdr->target_id; hdr17->target_lun = hdr->target_lun; hdr17->flags = hdr->flags; hdr17->periph_priv = hdr->periph_priv; hdr17->sim_priv = hdr->sim_priv; hdr17->timeout = hdr->timeout; if (ccb->ccb_h.func_code == XPT_PATH_INQ) { struct ccb_pathinq *cpi; struct ccb_pathinq_0x17 *cpi17; /* The PATH_INQ only needs special handling on the way out */ cpi = &ccb->cpi; cpi17 = (struct ccb_pathinq_0x17 *)hdr17; cpi17->version_num = cpi->version_num; cpi17->hba_inquiry = cpi->hba_inquiry; cpi17->target_sprt = (u_int8_t)cpi->target_sprt; cpi17->hba_misc = (u_int8_t)cpi->hba_misc; cpi17->hba_eng_cnt = cpi->hba_eng_cnt; bcopy(&cpi->vuhba_flags[0], &cpi17->vuhba_flags[0], VUHBALEN); cpi17->max_target = cpi->max_target; cpi17->max_lun = cpi->max_lun; cpi17->async_flags = cpi->async_flags; cpi17->hpath_id = cpi->hpath_id; cpi17->initiator_id = cpi->initiator_id; bcopy(&cpi->sim_vid[0], &cpi17->sim_vid[0], SIM_IDLEN); bcopy(&cpi->hba_vid[0], &cpi17->hba_vid[0], HBA_IDLEN); bcopy(&cpi->dev_name[0], &cpi17->dev_name[0], DEV_IDLEN); cpi17->unit_number = cpi->unit_number; cpi17->bus_id = cpi->bus_id; cpi17->base_transfer_speed = cpi->base_transfer_speed; cpi17->protocol = cpi->protocol; cpi17->protocol_version = cpi->protocol_version; cpi17->transport = cpi->transport; cpi17->transport_version = cpi->transport_version; bcopy(&cpi->xport_specific, &cpi17->xport_specific, PATHINQ_SETTINGS_SIZE); cpi17->maxio = cpi->maxio; cpi17->hba_vendor = cpi->hba_vendor; cpi17->hba_device = cpi->hba_device; cpi17->hba_subvendor = cpi->hba_subvendor; cpi17->hba_subdevice = cpi->hba_subdevice; } else if (ccb->ccb_h.func_code == XPT_GET_TRAN_SETTINGS) { struct ccb_trans_settings *cts; struct ccb_trans_settings_0x17 *cts17; cts = &ccb->cts; cts17 = (struct ccb_trans_settings_0x17 *)hdr17; cts17->type = cts->type; cts17->protocol = cts->protocol; cts17->protocol_version = cts->protocol_version; cts17->transport = cts->transport; cts17->transport_version = cts->transport_version; bcopy(&cts->proto_specific, &cts17->proto_specific, sizeof(cts17->proto_specific)); bcopy(&cts->xport_specific, &cts17->xport_specific, sizeof(cts17->xport_specific)); } else if (ccb->ccb_h.func_code == XPT_DEV_MATCH) { /* Copy the rest of the header over */ bcopy(ccbb, ccbb17, CAM_0X17_DATA_LEN); cam_compat_translate_dev_match_0x18(ccb); } else { bcopy(ccbb, ccbb17, CAM_0X17_DATA_LEN); } xpt_free_ccb(ccb); return (error); } static int cam_compat_handle_0x18(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td, d_ioctl_t *cbfnp) { union ccb *ccb; struct ccb_hdr *hdr; struct ccb_hdr_0x18 *hdr18; uint8_t *ccbb, *ccbb18; u_int error; hdr18 = (struct ccb_hdr_0x18 *)addr; ccb = xpt_alloc_ccb(); hdr = &ccb->ccb_h; hdr->pinfo = hdr18->pinfo; hdr->xpt_links = hdr18->xpt_links; hdr->sim_links = hdr18->sim_links; hdr->periph_links = hdr18->periph_links; hdr->retry_count = hdr18->retry_count; hdr->cbfcnp = hdr18->cbfcnp; hdr->func_code = hdr18->func_code; hdr->status = hdr18->status; hdr->path = hdr18->path; hdr->path_id = hdr18->path_id; hdr->target_id = hdr18->target_id; hdr->target_lun = hdr18->target_lun; if (hdr18->xflags & CAM_EXTLUN_VALID_0x18) hdr->target_lun = hdr18->ext_lun; hdr->flags = hdr18->flags; hdr->xflags = hdr18->xflags; hdr->periph_priv = hdr18->periph_priv; hdr->sim_priv = 
hdr18->sim_priv; hdr->timeout = hdr18->timeout; hdr->softtimeout.tv_sec = 0; hdr->softtimeout.tv_usec = 0; ccbb = (uint8_t *)&hdr[1]; ccbb18 = (uint8_t *)&hdr18[1]; if (ccb->ccb_h.func_code == XPT_SET_TRAN_SETTINGS) { struct ccb_trans_settings *cts; struct ccb_trans_settings_0x18 *cts18; cts = &ccb->cts; cts18 = (struct ccb_trans_settings_0x18 *)hdr18; cts->type = cts18->type; cts->protocol = cts18->protocol; cts->protocol_version = cts18->protocol_version; cts->transport = cts18->transport; cts->transport_version = cts18->transport_version; bcopy(&cts18->proto_specific, &cts->proto_specific, sizeof(cts18->proto_specific)); bcopy(&cts18->xport_specific, &cts->xport_specific, sizeof(cts18->xport_specific)); } else { bcopy(ccbb18, ccbb, CAM_0X18_DATA_LEN); } error = (cbfnp)(dev, cmd, (caddr_t)ccb, flag, td); hdr18->pinfo = hdr->pinfo; hdr18->xpt_links = hdr->xpt_links; hdr18->sim_links = hdr->sim_links; hdr18->periph_links = hdr->periph_links; hdr18->retry_count = hdr->retry_count; hdr18->cbfcnp = hdr->cbfcnp; hdr18->func_code = hdr->func_code; hdr18->status = hdr->status; hdr18->path = hdr->path; hdr18->path_id = hdr->path_id; hdr18->target_id = hdr->target_id; hdr18->target_lun = hdr->target_lun; hdr18->ext_lun = hdr->target_lun; hdr18->flags = hdr->flags; hdr18->xflags = hdr->xflags | CAM_EXTLUN_VALID_0x18; hdr18->periph_priv = hdr->periph_priv; hdr18->sim_priv = hdr->sim_priv; hdr18->timeout = hdr->timeout; if (ccb->ccb_h.func_code == XPT_GET_TRAN_SETTINGS) { struct ccb_trans_settings *cts; struct ccb_trans_settings_0x18 *cts18; cts = &ccb->cts; cts18 = (struct ccb_trans_settings_0x18 *)hdr18; cts18->type = cts->type; cts18->protocol = cts->protocol; cts18->protocol_version = cts->protocol_version; cts18->transport = cts->transport; cts18->transport_version = cts->transport_version; bcopy(&cts->proto_specific, &cts18->proto_specific, sizeof(cts18->proto_specific)); bcopy(&cts->xport_specific, &cts18->xport_specific, sizeof(cts18->xport_specific)); } else if (ccb->ccb_h.func_code == XPT_DEV_MATCH) { bcopy(ccbb, ccbb18, CAM_0X18_DATA_LEN); cam_compat_translate_dev_match_0x18(ccb); } else { bcopy(ccbb, ccbb18, CAM_0X18_DATA_LEN); } xpt_free_ccb(ccb); return (error); } static int cam_compat_translate_dev_match_0x18(union ccb *ccb) { struct dev_match_result *dm; struct dev_match_result_0x18 *dm18; struct cam_periph_map_info mapinfo; int i; /* Remap the CCB into kernel address space */ bzero(&mapinfo, sizeof(mapinfo)); cam_periph_mapmem(ccb, &mapinfo, MAXPHYS); dm = ccb->cdm.matches; /* Translate in-place: old fields are smaller */ dm18 = (struct dev_match_result_0x18 *)(dm); for (i = 0; i < ccb->cdm.num_matches; i++) { dm18[i].type = dm[i].type; switch (dm[i].type) { case DEV_MATCH_PERIPH: memcpy(&dm18[i].result.periph_result.periph_name, &dm[i].result.periph_result.periph_name, DEV_IDLEN); dm18[i].result.periph_result.unit_number = dm[i].result.periph_result.unit_number; dm18[i].result.periph_result.path_id = dm[i].result.periph_result.path_id; dm18[i].result.periph_result.target_id = dm[i].result.periph_result.target_id; dm18[i].result.periph_result.target_lun = dm[i].result.periph_result.target_lun; break; case DEV_MATCH_DEVICE: dm18[i].result.device_result.path_id = dm[i].result.device_result.path_id; dm18[i].result.device_result.target_id = dm[i].result.device_result.target_id; dm18[i].result.device_result.target_lun = dm[i].result.device_result.target_lun; dm18[i].result.device_result.protocol = dm[i].result.device_result.protocol; memcpy(&dm18[i].result.device_result.inq_data, 
&dm[i].result.device_result.inq_data, sizeof(struct scsi_inquiry_data)); memcpy(&dm18[i].result.device_result.ident_data, &dm[i].result.device_result.ident_data, sizeof(struct ata_params)); dm18[i].result.device_result.flags = dm[i].result.device_result.flags; break; case DEV_MATCH_BUS: memcpy(&dm18[i].result.bus_result, &dm[i].result.bus_result, sizeof(struct bus_match_result)); break; } } cam_periph_unmapmem(ccb, &mapinfo); return (0); } Index: head/sys/cam/cam_compat.h =================================================================== --- head/sys/cam/cam_compat.h (revision 326264) +++ head/sys/cam/cam_compat.h (revision 326265) @@ -1,223 +1,225 @@ /*- * CAM ioctl compatibility shims * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2013 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_COMPAT_H #define _CAM_CAM_COMPAT_H /* No user-serviceable parts in here. 
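 *
 * These shims exist so that binaries built against an older CAM ABI keep
 * working across the 0x17 and 0x18 CCB layout changes.  As a rough,
 * illustrative sketch (not part of this change, and the in-tree dispatch
 * may differ), a CAM ioctl handler can route an old-ABI request to the
 * shim by keying off the version byte that _IOC() encodes into the
 * command word:
 *
 *	if (IOCGROUP(cmd) == CAM_VERSION_0x16 ||
 *	    IOCGROUP(cmd) == CAM_VERSION_0x17 ||
 *	    IOCGROUP(cmd) == CAM_VERSION_0x18)
 *		return (cam_compat_ioctl(dev, cmd, addr, flag, td, cbfnp));
 *
 * IOCGROUP() is the group-byte accessor from sys/ioccom.h.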
*/ #ifdef _KERNEL int cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td, int(*cbfnp)(struct cdev *, u_long, caddr_t, int, struct thread *)); /* Version 0x16 compatibility */ #define CAM_VERSION_0x16 0x16 /* The size of the union ccb didn't change when going to 0x17 */ #define CAMIOCOMMAND_0x16 _IOC(IOC_INOUT, CAM_VERSION_0x16, 2, CAM_0X17_LEN) #define CAMGETPASSTHRU_0x16 _IOC(IOC_INOUT, CAM_VERSION_0x16, 3, CAM_0X17_LEN) #define CAM_SCATTER_VALID_0x16 0x00000010 #define CAM_SG_LIST_PHYS_0x16 0x00040000 #define CAM_DATA_PHYS_0x16 0x00200000 /* Version 0x17 compatibility */ #define CAM_VERSION_0x17 0x17 struct ccb_hdr_0x17 { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ u_int target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; u_int32_t timeout; /* Hard timeout value in seconds */ struct callout_handle timeout_ch; }; struct ccb_pathinq_0x17 { struct ccb_hdr_0x17 ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int8_t target_sprt; /* Flags for target mode support */ u_int8_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. 
*/ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; struct ccb_trans_settings_0x17 { struct ccb_hdr_0x17 ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; } xport_specific; }; #define CAM_0X17_DATA_LEN CAM_0X18_DATA_LEN #define CAM_0X17_LEN (sizeof(struct ccb_hdr_0x17) + CAM_0X17_DATA_LEN) #define CAMIOCOMMAND_0x17 _IOC(IOC_INOUT, CAM_VERSION_0x17, 2, CAM_0X17_LEN) #define CAMGETPASSTHRU_0x17 _IOC(IOC_INOUT, CAM_VERSION_0x17, 3, CAM_0X17_LEN) /* Version 0x18 compatibility */ #define CAM_VERSION_0x18 0x18 struct ccb_hdr_0x18 { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ u_int target_lun; /* Target LUN number */ u_int64_t ext_lun; /* 64-bit LUN, more or less */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* extended ccb_flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in seconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; typedef enum { CAM_EXTLUN_VALID_0x18 = 0x00000001,/* 64bit lun field is valid */ } ccb_xflags_0x18; struct ccb_trans_settings_0x18 { struct ccb_hdr_0x18 ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; } xport_specific; }; struct dev_match_result_0x18 { dev_match_type type; union { struct { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; u_int target_lun; } periph_result; struct { path_id_t path_id; target_id_t target_id; u_int target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; } device_result; struct bus_match_result bus_result; } result; }; #define CAM_0X18_DATA_LEN (sizeof(union ccb) - 2*sizeof(void *) - sizeof(struct ccb_hdr)) #define CAM_0X18_LEN (sizeof(struct ccb_hdr_0x18) + CAM_0X18_DATA_LEN) #define CAMIOCOMMAND_0x18 _IOC(IOC_INOUT, CAM_VERSION_0x18, 2, CAM_0X18_LEN) #define CAMGETPASSTHRU_0x18 _IOC(IOC_INOUT, CAM_VERSION_0x18, 3, CAM_0X18_LEN) #endif #endif Index: head/sys/cam/cam_debug.h 
=================================================================== --- head/sys/cam/cam_debug.h (revision 326264) +++ head/sys/cam/cam_debug.h (revision 326265) @@ -1,138 +1,140 @@ /*- * Macros for tracing/logging information in the CAM layer * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_DEBUG_H #define _CAM_CAM_DEBUG_H 1 /* * Debugging flags. */ typedef enum { CAM_DEBUG_NONE = 0x00, /* no debugging */ CAM_DEBUG_INFO = 0x01, /* scsi commands, errors, data */ CAM_DEBUG_TRACE = 0x02, /* routine flow tracking */ CAM_DEBUG_SUBTRACE = 0x04, /* internal to routine flows */ CAM_DEBUG_CDB = 0x08, /* print out SCSI CDBs only */ CAM_DEBUG_XPT = 0x10, /* print out xpt scheduling */ CAM_DEBUG_PERIPH = 0x20, /* print out peripheral calls */ CAM_DEBUG_PROBE = 0x40 /* print out probe actions */ } cam_debug_flags; #if defined(_KERNEL) #ifndef CAM_DEBUG_FLAGS #define CAM_DEBUG_FLAGS CAM_DEBUG_NONE #endif #ifndef CAM_DEBUG_COMPILE #ifdef CAMDEBUG #define CAM_DEBUG_COMPILE (-1) #else #define CAM_DEBUG_COMPILE (CAM_DEBUG_INFO | CAM_DEBUG_CDB | \ CAM_DEBUG_PERIPH | CAM_DEBUG_PROBE | \ CAM_DEBUG_FLAGS) #endif #endif #ifndef CAM_DEBUG_BUS #define CAM_DEBUG_BUS CAM_BUS_WILDCARD #endif #ifndef CAM_DEBUG_TARGET #define CAM_DEBUG_TARGET CAM_TARGET_WILDCARD #endif #ifndef CAM_DEBUG_LUN #define CAM_DEBUG_LUN CAM_LUN_WILDCARD #endif #ifndef CAM_DEBUG_DELAY #define CAM_DEBUG_DELAY 0 #endif /* Path we want to debug */ extern struct cam_path *cam_dpath; /* Current debug levels set */ extern u_int32_t cam_dflags; /* Printf delay value (to prevent scrolling) */ extern u_int32_t cam_debug_delay; /* Debugging macros.
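 *
 * Typical usage, assuming a periph with a valid path (the "foo" name
 * below is hypothetical), looks like:
 *
 *	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("foostart\n"));
 *
 * The printf-style arguments travel as a single parenthesized argument
 * so that the macros can expand them directly into a printf() call.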
*/ #define CAM_DEBUGGED(path, flag) \ (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags) \ && (cam_dpath != NULL) \ && (xpt_path_comp(cam_dpath, path) >= 0) \ && (xpt_path_comp(cam_dpath, path) < 2)) #define CAM_DEBUG(path, flag, printfargs) \ if (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags) \ && (cam_dpath != NULL) \ && (xpt_path_comp(cam_dpath, path) >= 0) \ && (xpt_path_comp(cam_dpath, path) < 2)) { \ xpt_print_path(path); \ printf printfargs; \ if (cam_debug_delay != 0) \ DELAY(cam_debug_delay); \ } #define CAM_DEBUG_DEV(dev, flag, printfargs) \ if (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags) \ && (cam_dpath != NULL) \ && (xpt_path_comp_dev(cam_dpath, dev) >= 0) \ && (xpt_path_comp_dev(cam_dpath, dev) < 2)) { \ xpt_print_device(dev); \ printf printfargs; \ if (cam_debug_delay != 0) \ DELAY(cam_debug_delay); \ } #define CAM_DEBUG_PRINT(flag, printfargs) \ if (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags)) { \ printf("cam_debug: "); \ printf printfargs; \ if (cam_debug_delay != 0) \ DELAY(cam_debug_delay); \ } #define CAM_DEBUG_PATH_PRINT(flag, path, printfargs) \ if (((flag) & (CAM_DEBUG_COMPILE) & cam_dflags)) { \ xpt_print(path, "cam_debug: "); \ printf printfargs; \ if (cam_debug_delay != 0) \ DELAY(cam_debug_delay); \ } #else /* !_KERNEL */ #define CAM_DEBUGGED(A, B) 0 #define CAM_DEBUG(A, B, C) #define CAM_DEBUG_PRINT(A, B) #define CAM_DEBUG_PATH_PRINT(A, B, C) #endif /* _KERNEL */ #endif /* _CAM_CAM_DEBUG_H */ Index: head/sys/cam/cam_periph.c =================================================================== --- head/sys/cam/cam_periph.c (revision 326264) +++ head/sys/cam/cam_periph.c (revision 326265) @@ -1,2023 +1,2025 @@ /*- * Common functions for CAM "type" (peripheral) drivers. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1998 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static u_int camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, path_id_t pathid, target_id_t target, lun_id_t lun); static u_int camperiphunit(struct periph_driver *p_drv, path_id_t pathid, target_id_t target, lun_id_t lun); static void camperiphdone(struct cam_periph *periph, union ccb *done_ccb); static void camperiphfree(struct cam_periph *periph); static int camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string); static int camperiphscsisenseerror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string); static void cam_periph_devctl_notify(union ccb *ccb); static int nperiph_drivers; static int initialized = 0; struct periph_driver **periph_drivers; static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers"); static int periph_selto_delay = 1000; TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay); static int periph_noresrc_delay = 500; TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay); static int periph_busy_delay = 500; TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); void periphdriver_register(void *data) { struct periph_driver *drv = (struct periph_driver *)data; struct periph_driver **newdrivers, **old; int ndrivers; again: ndrivers = nperiph_drivers + 2; newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH, M_WAITOK); xpt_lock_buses(); if (ndrivers != nperiph_drivers + 2) { /* * Lost race against itself; go around. */ xpt_unlock_buses(); free(newdrivers, M_CAMPERIPH); goto again; } if (periph_drivers) bcopy(periph_drivers, newdrivers, sizeof(*newdrivers) * nperiph_drivers); newdrivers[nperiph_drivers] = drv; newdrivers[nperiph_drivers + 1] = NULL; old = periph_drivers; periph_drivers = newdrivers; nperiph_drivers++; xpt_unlock_buses(); if (old) free(old, M_CAMPERIPH); /* If driver marked as early or it is late now, initialize it. */ if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || initialized > 1) (*drv->init)(); } int periphdriver_unregister(void *data) { struct periph_driver *drv = (struct periph_driver *)data; int error, n; /* If driver marked as early or it is late now, deinitialize it. */ if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || initialized > 1) { if (drv->deinit == NULL) { printf("CAM periph driver '%s' doesn't have deinit.\n", drv->driver_name); return (EOPNOTSUPP); } error = drv->deinit(); if (error != 0) return (error); } xpt_lock_buses(); for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++) ; KASSERT(n < nperiph_drivers, ("Periph driver '%s' was not registered", drv->driver_name)); for (; n + 1 < nperiph_drivers; n++) periph_drivers[n] = periph_drivers[n + 1]; periph_drivers[n + 1] = NULL; nperiph_drivers--; xpt_unlock_buses(); return (0); } void periphdriver_init(int level) { int i, early; initialized = max(initialized, level); for (i = 0; periph_drivers[i] != NULL; i++) { early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 
1 : 2; if (early == initialized) (*periph_drivers[i]->init)(); } } cam_status cam_periph_alloc(periph_ctor_t *periph_ctor, periph_oninv_t *periph_oninvalidate, periph_dtor_t *periph_dtor, periph_start_t *periph_start, char *name, cam_periph_type type, struct cam_path *path, ac_callback_t *ac_callback, ac_code code, void *arg) { struct periph_driver **p_drv; struct cam_sim *sim; struct cam_periph *periph; struct cam_periph *cur_periph; path_id_t path_id; target_id_t target_id; lun_id_t lun_id; cam_status status; u_int init_level; init_level = 0; /* * Handle Hot-Plug scenarios. If there is already a peripheral * of our type assigned to this path, we are likely waiting for * final close on an old, invalidated, peripheral. If this is * the case, queue up a deferred call to the peripheral's async * handler. If it looks like a mistaken re-allocation, complain. */ if ((periph = cam_periph_find(path, name)) != NULL) { if ((periph->flags & CAM_PERIPH_INVALID) != 0 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; periph->deferred_callback = ac_callback; periph->deferred_ac = code; return (CAM_REQ_INPROG); } else { printf("cam_periph_alloc: attempt to re-allocate " "valid device %s%d rejected flags %#x " "refcount %d\n", periph->periph_name, periph->unit_number, periph->flags, periph->refcount); } return (CAM_REQ_INVALID); } periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH, M_NOWAIT|M_ZERO); if (periph == NULL) return (CAM_RESRC_UNAVAIL); init_level++; sim = xpt_path_sim(path); path_id = xpt_path_path_id(path); target_id = xpt_path_target_id(path); lun_id = xpt_path_lun_id(path); periph->periph_start = periph_start; periph->periph_dtor = periph_dtor; periph->periph_oninval = periph_oninvalidate; periph->type = type; periph->periph_name = name; periph->scheduled_priority = CAM_PRIORITY_NONE; periph->immediate_priority = CAM_PRIORITY_NONE; periph->refcount = 1; /* Dropped by invalidation. 
*/ periph->sim = sim; SLIST_INIT(&periph->ccb_list); status = xpt_create_path(&path, periph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) goto failure; periph->path = path; xpt_lock_buses(); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (strcmp((*p_drv)->driver_name, name) == 0) break; } if (*p_drv == NULL) { printf("cam_periph_alloc: invalid periph name '%s'\n", name); xpt_unlock_buses(); xpt_free_path(periph->path); free(periph, M_CAMPERIPH); return (CAM_REQ_INVALID); } periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); cur_periph = TAILQ_FIRST(&(*p_drv)->units); while (cur_periph != NULL && cur_periph->unit_number < periph->unit_number) cur_periph = TAILQ_NEXT(cur_periph, unit_links); if (cur_periph != NULL) { KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list")); TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); } else { TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); (*p_drv)->generation++; } xpt_unlock_buses(); init_level++; status = xpt_add_periph(periph); if (status != CAM_REQ_CMP) goto failure; init_level++; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n")); status = periph_ctor(periph, arg); if (status == CAM_REQ_CMP) init_level++; failure: switch (init_level) { case 4: /* Initialized successfully */ break; case 3: CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); xpt_remove_periph(periph); /* FALLTHROUGH */ case 2: xpt_lock_buses(); TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); xpt_unlock_buses(); xpt_free_path(periph->path); /* FALLTHROUGH */ case 1: free(periph, M_CAMPERIPH); /* FALLTHROUGH */ case 0: /* No cleanup to perform. */ break; default: panic("%s: Unknown init level", __func__); } return(status); } /* * Find a peripheral structure with the specified path, target, lun, * and (optionally) type. If the name is NULL, this function will return * the first peripheral driver that matches the specified path. */ struct cam_periph * cam_periph_find(struct cam_path *path, char *name) { struct periph_driver **p_drv; struct cam_periph *periph; xpt_lock_buses(); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) continue; TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { if (xpt_path_comp(periph->path, path) == 0) { xpt_unlock_buses(); cam_periph_assert(periph, MA_OWNED); return(periph); } } if (name != NULL) { xpt_unlock_buses(); return(NULL); } } xpt_unlock_buses(); return(NULL); } /* * Find peripheral driver instances attached to the specified path. 
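 *
 * The instance names are appended to "sb" as a comma-separated list
 * (e.g. "da0,pass2") and the number of instances found is returned.
 * A minimal sketch of a caller, assuming a fixed-size buffer suffices:
 *
 *	struct sbuf sb;
 *	char buf[64];
 *
 *	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
 *	if (cam_periph_list(path, &sb) > 0) {
 *		sbuf_finish(&sb);
 *		printf("%s\n", sbuf_data(&sb));
 *	}
 *	sbuf_delete(&sb);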
*/ int cam_periph_list(struct cam_path *path, struct sbuf *sb) { struct sbuf local_sb; struct periph_driver **p_drv; struct cam_periph *periph; int count; int sbuf_alloc_len; sbuf_alloc_len = 16; retry: sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN); count = 0; xpt_lock_buses(); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { if (xpt_path_comp(periph->path, path) != 0) continue; if (sbuf_len(&local_sb) != 0) sbuf_cat(&local_sb, ","); sbuf_printf(&local_sb, "%s%d", periph->periph_name, periph->unit_number); if (sbuf_error(&local_sb) == ENOMEM) { sbuf_alloc_len *= 2; xpt_unlock_buses(); sbuf_delete(&local_sb); goto retry; } count++; } } xpt_unlock_buses(); sbuf_finish(&local_sb); sbuf_cpy(sb, sbuf_data(&local_sb)); sbuf_delete(&local_sb); return (count); } cam_status cam_periph_acquire(struct cam_periph *periph) { cam_status status; status = CAM_REQ_CMP_ERR; if (periph == NULL) return (status); xpt_lock_buses(); if ((periph->flags & CAM_PERIPH_INVALID) == 0) { periph->refcount++; status = CAM_REQ_CMP; } xpt_unlock_buses(); return (status); } void cam_periph_doacquire(struct cam_periph *periph) { xpt_lock_buses(); KASSERT(periph->refcount >= 1, ("cam_periph_doacquire() with refcount == %d", periph->refcount)); periph->refcount++; xpt_unlock_buses(); } void cam_periph_release_locked_buses(struct cam_periph *periph) { cam_periph_assert(periph, MA_OWNED); KASSERT(periph->refcount >= 1, ("periph->refcount >= 1")); if (--periph->refcount == 0) camperiphfree(periph); } void cam_periph_release_locked(struct cam_periph *periph) { if (periph == NULL) return; xpt_lock_buses(); cam_periph_release_locked_buses(periph); xpt_unlock_buses(); } void cam_periph_release(struct cam_periph *periph) { struct mtx *mtx; if (periph == NULL) return; cam_periph_assert(periph, MA_NOTOWNED); mtx = cam_periph_mtx(periph); mtx_lock(mtx); cam_periph_release_locked(periph); mtx_unlock(mtx); } int cam_periph_hold(struct cam_periph *periph, int priority) { int error; /* * Increment the reference count on the peripheral * while we wait for our lock attempt to succeed * to ensure the peripheral doesn't disappear out * from under us while we sleep. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); cam_periph_assert(periph, MA_OWNED); while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { periph->flags |= CAM_PERIPH_LOCK_WANTED; if ((error = cam_periph_sleep(periph, periph, priority, "caplck", 0)) != 0) { cam_periph_release_locked(periph); return (error); } if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_release_locked(periph); return (ENXIO); } } periph->flags |= CAM_PERIPH_LOCKED; return (0); } void cam_periph_unhold(struct cam_periph *periph) { cam_periph_assert(periph, MA_OWNED); periph->flags &= ~CAM_PERIPH_LOCKED; if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { periph->flags &= ~CAM_PERIPH_LOCK_WANTED; wakeup(periph); } cam_periph_release_locked(periph); } /* * Look for the next unit number that is not currently in use for this * peripheral type starting at "newunit". Also exclude unit numbers that * are reserved for future "hardwiring" unless we already know that this * is a potential wired device. Only assume that the device is "wired" the * first time through the loop since after that we'll be looking at unit * numbers that did not match a wiring entry.
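 *
 * For reference, a wired-down entry is a set of loader hints of the
 * (hypothetical) form:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * which reserves unit da4 for the device at that nexus; the
 * resource_int_value() and resource_string_value() lookups below parse
 * exactly these "at", "target", and "lun" values.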
*/ static u_int camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, path_id_t pathid, target_id_t target, lun_id_t lun) { struct cam_periph *periph; char *periph_name; int i, val, dunit, r; const char *dname, *strval; periph_name = p_drv->driver_name; for (;;newunit++) { for (periph = TAILQ_FIRST(&p_drv->units); periph != NULL && periph->unit_number != newunit; periph = TAILQ_NEXT(periph, unit_links)) ; if (periph != NULL && periph->unit_number == newunit) { if (wired != 0) { xpt_print(periph->path, "Duplicate Wired " "Device entry!\n"); xpt_print(periph->path, "Second device (%s " "device at scbus%d target %d lun %d) will " "not be wired\n", periph_name, pathid, target, lun); wired = 0; } continue; } if (wired) break; /* * Don't match entries like "da 4" as a wired down * device, but do match entries like "da 4 target 5" * or even "da 4 scbus 1". */ i = 0; dname = periph_name; for (;;) { r = resource_find_dev(&i, dname, &dunit, NULL, NULL); if (r != 0) break; /* if no "target" and no specific scbus, skip */ if (resource_int_value(dname, dunit, "target", &val) && (resource_string_value(dname, dunit, "at",&strval)|| strcmp(strval, "scbus") == 0)) continue; if (newunit == dunit) break; } if (r != 0) break; } return (newunit); } static u_int camperiphunit(struct periph_driver *p_drv, path_id_t pathid, target_id_t target, lun_id_t lun) { u_int unit; int wired, i, val, dunit; const char *dname, *strval; char pathbuf[32], *periph_name; periph_name = p_drv->driver_name; snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid); unit = 0; i = 0; dname = periph_name; for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0; wired = 0) { if (resource_string_value(dname, dunit, "at", &strval) == 0) { if (strcmp(strval, pathbuf) != 0) continue; wired++; } if (resource_int_value(dname, dunit, "target", &val) == 0) { if (val != target) continue; wired++; } if (resource_int_value(dname, dunit, "lun", &val) == 0) { if (val != lun) continue; wired++; } if (wired != 0) { unit = dunit; break; } } /* * Either start from 0 looking for the next unit or from * the unit number given in the resource config. This way, * if we have wildcard matches, we don't return the same * unit number twice. */ unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun); return (unit); } void cam_periph_invalidate(struct cam_periph *periph) { cam_periph_assert(periph, MA_OWNED); /* * We only call this routine the first time a peripheral is * invalidated. 
*/ if ((periph->flags & CAM_PERIPH_INVALID) != 0) return; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n")); if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) { struct sbuf sb; char buffer[160]; sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN); xpt_denounce_periph_sbuf(periph, &sb); sbuf_finish(&sb); sbuf_putbuf(&sb); } periph->flags |= CAM_PERIPH_INVALID; periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND; if (periph->periph_oninval != NULL) periph->periph_oninval(periph); cam_periph_release_locked(periph); } static void camperiphfree(struct cam_periph *periph) { struct periph_driver **p_drv; struct periph_driver *drv; cam_periph_assert(periph, MA_OWNED); KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating", periph->periph_name, periph->unit_number)); for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) break; } if (*p_drv == NULL) { printf("camperiphfree: attempt to free non-existent periph\n"); return; } /* * Cache a pointer to the periph_driver structure. If a * periph_driver is added or removed from the array (see * periphdriver_register()) while we drop the topology lock * below, p_drv may change. This doesn't protect against this * particular periph_driver going away. That will require full * reference counting in the periph_driver infrastructure. */ drv = *p_drv; /* * We need to set this flag before dropping the topology lock, to * let anyone who is traversing the list know that this peripheral is * about to be freed, and there will be no more reference count * checks. */ periph->flags |= CAM_PERIPH_FREE; /* * The peripheral destructor semantics dictate calling with only the * SIM mutex held. Since it might sleep, it should not be called * with the topology lock held. */ xpt_unlock_buses(); /* * We need to call the peripheral destructor prior to removing the * peripheral from the list. Otherwise, we risk running into a * scenario where the peripheral unit number may get reused * (because it has been removed from the list), but some resources * used by the peripheral are still hanging around. In particular, * the devfs nodes used by some peripherals like the pass(4) driver * aren't fully cleaned up until the destructor is run. If the * unit number is reused before the devfs instance is fully gone, * devfs will panic. */ if (periph->periph_dtor != NULL) periph->periph_dtor(periph); /* * The peripheral list is protected by the topology lock. */ xpt_lock_buses(); TAILQ_REMOVE(&drv->units, periph, unit_links); drv->generation++; xpt_remove_periph(periph); xpt_unlock_buses(); if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) xpt_print(periph->path, "Periph destroyed\n"); else CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) { union ccb ccb; void *arg; switch (periph->deferred_ac) { case AC_FOUND_DEVICE: ccb.ccb_h.func_code = XPT_GDEV_TYPE; xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); xpt_action(&ccb); arg = &ccb; break; case AC_PATH_REGISTERED: ccb.ccb_h.func_code = XPT_PATH_INQ; xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); xpt_action(&ccb); arg = &ccb; break; default: arg = NULL; break; } periph->deferred_callback(NULL, periph->deferred_ac, periph->path, arg); } xpt_free_path(periph->path); free(periph, M_CAMPERIPH); xpt_lock_buses(); } /* * Map user virtual pointers into kernel virtual address space, so we can * access the memory.
This is now a generic function that centralizes most * of the sanity checks on the data flags, if any. * This also only works for up to MAXPHYS memory. Since we use * buffers to map stuff in and out, we're limited to the buffer size. */ int cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo, u_int maxmap) { int numbufs, i, j; int flags[CAM_PERIPH_MAXMAPS]; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; u_int32_t lengths[CAM_PERIPH_MAXMAPS]; u_int32_t dirs[CAM_PERIPH_MAXMAPS]; if (maxmap == 0) maxmap = DFLTPHYS; /* traditional default */ else if (maxmap > MAXPHYS) maxmap = MAXPHYS; /* for safety */ switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { printf("cam_periph_mapmem: invalid match buffer " "length 0\n"); return(EINVAL); } if (ccb->cdm.pattern_buf_len > 0) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; lengths[0] = ccb->cdm.pattern_buf_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; lengths[1] = ccb->cdm.match_buf_len; dirs[1] = CAM_DIR_IN; numbufs = 2; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; lengths[0] = ccb->cdm.match_buf_len; dirs[0] = CAM_DIR_IN; numbufs = 1; } /* * This request will not go to the hardware, no reason * to be so strict. vmapbuf() is able to map up to MAXPHYS. */ maxmap = MAXPHYS; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); data_ptrs[0] = &ccb->csio.data_ptr; lengths[0] = ccb->csio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; break; case XPT_ATA_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); data_ptrs[0] = &ccb->ataio.data_ptr; lengths[0] = ccb->ataio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; break; case XPT_MMC_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* Two mappings: one for cmd->data and one for cmd->data->data */ data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data; lengths[0] = sizeof(struct mmc_data *); dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data; lengths[1] = ccb->mmcio.cmd.data->len; dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 2; break; case XPT_SMP_IO: data_ptrs[0] = &ccb->smpio.smp_request; lengths[0] = ccb->smpio.smp_request_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = &ccb->smpio.smp_response; lengths[1] = ccb->smpio.smp_response_len; dirs[1] = CAM_DIR_IN; numbufs = 2; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return (0); if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); data_ptrs[0] = &ccb->nvmeio.data_ptr; lengths[0] = ccb->nvmeio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; break; case XPT_DEV_ADVINFO: if (ccb->cdai.bufsiz == 0) return (0); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; lengths[0] = ccb->cdai.bufsiz; dirs[0] = CAM_DIR_IN; numbufs = 1; /* * This request will not go to the hardware, no reason * to be so strict. vmapbuf() is able to map up to MAXPHYS. */ maxmap = MAXPHYS; break; default: return(EINVAL); break; /* NOTREACHED */ } /* * Check the transfer length and permissions first, so we don't * have to unmap any previously mapped buffers. */ for (i = 0; i < numbufs; i++) { flags[i] = 0; /* * The userland data pointer passed in may not be page * aligned. 
vmapbuf() truncates the address to a page * boundary, so if the address isn't page aligned, we'll * need enough space for the given transfer length, plus * whatever extra space is necessary to make it to the page * boundary. */ if ((lengths[i] + (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){ printf("cam_periph_mapmem: attempt to map %lu bytes, " "which is greater than %lu\n", (long)(lengths[i] + (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)), (u_long)maxmap); return(E2BIG); } if (dirs[i] & CAM_DIR_OUT) { flags[i] = BIO_WRITE; } if (dirs[i] & CAM_DIR_IN) { flags[i] = BIO_READ; } } /* * This keeps the kernel stack of the current thread from getting * swapped. In low-memory situations where the kernel stack might * otherwise get swapped out, this holds it and allows the thread * to make progress and release the kernel mapped pages sooner. * * XXX KDM should I use P_NOSWAP instead? */ PHOLD(curproc); for (i = 0; i < numbufs; i++) { /* * Get the buffer. */ mapinfo->bp[i] = getpbuf(NULL); /* put our pointer in the data slot */ mapinfo->bp[i]->b_data = *data_ptrs[i]; /* save the user's data address */ mapinfo->bp[i]->b_caller1 = *data_ptrs[i]; /* set the transfer length, we know it's < MAXPHYS */ mapinfo->bp[i]->b_bufsize = lengths[i]; /* set the direction */ mapinfo->bp[i]->b_iocmd = flags[i]; /* * Map the buffer into kernel memory. * * Note that useracc() alone is not a sufficient test. * vmapbuf() can still fail due to a smaller file mapped * into a larger area of VM, or if userland races against * vmapbuf() after the useracc() check. */ if (vmapbuf(mapinfo->bp[i], 1) < 0) { for (j = 0; j < i; ++j) { *data_ptrs[j] = mapinfo->bp[j]->b_caller1; vunmapbuf(mapinfo->bp[j]); relpbuf(mapinfo->bp[j], NULL); } relpbuf(mapinfo->bp[i], NULL); PRELE(curproc); return(EACCES); } /* set our pointer to the new mapped area */ *data_ptrs[i] = mapinfo->bp[i]->b_data; mapinfo->num_bufs_used++; } /* * Now that we've gotten this far, change ownership of the buffers * to the kernel so that we don't run afoul of returning to user * space with locks (on the buffer) held. */ for (i = 0; i < numbufs; i++) { BUF_KERNPROC(mapinfo->bp[i]); } return(0); } /* * Unmap memory segments mapped into kernel virtual address space by * cam_periph_mapmem(). */ void cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) { int numbufs, i; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; if (mapinfo->num_bufs_used <= 0) { /* nothing to free and the process wasn't held.
*/ return; } switch (ccb->ccb_h.func_code) { case XPT_DEV_MATCH: numbufs = min(mapinfo->num_bufs_used, 2); if (numbufs == 1) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; } break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: data_ptrs[0] = &ccb->csio.data_ptr; numbufs = min(mapinfo->num_bufs_used, 1); break; case XPT_ATA_IO: data_ptrs[0] = &ccb->ataio.data_ptr; numbufs = min(mapinfo->num_bufs_used, 1); break; case XPT_SMP_IO: numbufs = min(mapinfo->num_bufs_used, 2); data_ptrs[0] = &ccb->smpio.smp_request; data_ptrs[1] = &ccb->smpio.smp_response; break; case XPT_DEV_ADVINFO: numbufs = min(mapinfo->num_bufs_used, 1); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: data_ptrs[0] = &ccb->nvmeio.data_ptr; numbufs = min(mapinfo->num_bufs_used, 1); break; default: /* allow ourselves to be swapped once again */ PRELE(curproc); return; break; /* NOTREACHED */ } for (i = 0; i < numbufs; i++) { /* Set the user's pointer back to the original value */ *data_ptrs[i] = mapinfo->bp[i]->b_caller1; /* unmap the buffer */ vunmapbuf(mapinfo->bp[i]); /* release the buffer */ relpbuf(mapinfo->bp[i], NULL); } /* allow ourselves to be swapped once again */ PRELE(curproc); } int cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)) { union ccb *ccb; int error; int found; error = found = 0; switch(cmd){ case CAMGETPASSTHRU: ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); ccb->ccb_h.func_code = XPT_GDEVLIST; /* * Basically, the point of this is that we walk the device * list until we find a passthrough device. In the current * version of the CAM code, the only way to determine what * type of device we're dealing with is by its name.
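 *
 * The resulting ccb is copied back to the caller, who can recover the
 * passthrough device name from it.  Roughly, and assuming a userland
 * file descriptor for the device node:
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
 *	    ccb.cgdl.periph_name[0] != '\0')
 *		printf("%s%u\n", ccb.cgdl.periph_name,
 *		    ccb.cgdl.unit_number);
 *
 * An empty periph_name on return means no passthrough device was found.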
*/ while (found == 0) { ccb->cgdl.index = 0; ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { /* we want the next device in the list */ xpt_action(ccb); if (strncmp(ccb->cgdl.periph_name, "pass", 4) == 0){ found = 1; break; } } if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) && (found == 0)) { ccb->cgdl.periph_name[0] = '\0'; ccb->cgdl.unit_number = 0; break; } } /* copy the result back out */ bcopy(ccb, addr, sizeof(union ccb)); /* and release the ccb */ xpt_release_ccb(ccb); break; default: error = ENOTTY; break; } return(error); } static void cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb) { panic("%s: already done with ccb %p", __func__, done_ccb); } static void cam_periph_done(struct cam_periph *periph, union ccb *done_ccb) { /* Caller will release the CCB */ xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED); done_ccb->ccb_h.cbfcnp = cam_periph_done_panic; wakeup(&done_ccb->ccb_h.cbfcnp); } static void cam_periph_ccbwait(union ccb *ccb) { if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { while (ccb->ccb_h.cbfcnp != cam_periph_done_panic) xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0); } KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG, ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, " "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code, ccb->ccb_h.status, ccb->ccb_h.pinfo.index)); } int cam_periph_runccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags), cam_flags camflags, u_int32_t sense_flags, struct devstat *ds) { struct bintime *starttime; struct bintime ltime; int error; starttime = NULL; xpt_path_assert(ccb->ccb_h.path, MA_OWNED); KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0, ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb, ccb->ccb_h.func_code, ccb->ccb_h.flags)); /* * If the user has supplied a stats structure, and if we understand * this particular type of ccb, record the transaction start. */ if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO || ccb->ccb_h.func_code == XPT_ATA_IO)) { starttime = <ime; binuptime(starttime); devstat_start_transaction(ds, starttime); } ccb->ccb_h.cbfcnp = cam_periph_done; xpt_action(ccb); do { cam_periph_ccbwait(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) error = 0; else if (error_routine != NULL) { ccb->ccb_h.cbfcnp = cam_periph_done; error = (*error_routine)(ccb, camflags, sense_flags); } else error = 0; } while (error == ERESTART); if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(ccb->ccb_h.path, /* relsim_flags */0, /* openings */0, /* timeout */0, /* getcount_only */ FALSE); ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } if (ds != NULL) { if (ccb->ccb_h.func_code == XPT_SCSI_IO) { devstat_end_transaction(ds, ccb->csio.dxfer_len - ccb->csio.resid, ccb->csio.tag_action & 0x3, ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime); } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { devstat_end_transaction(ds, ccb->ataio.dxfer_len - ccb->ataio.resid, 0, /* Not used in ATA */ ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ? 
DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime); } } return(error); } void cam_freeze_devq(struct cam_path *path) { struct ccb_hdr ccb_h; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n")); xpt_setup_ccb(&ccb_h, path, /*priority*/1); ccb_h.func_code = XPT_NOOP; ccb_h.flags = CAM_DEV_QFREEZE; xpt_action((union ccb *)&ccb_h); } u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, u_int32_t openings, u_int32_t arg, int getcount_only) { struct ccb_relsim crs; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n", relsim_flags, openings, arg, getcount_only)); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0; crs.release_flags = relsim_flags; crs.openings = openings; crs.release_timeout = arg; xpt_action((union ccb *)&crs); return (crs.qfrozen_cnt); } #define saved_ccb_ptr ppriv_ptr0 static void camperiphdone(struct cam_periph *periph, union ccb *done_ccb) { union ccb *saved_ccb; cam_status status; struct scsi_start_stop_unit *scsi_cmd; int error_code, sense_key, asc, ascq; scsi_cmd = (struct scsi_start_stop_unit *) &done_ccb->csio.cdb_io.cdb_bytes; status = done_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) { /* * If the error is "invalid field in CDB", * and the load/eject flag is set, turn the * flag off and try again. This is just in * case the drive in question barfs on the * load eject flag. The CAM code should set * the load/eject flag by default for * removable media. */ if ((scsi_cmd->opcode == START_STOP_UNIT) && ((scsi_cmd->how & SSS_LOEJ) != 0) && (asc == 0x24) && (ascq == 0x00)) { scsi_cmd->how &= ~SSS_LOEJ; if (status & CAM_DEV_QFRZN) { cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } xpt_action(done_ccb); goto out; } } if (cam_periph_error(done_ccb, 0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART) goto out; if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) { cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } } else { /* * If we have successfully taken a device from the not * ready to ready state, re-scan the device and re-get * the inquiry information. Many devices (mostly disks) * don't properly report their inquiry information unless * they are spun up. */ if (scsi_cmd->opcode == START_STOP_UNIT) xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL); } /* * Perform the final retry with the original CCB so that final * error processing is performed by the owner of the CCB. */ saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr; bcopy(saved_ccb, done_ccb, sizeof(*done_ccb)); xpt_free_ccb(saved_ccb); if (done_ccb->ccb_h.cbfcnp != camperiphdone) periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; xpt_action(done_ccb); out: /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); } /* * Generic Async Event handler. Peripheral drivers usually * filter out the events that require personal attention, * and leave the rest to this function. 
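 *
 * Schematically (the "foo" driver is hypothetical), a periph's async
 * callback handles the events it cares about and hands the rest here:
 *
 *	static void
 *	fooasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph = callback_arg;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			... driver-specific attach work ...
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}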
*/ void cam_periph_async(struct cam_periph *periph, u_int32_t code, struct cam_path *path, void *arg) { switch (code) { case AC_LOST_DEVICE: cam_periph_invalidate(periph); break; default: break; } } void cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle) { struct ccb_getdevstats cgds; xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle); } void cam_periph_freeze_after_event(struct cam_periph *periph, struct timeval* event_time, u_int duration_ms) { struct timeval delta; struct timeval duration_tv; if (!timevalisset(event_time)) return; microtime(&delta); timevalsub(&delta, event_time); duration_tv.tv_sec = duration_ms / 1000; duration_tv.tv_usec = (duration_ms % 1000) * 1000; if (timevalcmp(&delta, &duration_tv, <)) { timevalsub(&duration_tv, &delta); duration_ms = duration_tv.tv_sec * 1000; duration_ms += duration_tv.tv_usec / 1000; cam_freeze_devq(periph->path); cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/duration_ms, /*getcount_only*/0); } } static int camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string) { int error; switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: error = 0; break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: error = camperiphscsisenseerror(ccb, orig_ccb, camflags, sense_flags, openings, relsim_flags, timeout, action, action_string); break; case SCSI_STATUS_QUEUE_FULL: { /* no decrement */ struct ccb_getdevstats cgds; /* * First off, find out what the current * transaction counts are. */ xpt_setup_ccb(&cgds.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); /* * If we were the only transaction active, treat * the QUEUE FULL as if it were a BUSY condition. */ if (cgds.dev_active != 0) { int total_openings; /* * Reduce the number of openings to * be 1 less than the amount it took * to get a queue full bounded by the * minimum allowed tag count for this * device. */ total_openings = cgds.dev_active + cgds.dev_openings; *openings = cgds.dev_active; if (*openings < cgds.mintags) *openings = cgds.mintags; if (*openings < total_openings) *relsim_flags = RELSIM_ADJUST_OPENINGS; else { /* * Some devices report queue full for * temporary resource shortages. For * this reason, we allow a minimum * tag count to be entered via a * quirk entry to prevent the queue * count on these devices from falling * to a pessimistically low value. We * still wait for the next successful * completion, however, before queueing * more transactions to the device. */ *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT; } *timeout = 0; error = ERESTART; *action &= ~SSQ_PRINT_SENSE; break; } /* FALLTHROUGH */ } case SCSI_STATUS_BUSY: /* * Restart the queue after either another * command completes or a 1 second timeout.
*/ if ((sense_flags & SF_RETRY_BUSY) != 0 || (ccb->ccb_h.retry_count--) > 0) { error = ERESTART; *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT | RELSIM_RELEASE_AFTER_CMDCMPLT; *timeout = 1000; } else { error = EIO; } break; case SCSI_STATUS_RESERV_CONFLICT: default: error = EIO; break; } return (error); } static int camperiphscsisenseerror(union ccb *ccb, union ccb **orig, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string) { struct cam_periph *periph; union ccb *orig_ccb = ccb; int error, recoveryccb; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone); if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) { /* * If error recovery is already in progress, don't attempt * to process this error, but requeue it unconditionally * and attempt to process it once error recovery has * completed. This failed command is probably related to * the error that caused the currently active error recovery * action so our current recovery efforts should also * address this command. Be aware that the error recovery * code assumes that only one recovery action is in progress * on a particular peripheral instance at any given time * (e.g. only one saved CCB for error recovery) so it is * imperative that we don't violate this assumption. */ error = ERESTART; *action &= ~SSQ_PRINT_SENSE; } else { scsi_sense_action err_action; struct ccb_getdev cgd; /* * Grab the inquiry data for this device. */ xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); err_action = scsi_error_action(&ccb->csio, &cgd.inq_data, sense_flags); error = err_action & SS_ERRMASK; /* * Do not autostart sequential access devices * to avoid unexpected tape loading. */ if ((err_action & SS_MASK) == SS_START && SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) { *action_string = "Will not autostart a " "sequential access device"; goto sense_error_done; } /* * Avoid recovery recursion if recovery action is the same. */ if ((err_action & SS_MASK) >= SS_START && recoveryccb) { if (((err_action & SS_MASK) == SS_START && ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) || ((err_action & SS_MASK) == SS_TUR && (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) { err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO; *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; *timeout = 500; } } /* * If the recovery action will consume a retry, * make sure we actually have retries available. */ if ((err_action & SSQ_DECREMENT_COUNT) != 0) { if (ccb->ccb_h.retry_count > 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) ccb->ccb_h.retry_count--; else { *action_string = "Retries exhausted"; goto sense_error_done; } } if ((err_action & SS_MASK) >= SS_START) { /* * Do common portions of commands that * use recovery CCBs. */ orig_ccb = xpt_alloc_ccb_nowait(); if (orig_ccb == NULL) { *action_string = "Can't allocate recovery CCB"; goto sense_error_done; } /* * Clear freeze flag for original request here, as * this freeze will be dropped as part of ERESTART.
*/ ccb->ccb_h.status &= ~CAM_DEV_QFRZN; bcopy(ccb, orig_ccb, sizeof(*orig_ccb)); } switch (err_action & SS_MASK) { case SS_NOP: *action_string = "No recovery action needed"; error = 0; break; case SS_RETRY: *action_string = "Retrying command (per sense data)"; error = ERESTART; break; case SS_FAIL: *action_string = "Unretryable error"; break; case SS_START: { int le; /* * Send a start unit command to the device, and * then retry the command. */ *action_string = "Attempting to start unit"; periph->flags |= CAM_PERIPH_RECOVERY_INPROG; /* * Check for removable media and set * load/eject flag appropriately. */ if (SID_IS_REMOVABLE(&cgd.inq_data)) le = TRUE; else le = FALSE; scsi_start_stop(&ccb->csio, /*retries*/1, camperiphdone, MSG_SIMPLE_Q_TAG, /*start*/TRUE, /*load/eject*/le, /*immediate*/FALSE, SSD_FULL_SIZE, /*timeout*/50000); break; } case SS_TUR: { /* * Send a Test Unit Ready to the device. * If the 'many' flag is set, we send 120 * test unit ready commands, one every half * second. Otherwise, we just send one TUR. * We only want to do this if the retry * count has not been exhausted. */ int retries; if ((err_action & SSQ_MANY) != 0) { *action_string = "Polling device for readiness"; retries = 120; } else { *action_string = "Testing device for readiness"; retries = 1; } periph->flags |= CAM_PERIPH_RECOVERY_INPROG; scsi_test_unit_ready(&ccb->csio, retries, camperiphdone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/5000); /* * Accomplish our 500ms delay by deferring * the release of our device queue appropriately. */ *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; *timeout = 500; break; } default: panic("Unhandled error action %x", err_action); } if ((err_action & SS_MASK) >= SS_START) { /* * Drop the priority, so that the recovery * CCB is the first to execute. Freeze the queue * after this command is sent so that we can * restore the old csio and have it queued in * the proper order before we release normal * transactions to the device. */ ccb->ccb_h.pinfo.priority--; ccb->ccb_h.flags |= CAM_DEV_QFREEZE; ccb->ccb_h.saved_ccb_ptr = orig_ccb; error = ERESTART; *orig = orig_ccb; } sense_error_done: *action = err_action; } return (error); } /* * Generic error handler. Peripheral drivers usually filter * out the errors that they handle in a unique manner, then * call this function. 
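 *
 * A minimal sketch of the usual call site, in a periph's completion
 * routine (the flag choices vary by driver):
 *
 *	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
 *		error = cam_periph_error(done_ccb, 0, SF_RETRY_UA, NULL);
 *		if (error == ERESTART)
 *			return;		(the CCB has been re-queued)
 *	}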
*/ int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags, union ccb *save_ccb) { struct cam_path *newpath; union ccb *orig_ccb, *scan_ccb; struct cam_periph *periph; const char *action_string; cam_status status; int frozen, error, openings, devctl_err; u_int32_t action, relsim_flags, timeout; action = SSQ_PRINT_SENSE; periph = xpt_path_periph(ccb->ccb_h.path); action_string = NULL; status = ccb->ccb_h.status; frozen = (status & CAM_DEV_QFRZN) != 0; status &= CAM_STATUS_MASK; devctl_err = openings = relsim_flags = timeout = 0; orig_ccb = ccb; /* Filter the errors that should be reported via devctl */ switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_SCSI_STATUS_ERROR: case CAM_ATA_STATUS_ERROR: case CAM_SMP_STATUS_ERROR: devctl_err++; break; default: break; } switch (status) { case CAM_REQ_CMP: error = 0; action &= ~SSQ_PRINT_SENSE; break; case CAM_SCSI_STATUS_ERROR: error = camperiphscsistatuserror(ccb, &orig_ccb, camflags, sense_flags, &openings, &relsim_flags, &timeout, &action, &action_string); break; case CAM_AUTOSENSE_FAIL: error = EIO; /* we have to kill the command */ break; case CAM_UA_ABORT: case CAM_UA_TERMIO: case CAM_MSG_REJECT_REC: /* XXX Don't know that these are correct */ error = EIO; break; case CAM_SEL_TIMEOUT: if ((camflags & CAM_RETRY_SELTO) != 0) { if (ccb->ccb_h.retry_count > 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) { ccb->ccb_h.retry_count--; error = ERESTART; /* * Wait a bit to give the device * time to recover before we try again. */ relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; timeout = periph_selto_delay; break; } action_string = "Retries exhausted"; } /* FALLTHROUGH */ case CAM_DEV_NOT_THERE: error = ENXIO; action = SSQ_LOST; break; case CAM_REQ_INVALID: case CAM_PATH_INVALID: case CAM_NO_HBA: case CAM_PROVIDE_FAIL: case CAM_REQ_TOO_BIG: case CAM_LUN_INVALID: case CAM_TID_INVALID: case CAM_FUNC_NOTAVAIL: error = EINVAL; break; case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: /* * Commands that repeatedly timeout and cause these * kinds of error recovery actions, should return * CAM_CMD_TIMEOUT, which allows us to safely assume * that this command was an innocent bystander to * these events and should be unconditionally * retried. */ case CAM_REQUEUE_REQ: /* Unconditional requeue if device is still there */ if (periph->flags & CAM_PERIPH_INVALID) { action_string = "Periph was invalidated"; error = EIO; } else if (sense_flags & SF_NO_RETRY) { error = EIO; action_string = "Retry was blocked"; } else { error = ERESTART; action &= ~SSQ_PRINT_SENSE; } break; case CAM_RESRC_UNAVAIL: /* Wait a bit for the resource shortage to abate. */ timeout = periph_noresrc_delay; /* FALLTHROUGH */ case CAM_BUSY: if (timeout == 0) { /* Wait a bit for the busy condition to abate. 
*/ timeout = periph_busy_delay; } relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; /* FALLTHROUGH */ case CAM_ATA_STATUS_ERROR: case CAM_REQ_CMP_ERR: case CAM_CMD_TIMEOUT: case CAM_UNEXP_BUSFREE: case CAM_UNCOR_PARITY: case CAM_DATA_RUN_ERR: default: if (periph->flags & CAM_PERIPH_INVALID) { error = EIO; action_string = "Periph was invalidated"; } else if (ccb->ccb_h.retry_count == 0) { error = EIO; action_string = "Retries exhausted"; } else if (sense_flags & SF_NO_RETRY) { error = EIO; action_string = "Retry was blocked"; } else { ccb->ccb_h.retry_count--; error = ERESTART; } break; } if ((sense_flags & SF_PRINT_ALWAYS) || CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO)) action |= SSQ_PRINT_SENSE; else if (sense_flags & SF_NO_PRINT) action &= ~SSQ_PRINT_SENSE; if ((action & SSQ_PRINT_SENSE) != 0) cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL); if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) { if (error != ERESTART) { if (action_string == NULL) action_string = "Unretryable error"; xpt_print(ccb->ccb_h.path, "Error %d, %s\n", error, action_string); } else if (action_string != NULL) xpt_print(ccb->ccb_h.path, "%s\n", action_string); else xpt_print(ccb->ccb_h.path, "Retrying command\n"); } if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0)) cam_periph_devctl_notify(orig_ccb); if ((action & SSQ_LOST) != 0) { lun_id_t lun_id; /* * For a selection timeout, we consider all of the LUNs on * the target to be gone. If the status is CAM_DEV_NOT_THERE, * then we only get rid of the device(s) specified by the * path in the original CCB. */ if (status == CAM_SEL_TIMEOUT) lun_id = CAM_LUN_WILDCARD; else lun_id = xpt_path_lun_id(ccb->ccb_h.path); /* Should we do more if we can't create the path?? */ if (xpt_create_path(&newpath, periph, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), lun_id) == CAM_REQ_CMP) { /* * Let peripheral drivers know that this * device has gone away. */ xpt_async(AC_LOST_DEVICE, newpath, NULL); xpt_free_path(newpath); } } /* Broadcast UNIT ATTENTIONs to all periphs. 
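 * Each peripheral driver observes this through the async callback it
 * registered when it was created: the callback runs with code
 * AC_UNIT_ATTENTION and the CCB carrying the sense data as its argument.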
*/ if ((action & SSQ_UA) != 0) xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb); /* Rescan target on "Reported LUNs data has changed" */ if ((action & SSQ_RESCAN) != 0) { if (xpt_create_path(&newpath, NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), CAM_LUN_WILDCARD) == CAM_REQ_CMP) { scan_ccb = xpt_alloc_ccb_nowait(); if (scan_ccb != NULL) { scan_ccb->ccb_h.path = newpath; scan_ccb->ccb_h.func_code = XPT_SCAN_TGT; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else { xpt_print(newpath, "Can't allocate CCB to rescan target\n"); xpt_free_path(newpath); } } } /* Attempt a retry */ if (error == ERESTART || error == 0) { if (frozen != 0) ccb->ccb_h.status &= ~CAM_DEV_QFRZN; if (error == ERESTART) xpt_action(ccb); if (frozen != 0) cam_release_devq(ccb->ccb_h.path, relsim_flags, openings, timeout, /*getcount_only*/0); } return (error); } #define CAM_PERIPH_DEVD_MSG_SIZE 256 static void cam_periph_devctl_notify(union ccb *ccb) { struct cam_periph *periph; struct ccb_getdev *cgd; struct sbuf sb; int serr, sk, asc, ascq; char *sbmsg, *type; sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT); if (sbmsg == NULL) return; sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN); periph = xpt_path_periph(ccb->ccb_h.path); sbuf_printf(&sb, "device=%s%d ", periph->periph_name, periph->unit_number); sbuf_printf(&sb, "serial=\""); if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) { xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); if (cgd->ccb_h.status == CAM_REQ_CMP) sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len); xpt_free_ccb((union ccb *)cgd); } sbuf_printf(&sb, "\" "); sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status); switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout); type = "timeout"; break; case CAM_SCSI_STATUS_ERROR: sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status); if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq)) sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ", serr, sk, asc, ascq); type = "error"; break; case CAM_ATA_STATUS_ERROR: sbuf_printf(&sb, "RES=\""); ata_res_sbuf(&ccb->ataio.res, &sb); sbuf_printf(&sb, "\" "); type = "error"; break; default: type = "error"; break; } if (ccb->ccb_h.func_code == XPT_SCSI_IO) { sbuf_printf(&sb, "CDB=\""); scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb); sbuf_printf(&sb, "\" "); } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { sbuf_printf(&sb, "ACB=\""); ata_cmd_sbuf(&ccb->ataio.cmd, &sb); sbuf_printf(&sb, "\" "); } if (sbuf_finish(&sb) == 0) devctl_notify("CAM", "periph", type, sbuf_data(&sb)); sbuf_delete(&sb); free(sbmsg, M_CAMPERIPH); } Index: head/sys/cam/cam_periph.h =================================================================== --- head/sys/cam/cam_periph.h (revision 326264) +++ head/sys/cam/cam_periph.h (revision 326265) @@ -1,259 +1,261 @@ /*- * Data structures and definitions for CAM peripheral ("type") drivers. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. 
* 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_PERIPH_H #define _CAM_CAM_PERIPH_H 1 #include #include #ifdef _KERNEL #include #include struct devstat; extern struct cam_periph *xpt_periph; extern struct periph_driver **periph_drivers; void periphdriver_register(void *); int periphdriver_unregister(void *); void periphdriver_init(int level); #include #define PERIPHDRIVER_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ periphdriver_register(data); \ break; \ case MOD_UNLOAD: \ return (periphdriver_unregister(data)); \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_DRIVERS, SI_ORDER_ANY); \ MODULE_DEPEND(name, cam, 1, 1, 1) /* * Callback informing the peripheral driver it can perform its * initialization since the XPT is now fully initialized. */ typedef void (periph_init_t)(void); /* * Callback requesting the peripheral driver to remove its instances * and shutdown, if possible.
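 *
 * (Illustrative aside, hypothetical "xx" driver: a peripheral driver wires
 * itself into CAM through the PERIPHDRIVER_DECLARE() macro above roughly
 * like this.)
 */
static periph_init_t xxinit;

static struct periph_driver xxdriver = {
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
/*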
*/ typedef int (periph_deinit_t)(void); struct periph_driver { periph_init_t *init; char *driver_name; TAILQ_HEAD(,cam_periph) units; u_int generation; u_int flags; #define CAM_PERIPH_DRV_EARLY 0x01 periph_deinit_t *deinit; }; typedef enum { CAM_PERIPH_BIO } cam_periph_type; /* Generically useful offsets into the peripheral private area */ #define ppriv_ptr0 periph_priv.entries[0].ptr #define ppriv_ptr1 periph_priv.entries[1].ptr #define ppriv_field0 periph_priv.entries[0].field #define ppriv_field1 periph_priv.entries[1].field typedef void periph_start_t (struct cam_periph *periph, union ccb *start_ccb); typedef cam_status periph_ctor_t (struct cam_periph *periph, void *arg); typedef void periph_oninv_t (struct cam_periph *periph); typedef void periph_dtor_t (struct cam_periph *periph); struct cam_periph { periph_start_t *periph_start; periph_oninv_t *periph_oninval; periph_dtor_t *periph_dtor; char *periph_name; struct cam_path *path; /* Compiled path to device */ void *softc; struct cam_sim *sim; u_int32_t unit_number; cam_periph_type type; u_int32_t flags; #define CAM_PERIPH_RUNNING 0x01 #define CAM_PERIPH_LOCKED 0x02 #define CAM_PERIPH_LOCK_WANTED 0x04 #define CAM_PERIPH_INVALID 0x08 #define CAM_PERIPH_NEW_DEV_FOUND 0x10 #define CAM_PERIPH_RECOVERY_INPROG 0x20 #define CAM_PERIPH_RUN_TASK 0x40 #define CAM_PERIPH_FREE 0x80 #define CAM_PERIPH_ANNOUNCED 0x100 uint32_t scheduled_priority; uint32_t immediate_priority; int periph_allocating; int periph_allocated; u_int32_t refcount; SLIST_HEAD(, ccb_hdr) ccb_list; /* For "immediate" requests */ SLIST_ENTRY(cam_periph) periph_links; TAILQ_ENTRY(cam_periph) unit_links; ac_callback_t *deferred_callback; ac_code deferred_ac; struct task periph_run_task; }; #define CAM_PERIPH_MAXMAPS 2 struct cam_periph_map_info { int num_bufs_used; struct buf *bp[CAM_PERIPH_MAXMAPS]; }; cam_status cam_periph_alloc(periph_ctor_t *periph_ctor, periph_oninv_t *periph_oninvalidate, periph_dtor_t *periph_dtor, periph_start_t *periph_start, char *name, cam_periph_type type, struct cam_path *, ac_callback_t *, ac_code, void *arg); struct cam_periph *cam_periph_find(struct cam_path *path, char *name); int cam_periph_list(struct cam_path *, struct sbuf *); cam_status cam_periph_acquire(struct cam_periph *periph); void cam_periph_doacquire(struct cam_periph *periph); void cam_periph_release(struct cam_periph *periph); void cam_periph_release_locked(struct cam_periph *periph); void cam_periph_release_locked_buses(struct cam_periph *periph); int cam_periph_hold(struct cam_periph *periph, int priority); void cam_periph_unhold(struct cam_periph *periph); void cam_periph_invalidate(struct cam_periph *periph); int cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo, u_int maxmap); void cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo); union ccb *cam_periph_getccb(struct cam_periph *periph, u_int32_t priority); int cam_periph_runccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags), cam_flags camflags, u_int32_t sense_flags, struct devstat *ds); int cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, int (*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)); void cam_freeze_devq(struct cam_path *path); u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, u_int32_t opening_reduction, u_int32_t arg, int getcount_only); void cam_periph_async(struct cam_periph *periph, u_int32_t code, struct cam_path *path, void *arg); void 
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle_ms); void cam_periph_freeze_after_event(struct cam_periph *periph, struct timeval* event_time, u_int duration_ms); int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags, union ccb *save_ccb); static __inline struct mtx * cam_periph_mtx(struct cam_periph *periph) { return (xpt_path_mtx(periph->path)); } #define cam_periph_owned(periph) \ mtx_owned(xpt_path_mtx((periph)->path)) #define cam_periph_lock(periph) \ mtx_lock(xpt_path_mtx((periph)->path)) #define cam_periph_unlock(periph) \ mtx_unlock(xpt_path_mtx((periph)->path)) #define cam_periph_assert(periph, what) \ mtx_assert(xpt_path_mtx((periph)->path), (what)) #define cam_periph_sleep(periph, chan, priority, wmesg, timo) \ xpt_path_sleep((periph)->path, (chan), (priority), (wmesg), (timo)) static inline struct cam_periph * cam_periph_acquire_first(struct periph_driver *driver) { struct cam_periph *periph; xpt_lock_buses(); periph = TAILQ_FIRST(&driver->units); while (periph != NULL && (periph->flags & CAM_PERIPH_INVALID) != 0) periph = TAILQ_NEXT(periph, unit_links); if (periph != NULL) periph->refcount++; xpt_unlock_buses(); return (periph); } static inline struct cam_periph * cam_periph_acquire_next(struct cam_periph *pperiph) { struct cam_periph *periph = pperiph; cam_periph_assert(pperiph, MA_NOTOWNED); xpt_lock_buses(); do { periph = TAILQ_NEXT(periph, unit_links); } while (periph != NULL && (periph->flags & CAM_PERIPH_INVALID) != 0); if (periph != NULL) periph->refcount++; xpt_unlock_buses(); cam_periph_release(pperiph); return (periph); } #define CAM_PERIPH_FOREACH(periph, driver) \ for ((periph) = cam_periph_acquire_first(driver); \ (periph) != NULL; \ (periph) = cam_periph_acquire_next(periph)) #endif /* _KERNEL */ #endif /* _CAM_CAM_PERIPH_H */ Index: head/sys/cam/cam_queue.c =================================================================== --- head/sys/cam/cam_queue.c (revision 326264) +++ head/sys/cam/cam_queue.c (revision 326265) @@ -1,408 +1,410 @@ /*- * CAM request queue management functions. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
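 *
 * (Illustrative aside, assumed names: the CAM_PERIPH_FOREACH() macro
 * defined in cam_periph.h above walks every live unit of a driver while
 * holding a reference on each, e.g.:)
 */
static void
xx_report_units(void)
{
	struct cam_periph *periph;

	/*
	 * Acquisition/release and the skipping of invalidated units are
	 * handled by the iterator itself.
	 */
	CAM_PERIPH_FOREACH(periph, &xxdriver) {
		printf("%s%d present\n", periph->periph_name,
		    periph->unit_number);
	}
}
/*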
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_CAMQ, "CAM queue", "CAM queue buffers"); static MALLOC_DEFINE(M_CAMDEVQ, "CAM dev queue", "CAM dev queue buffers"); static MALLOC_DEFINE(M_CAMCCBQ, "CAM ccb queue", "CAM ccb queue buffers"); static __inline int queue_cmp(cam_pinfo **queue_array, int i, int j); static __inline void swap(cam_pinfo **queue_array, int i, int j); static void heap_up(cam_pinfo **queue_array, int new_index); static void heap_down(cam_pinfo **queue_array, int index, int last_index); struct camq * camq_alloc(int size) { struct camq *camq; camq = (struct camq *)malloc(sizeof(*camq), M_CAMQ, M_NOWAIT); if (camq != NULL) { if (camq_init(camq, size) != 0) { free(camq, M_CAMQ); camq = NULL; } } return (camq); } int camq_init(struct camq *camq, int size) { bzero(camq, sizeof(*camq)); camq->array_size = size; if (camq->array_size != 0) { camq->queue_array = (cam_pinfo**)malloc(size*sizeof(cam_pinfo*), M_CAMQ, M_NOWAIT); if (camq->queue_array == NULL) { printf("camq_init: - cannot malloc array!\n"); return (1); } /* * Heap algorithms like everything numbered from 1, so * offset our pointer into the heap array by one element. */ camq->queue_array--; } return (0); } /* * Free a camq structure. This should only be called if a controller * driver fails somehow during its attach routine or is unloaded and has * obtained a camq structure. The XPT should ensure that the queue * is empty before calling this routine. */ void camq_free(struct camq *queue) { if (queue != NULL) { camq_fini(queue); free(queue, M_CAMQ); } } void camq_fini(struct camq *queue) { if (queue->queue_array != NULL) { /* * Heap algorithms like everything numbered from 1, so * our pointer into the heap array is offset by one element. */ queue->queue_array++; free(queue->queue_array, M_CAMQ); } } u_int32_t camq_resize(struct camq *queue, int new_size) { cam_pinfo **new_array; KASSERT(new_size >= queue->entries, ("camq_resize: " "New queue size can't accommodate queued entries (%d < %d).", new_size, queue->entries)); new_array = (cam_pinfo **)malloc(new_size * sizeof(cam_pinfo *), M_CAMQ, M_NOWAIT); if (new_array == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } /* * Heap algorithms like everything numbered from 1, so * remember that our pointer into the heap array is offset * by one element. */ if (queue->queue_array != NULL) { queue->queue_array++; bcopy(queue->queue_array, new_array, queue->entries * sizeof(cam_pinfo *)); free(queue->queue_array, M_CAMQ); } queue->queue_array = new_array-1; queue->array_size = new_size; return (CAM_REQ_CMP); } /* * camq_insert: Given an array of cam_pinfo* elements with * the Heap(1, num_elements) property and array_size - num_elements >= 1, * output Heap(1, num_elements+1) including new_entry in the array. */ void camq_insert(struct camq *queue, cam_pinfo *new_entry) { KASSERT(queue->entries < queue->array_size, ("camq_insert: Attempt to insert into a full queue (%d >= %d)", queue->entries, queue->array_size)); queue->entries++; queue->queue_array[queue->entries] = new_entry; new_entry->index = queue->entries; if (queue->entries != 0) heap_up(queue->queue_array, queue->entries); } /* * camq_remove: Given an array of cam_pinfo* elements with the * Heap(1, num_elements) property and an index such that 1 <= index <= * num_elements, remove that entry and restore the Heap(1, num_elements-1) * property.
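 *
 * (Toy sketch, not a kernel code path; it assumes the M_NOWAIT allocation
 * above succeeds. It shows the one-based convention: index 1, CAMQ_HEAD,
 * always holds the entry with the numerically lowest priority.)
 */
static void
camq_demo(void)
{
	static cam_pinfo a, b;
	struct camq *q;

	a.priority = 2;
	b.priority = 1;
	q = camq_alloc(4);
	camq_insert(q, &a);
	camq_insert(q, &b);
	/* b sorts first: queue_cmp() orders by ascending priority value. */
	KASSERT(CAMQ_GET_HEAD(q) == &b, ("unexpected heap head"));
	camq_remove(q, CAMQ_HEAD);	/* removes b */
	camq_remove(q, CAMQ_HEAD);	/* removes a */
	camq_free(q);
}
/*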
*/ cam_pinfo * camq_remove(struct camq *queue, int index) { cam_pinfo *removed_entry; if (index <= 0 || index > queue->entries) panic("%s: Attempt to remove out-of-bounds index %d " "from queue %p of size %d", __func__, index, queue, queue->entries); removed_entry = queue->queue_array[index]; if (queue->entries != index) { queue->queue_array[index] = queue->queue_array[queue->entries]; queue->queue_array[index]->index = index; heap_down(queue->queue_array, index, queue->entries - 1); } removed_entry->index = CAM_UNQUEUED_INDEX; queue->entries--; return (removed_entry); } /* * camq_change_priority: Given an array of cam_pinfo* elements with the * Heap(1, num_elements) property, an index such that 1 <= index <= num_elements, * and a new priority for the element at index, change the priority of * element index and restore the Heap(1, num_elements) property. */ void camq_change_priority(struct camq *queue, int index, u_int32_t new_priority) { if (new_priority > queue->queue_array[index]->priority) { queue->queue_array[index]->priority = new_priority; heap_down(queue->queue_array, index, queue->entries); } else { /* new_priority <= old_priority */ queue->queue_array[index]->priority = new_priority; heap_up(queue->queue_array, index); } } struct cam_devq * cam_devq_alloc(int devices, int openings) { struct cam_devq *devq; devq = (struct cam_devq *)malloc(sizeof(*devq), M_CAMDEVQ, M_NOWAIT); if (devq == NULL) { printf("cam_devq_alloc: - cannot malloc!\n"); return (NULL); } if (cam_devq_init(devq, devices, openings) != 0) { free(devq, M_CAMDEVQ); return (NULL); } return (devq); } int cam_devq_init(struct cam_devq *devq, int devices, int openings) { bzero(devq, sizeof(*devq)); mtx_init(&devq->send_mtx, "CAM queue lock", NULL, MTX_DEF); if (camq_init(&devq->send_queue, devices) != 0) return (1); devq->send_openings = openings; devq->send_active = 0; return (0); } void cam_devq_free(struct cam_devq *devq) { camq_fini(&devq->send_queue); mtx_destroy(&devq->send_mtx); free(devq, M_CAMDEVQ); } u_int32_t cam_devq_resize(struct cam_devq *camq, int devices) { u_int32_t retval; retval = camq_resize(&camq->send_queue, devices); return (retval); } struct cam_ccbq * cam_ccbq_alloc(int openings) { struct cam_ccbq *ccbq; ccbq = (struct cam_ccbq *)malloc(sizeof(*ccbq), M_CAMCCBQ, M_NOWAIT); if (ccbq == NULL) { printf("cam_ccbq_alloc: - cannot malloc!\n"); return (NULL); } if (cam_ccbq_init(ccbq, openings) != 0) { free(ccbq, M_CAMCCBQ); return (NULL); } return (ccbq); } void cam_ccbq_free(struct cam_ccbq *ccbq) { if (ccbq) { cam_ccbq_fini(ccbq); free(ccbq, M_CAMCCBQ); } } u_int32_t cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size) { int delta; delta = new_size - (ccbq->dev_active + ccbq->dev_openings); ccbq->total_openings += delta; ccbq->dev_openings += delta; new_size = imax(64, 1 << fls(new_size + new_size / 2)); if (new_size > ccbq->queue.array_size) return (camq_resize(&ccbq->queue, new_size)); else return (CAM_REQ_CMP); } int cam_ccbq_init(struct cam_ccbq *ccbq, int openings) { bzero(ccbq, sizeof(*ccbq)); if (camq_init(&ccbq->queue, imax(64, 1 << fls(openings + openings / 2))) != 0) return (1); ccbq->total_openings = openings; ccbq->dev_openings = openings; return (0); } void cam_ccbq_fini(struct cam_ccbq *ccbq) { camq_fini(&ccbq->queue); } /* * Heap routines for manipulating CAM queues. */ /* * queue_cmp: Given an array of cam_pinfo* elements and indexes i * and j, return less than 0, 0, or greater than 0 if i is less than, * equal to, or greater than j respectively.
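 *
 * (Worked example of the sizing rule in cam_ccbq_init()/cam_ccbq_resize()
 * above; the helper name is ours, for illustration only. The heap array is
 * padded to the power of two above 1.5x the opening count, with a floor of
 * 64: openings = 100 -> fls(150) = 8 -> 1 << 8 = 256 slots.)
 */
static __inline int
xx_ccbq_array_size(int openings)
{
	return (imax(64, 1 << fls(openings + openings / 2)));
}
/*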
*/ static __inline int queue_cmp(cam_pinfo **queue_array, int i, int j) { if (queue_array[i]->priority == queue_array[j]->priority) return ( queue_array[i]->generation - queue_array[j]->generation ); else return ( queue_array[i]->priority - queue_array[j]->priority ); } /* * swap: Given an array of cam_pinfo* elements and indexes i and j, * exchange elements i and j. */ static __inline void swap(cam_pinfo **queue_array, int i, int j) { cam_pinfo *temp_qentry; temp_qentry = queue_array[j]; queue_array[j] = queue_array[i]; queue_array[i] = temp_qentry; queue_array[j]->index = j; queue_array[i]->index = i; } /* * heap_up: Given an array of cam_pinfo* elements with the * Heap(1, new_index-1) property and a new element in location * new_index, output Heap(1, new_index). */ static void heap_up(cam_pinfo **queue_array, int new_index) { int child; int parent; child = new_index; while (child != 1) { parent = child >> 1; if (queue_cmp(queue_array, parent, child) <= 0) break; swap(queue_array, parent, child); child = parent; } } /* * heap_down: Given an array of cam_pinfo* elements with the * Heap(index + 1, num_entries) property with index containing * an unsorted entry, output Heap(index, num_entries). */ static void heap_down(cam_pinfo **queue_array, int index, int num_entries) { int child; int parent; parent = index; child = parent << 1; for (; child <= num_entries; child = parent << 1) { if (child < num_entries) { /* child+1 is the right child of parent */ if (queue_cmp(queue_array, child + 1, child) < 0) child++; } /* child is now the least child of parent */ if (queue_cmp(queue_array, parent, child) <= 0) break; swap(queue_array, child, parent); parent = child; } } Index: head/sys/cam/cam_queue.h =================================================================== --- head/sys/cam/cam_queue.h (revision 326264) +++ head/sys/cam/cam_queue.h (revision 326265) @@ -1,292 +1,294 @@ /*- * CAM request queue management definitions. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_QUEUE_H #define _CAM_CAM_QUEUE_H 1 #ifdef _KERNEL #include #include #include #include /* * This structure implements a heap based priority queue. 
The queue * assumes that the objects stored in it begin with a cam_pinfo * structure holding the priority information used to sort the objects. * This structure is opaque to clients (outside of the XPT layer) to allow * the implementation to change without affecting them. */ struct camq { cam_pinfo **queue_array; int array_size; int entries; u_int32_t generation; u_int32_t qfrozen_cnt; }; TAILQ_HEAD(ccb_hdr_tailq, ccb_hdr); LIST_HEAD(ccb_hdr_list, ccb_hdr); SLIST_HEAD(ccb_hdr_slist, ccb_hdr); struct cam_ccbq { struct camq queue; struct ccb_hdr_tailq queue_extra_head; int queue_extra_entries; int total_openings; int allocated; int dev_openings; int dev_active; }; struct cam_ed; struct cam_devq { struct mtx send_mtx; struct camq send_queue; int send_openings; int send_active; }; struct cam_devq *cam_devq_alloc(int devices, int openings); int cam_devq_init(struct cam_devq *devq, int devices, int openings); void cam_devq_free(struct cam_devq *devq); u_int32_t cam_devq_resize(struct cam_devq *camq, int openings); /* * Allocate a cam_ccb_queue structure and initialize it. */ struct cam_ccbq *cam_ccbq_alloc(int openings); u_int32_t cam_ccbq_resize(struct cam_ccbq *ccbq, int devices); int cam_ccbq_init(struct cam_ccbq *ccbq, int openings); void cam_ccbq_free(struct cam_ccbq *ccbq); void cam_ccbq_fini(struct cam_ccbq *ccbq); /* * Allocate and initialize a cam_queue structure. */ struct camq *camq_alloc(int size); /* * Resize a cam queue. */ u_int32_t camq_resize(struct camq *queue, int new_size); /* * Initialize a camq structure. Return 0 on success, 1 on failure. */ int camq_init(struct camq *camq, int size); /* * Free a cam_queue structure. This should only be called if a controller * driver fails somehow during its attach routine or is unloaded and has * obtained a cam_queue structure. */ void camq_free(struct camq *queue); /* * Finalize any internal storage or state of a cam_queue. */ void camq_fini(struct camq *queue); /* * camq_insert: Given a CAM queue with at least one open spot, * insert the new entry maintaining order. */ void camq_insert(struct camq *queue, cam_pinfo *new_entry); /* * camq_remove: Remove an arbitrary entry from the queue maintaining * queue order. */ cam_pinfo *camq_remove(struct camq *queue, int index); #define CAMQ_HEAD 1 /* Head of queue index */ /* Index the first element in the heap */ #define CAMQ_GET_HEAD(camq) ((camq)->queue_array[CAMQ_HEAD]) /* Get the first element priority. */ #define CAMQ_GET_PRIO(camq) (((camq)->entries > 0) ? \ ((camq)->queue_array[CAMQ_HEAD]->priority) : 0) /* * camq_change_priority: Raise or lower the priority of an entry * maintaining queue order.
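 *
 * (Sketch, assumed context: how the accounting inlines declared below pair
 * up across the life of one queued CCB.)
 */
static __inline void
xx_ccb_lifecycle(struct cam_ccbq *ccbq, union ccb *ccb)
{
	cam_ccbq_take_opening(ccbq);	/* reserve a slot: allocated++ */
	cam_ccbq_insert_ccb(ccbq, ccb);	/* enqueue by priority */
	/* ... when the CCB is dispatched to the SIM: */
	cam_ccbq_remove_ccb(ccbq, ccb);
	cam_ccbq_send_ccb(ccbq, ccb);	/* dev_active++, dev_openings-- */
	/* ... when the SIM completes it: */
	cam_ccbq_ccb_done(ccbq, ccb);	/* dev_active--, dev_openings++ */
	cam_ccbq_release_opening(ccbq);	/* allocated-- */
}
/*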
*/ void camq_change_priority(struct camq *queue, int index, u_int32_t new_priority); static __inline int cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq); static __inline void cam_ccbq_take_opening(struct cam_ccbq *ccbq); static __inline void cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb); static __inline void cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb); static __inline union ccb * cam_ccbq_peek_ccb(struct cam_ccbq *ccbq, int index); static __inline void cam_ccbq_send_ccb(struct cam_ccbq *queue, union ccb *send_ccb); static __inline void cam_ccbq_ccb_done(struct cam_ccbq *ccbq, union ccb *done_ccb); static __inline void cam_ccbq_release_opening(struct cam_ccbq *ccbq); static __inline int cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq) { return (ccbq->queue.entries + ccbq->queue_extra_entries); } static __inline void cam_ccbq_take_opening(struct cam_ccbq *ccbq) { ccbq->allocated++; } static __inline void cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb) { struct ccb_hdr *old_ccb; struct camq *queue = &ccbq->queue; KASSERT((new_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0 && (new_ccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0, ("%s: Cannot queue ccb %p func_code %#x", __func__, new_ccb, new_ccb->ccb_h.func_code)); /* * If queue is already full, try to resize. * If resize fail, push CCB with lowest priority out to the TAILQ. */ if (queue->entries == queue->array_size && camq_resize(&ccbq->queue, queue->array_size * 2) != CAM_REQ_CMP) { old_ccb = (struct ccb_hdr *)camq_remove(queue, queue->entries); TAILQ_INSERT_HEAD(&ccbq->queue_extra_head, old_ccb, xpt_links.tqe); old_ccb->pinfo.index = CAM_EXTRAQ_INDEX; ccbq->queue_extra_entries++; } camq_insert(queue, &new_ccb->ccb_h.pinfo); } static __inline void cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb) { struct ccb_hdr *cccb, *bccb; struct camq *queue = &ccbq->queue; cam_pinfo *removed_entry __unused; /* If the CCB is on the TAILQ, remove it from there. */ if (ccb->ccb_h.pinfo.index == CAM_EXTRAQ_INDEX) { TAILQ_REMOVE(&ccbq->queue_extra_head, &ccb->ccb_h, xpt_links.tqe); ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX; ccbq->queue_extra_entries--; return; } removed_entry = camq_remove(queue, ccb->ccb_h.pinfo.index); KASSERT(removed_entry == &ccb->ccb_h.pinfo, ("%s: Removed wrong entry from queue (%p != %p)", __func__, removed_entry, &ccb->ccb_h.pinfo)); /* * If there are some CCBs on TAILQ, find the best one and move it * to the emptied space in the queue. 
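 * The extra TAILQ exists because the heap array is bounded: when it fills
 * and cannot be grown, cam_ccbq_insert_ccb() above parks a lowest-priority
 * CCB on the list, and the promotion below moves the most urgent parked
 * CCB back into the heap as soon as a slot frees up.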
*/ bccb = TAILQ_FIRST(&ccbq->queue_extra_head); if (bccb == NULL) return; TAILQ_FOREACH(cccb, &ccbq->queue_extra_head, xpt_links.tqe) { if (bccb->pinfo.priority > cccb->pinfo.priority || (bccb->pinfo.priority == cccb->pinfo.priority && GENERATIONCMP(bccb->pinfo.generation, >, cccb->pinfo.generation))) bccb = cccb; } TAILQ_REMOVE(&ccbq->queue_extra_head, bccb, xpt_links.tqe); ccbq->queue_extra_entries--; camq_insert(queue, &bccb->pinfo); } static __inline union ccb * cam_ccbq_peek_ccb(struct cam_ccbq *ccbq, int index) { return((union ccb *)ccbq->queue.queue_array[index]); } static __inline void cam_ccbq_send_ccb(struct cam_ccbq *ccbq, union ccb *send_ccb) { send_ccb->ccb_h.pinfo.index = CAM_ACTIVE_INDEX; ccbq->dev_active++; ccbq->dev_openings--; } static __inline void cam_ccbq_ccb_done(struct cam_ccbq *ccbq, union ccb *done_ccb) { ccbq->dev_active--; ccbq->dev_openings++; } static __inline void cam_ccbq_release_opening(struct cam_ccbq *ccbq) { ccbq->allocated--; } #endif /* _KERNEL */ #endif /* _CAM_CAM_QUEUE_H */ Index: head/sys/cam/cam_sim.c =================================================================== --- head/sys/cam/cam_sim.c (revision 326264) +++ head/sys/cam/cam_sim.c (revision 326265) @@ -1,173 +1,175 @@ /*- * Common functions for SCSI Interface Modules (SIMs). * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
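 *
 * (Illustrative aside, hypothetical "xx" controller driver with assumed
 * softc fields: the usual attach-time pairing of the functions below is
 * one shared devq plus one SIM per controller.)
 */
static int
xx_cam_attach(struct xx_softc *sc)
{
	struct cam_devq *devq;

	devq = cam_simq_alloc(/*max_sim_transactions*/256);
	if (devq == NULL)
		return (ENOMEM);
	sc->sim = cam_sim_alloc(xx_action, xx_poll, "xx", sc, /*unit*/0,
	    &sc->mtx, /*max_dev_transactions*/1,
	    /*max_tagged_dev_transactions*/255, devq);
	if (sc->sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	return (0);
}
/*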
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #define CAM_PATH_ANY (u_int32_t)-1 static MALLOC_DEFINE(M_CAMSIM, "CAM SIM", "CAM SIM buffers"); static struct mtx cam_sim_free_mtx; MTX_SYSINIT(cam_sim_free_init, &cam_sim_free_mtx, "CAM SIM free lock", MTX_DEF); struct cam_devq * cam_simq_alloc(u_int32_t max_sim_transactions) { return (cam_devq_alloc(/*size*/0, max_sim_transactions)); } void cam_simq_free(struct cam_devq *devq) { cam_devq_free(devq); } struct cam_sim * cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll, const char *sim_name, void *softc, u_int32_t unit, struct mtx *mtx, int max_dev_transactions, int max_tagged_dev_transactions, struct cam_devq *queue) { struct cam_sim *sim; sim = (struct cam_sim *)malloc(sizeof(struct cam_sim), M_CAMSIM, M_ZERO | M_NOWAIT); if (sim == NULL) return (NULL); sim->sim_action = sim_action; sim->sim_poll = sim_poll; sim->sim_name = sim_name; sim->softc = softc; sim->path_id = CAM_PATH_ANY; sim->unit_number = unit; sim->bus_id = 0; /* set in xpt_bus_register */ sim->max_tagged_dev_openings = max_tagged_dev_transactions; sim->max_dev_openings = max_dev_transactions; sim->flags = 0; sim->refcount = 1; sim->devq = queue; sim->mtx = mtx; if (mtx == &Giant) { sim->flags |= 0; callout_init(&sim->callout, 0); } else { sim->flags |= CAM_SIM_MPSAFE; callout_init(&sim->callout, 1); } return (sim); } void cam_sim_free(struct cam_sim *sim, int free_devq) { struct mtx *mtx = sim->mtx; int error; if (mtx) { mtx_assert(mtx, MA_OWNED); } else { mtx = &cam_sim_free_mtx; mtx_lock(mtx); } sim->refcount--; if (sim->refcount > 0) { error = msleep(sim, mtx, PRIBIO, "simfree", 0); KASSERT(error == 0, ("invalid error value for msleep(9)")); } KASSERT(sim->refcount == 0, ("sim->refcount == 0")); if (sim->mtx == NULL) mtx_unlock(mtx); if (free_devq) cam_simq_free(sim->devq); free(sim, M_CAMSIM); } void cam_sim_release(struct cam_sim *sim) { struct mtx *mtx = sim->mtx; if (mtx) { if (!mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; } else { mtx = &cam_sim_free_mtx; mtx_lock(mtx); } KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); sim->refcount--; if (sim->refcount == 0) wakeup(sim); if (mtx) mtx_unlock(mtx); } void cam_sim_hold(struct cam_sim *sim) { struct mtx *mtx = sim->mtx; if (mtx) { if (!mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; } else { mtx = &cam_sim_free_mtx; mtx_lock(mtx); } KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); sim->refcount++; if (mtx) mtx_unlock(mtx); } void cam_sim_set_path(struct cam_sim *sim, u_int32_t path_id) { sim->path_id = path_id; } Index: head/sys/cam/cam_sim.h =================================================================== --- head/sys/cam/cam_sim.h (revision 326264) +++ head/sys/cam/cam_sim.h (revision 326265) @@ -1,146 +1,148 @@ /*- * Data structures and definitions for SCSI Interface Modules (SIMs). * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_SIM_H #define _CAM_CAM_SIM_H 1 #ifdef _KERNEL /* * The sim driver creates a sim for each controller. The sim device * queue is separately created in order to allow resource sharing between * sims. For instance, a driver may create one sim for each channel of * a multi-channel controller and use the same queue for each channel. * In this way, the queue resources are shared across all the channels * of the multi-channel controller. */ struct cam_sim; struct cam_devq; typedef void (*sim_action_func)(struct cam_sim *sim, union ccb *ccb); typedef void (*sim_poll_func)(struct cam_sim *sim); struct cam_devq * cam_simq_alloc(u_int32_t max_sim_transactions); void cam_simq_free(struct cam_devq *devq); struct cam_sim * cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll, const char *sim_name, void *softc, u_int32_t unit, struct mtx *mtx, int max_dev_transactions, int max_tagged_dev_transactions, struct cam_devq *queue); void cam_sim_free(struct cam_sim *sim, int free_devq); void cam_sim_hold(struct cam_sim *sim); void cam_sim_release(struct cam_sim *sim); /* Optional sim attributes may be set with these. */ void cam_sim_set_path(struct cam_sim *sim, u_int32_t path_id); /* Convenience routines for accessing sim attributes. */ static __inline u_int32_t cam_sim_path(struct cam_sim *sim); static __inline const char * cam_sim_name(struct cam_sim *sim); static __inline void * cam_sim_softc(struct cam_sim *sim); static __inline u_int32_t cam_sim_unit(struct cam_sim *sim); static __inline u_int32_t cam_sim_bus(struct cam_sim *sim); /* Generically useful offsets into the sim private area */ #define spriv_ptr0 sim_priv.entries[0].ptr #define spriv_ptr1 sim_priv.entries[1].ptr #define spriv_field0 sim_priv.entries[0].field #define spriv_field1 sim_priv.entries[1].field /* * The sim driver should not access anything directly from this * structure. */ struct cam_sim { sim_action_func sim_action; sim_poll_func sim_poll; const char *sim_name; void *softc; struct mtx *mtx; TAILQ_HEAD(, ccb_hdr) sim_doneq; TAILQ_ENTRY(cam_sim) links; u_int32_t path_id;/* The Boot device may set this to 0? */ u_int32_t unit_number; u_int32_t bus_id; int max_tagged_dev_openings; int max_dev_openings; u_int32_t flags; #define CAM_SIM_REL_TIMEOUT_PENDING 0x01 #define CAM_SIM_MPSAFE 0x02 struct callout callout; struct cam_devq *devq; /* Device Queue to use for this SIM */ int refcount; /* References to the SIM. 
*/ }; #define CAM_SIM_LOCK(sim) mtx_lock((sim)->mtx) #define CAM_SIM_UNLOCK(sim) mtx_unlock((sim)->mtx) static __inline u_int32_t cam_sim_path(struct cam_sim *sim) { return (sim->path_id); } static __inline const char * cam_sim_name(struct cam_sim *sim) { return (sim->sim_name); } static __inline void * cam_sim_softc(struct cam_sim *sim) { return (sim->softc); } static __inline u_int32_t cam_sim_unit(struct cam_sim *sim) { return (sim->unit_number); } static __inline u_int32_t cam_sim_bus(struct cam_sim *sim) { return (sim->bus_id); } #endif /* _KERNEL */ #endif /* _CAM_CAM_SIM_H */ Index: head/sys/cam/cam_xpt.c =================================================================== --- head/sys/cam/cam_xpt.c (revision 326264) +++ head/sys/cam/cam_xpt.c (revision 326265) @@ -1,5583 +1,5585 @@ /*- * Implementation of the Common Access Method Transport (XPT) layer. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_printf.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #include /* for xpt_print below */ #include "opt_cam.h" /* Wild guess based on not wanting to grow the stack too much */ #define XPT_PRINT_MAXLEN 512 #ifdef PRINTF_BUFR_SIZE #define XPT_PRINT_LEN PRINTF_BUFR_SIZE #else #define XPT_PRINT_LEN 128 #endif _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large"); /* * This is the maximum number of high powered commands (e.g. start unit) * that can be outstanding at a particular time. 
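 *
 * (Illustrative aside, assumed names: a SIM action routine normally
 * recovers its softc through the cam_sim_softc() accessor declared in
 * cam_sim.h above instead of reaching into struct cam_sim directly.)
 */
static void
xx_action(struct cam_sim *sim, union ccb *ccb)
{
	struct xx_softc *sc = cam_sim_softc(sim);

	(void)sc;	/* a real handler would consult controller state */
	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
		/* Fill ccb->cpi from the controller's capabilities here. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
/*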
*/ #ifndef CAM_MAX_HIGHPOWER #define CAM_MAX_HIGHPOWER 4 #endif /* Datastructures internal to the xpt layer */ MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); /* Object for deferring XPT actions to a taskqueue */ struct xpt_task { struct task task; void *data1; uintptr_t data2; }; struct xpt_softc { uint32_t xpt_generation; /* number of high powered commands that can go through right now */ struct mtx xpt_highpower_lock; STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; int num_highpower; /* queue for handling async rescan requests. */ TAILQ_HEAD(, ccb_hdr) ccb_scanq; int buses_to_config; int buses_config_done; int announce_nosbuf; /* * Registered buses * * N.B., "busses" is an archaic spelling of "buses". In new code * "buses" is preferred. */ TAILQ_HEAD(,cam_eb) xpt_busses; u_int bus_generation; struct intr_config_hook *xpt_config_hook; int boot_delay; struct callout boot_callout; struct mtx xpt_topo_lock; struct mtx xpt_lock; struct taskqueue *xpt_taskq; }; typedef enum { DM_RET_COPY = 0x01, DM_RET_FLAG_MASK = 0x0f, DM_RET_NONE = 0x00, DM_RET_STOP = 0x10, DM_RET_DESCEND = 0x20, DM_RET_ERROR = 0x30, DM_RET_ACTION_MASK = 0xf0 } dev_match_ret; typedef enum { XPT_DEPTH_BUS, XPT_DEPTH_TARGET, XPT_DEPTH_DEVICE, XPT_DEPTH_PERIPH } xpt_traverse_depth; struct xpt_traverse_config { xpt_traverse_depth depth; void *tr_func; void *tr_arg; }; typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); /* Transport layer configuration information */ static struct xpt_softc xsoftc; MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, &xsoftc.boot_delay, 0, "Bus registration wait time"); SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); SYSCTL_INT(_kern_cam, OID_AUTO, announce_nosbuf, CTLFLAG_RWTUN, &xsoftc.announce_nosbuf, 0, "Don't use sbuf for announcements"); struct cam_doneq { struct mtx_padalign cam_doneq_mtx; STAILQ_HEAD(, ccb_hdr) cam_doneq; int cam_doneq_sleep; }; static struct cam_doneq cam_doneqs[MAXCPU]; static int cam_num_doneqs; static struct proc *cam_proc; SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, &cam_num_doneqs, 0, "Number of completion queues/threads"); struct cam_periph *xpt_periph; static periph_init_t xpt_periph_init; static struct periph_driver xpt_driver = { xpt_periph_init, "xpt", TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(xpt, xpt_driver); static d_open_t xptopen; static d_close_t xptclose; static d_ioctl_t xptioctl; static d_ioctl_t xptdoioctl; static struct cdevsw xpt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = xptopen, .d_close = xptclose, .d_ioctl = xptioctl, .d_name = "xpt", }; /* Storage for debugging datastructures */ struct cam_path *cam_dpath; u_int32_t cam_dflags = CAM_DEBUG_FLAGS; SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, &cam_dflags, 0, "Enabled debug flags"); u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, &cam_debug_delay, 0,
"Delay in us after each debug message"); /* Our boot-time initialization hook */ static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t cam_moduledata = { "cam", cam_module_event_handler, NULL }; static int xpt_init(void *); DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(cam, 1); static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); static union ccb *xpt_get_ccb(struct cam_periph *periph); static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); static void xpt_run_allocq(struct cam_periph *periph, int sleep); static void xpt_run_allocq_task(void *context, int pending); static void xpt_run_devq(struct cam_devq *devq); static timeout_t xpt_release_devq_timeout; static void xpt_release_simq_timeout(void *arg) __unused; static void xpt_acquire_bus(struct cam_eb *bus); static void xpt_release_bus(struct cam_eb *bus); static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); static void xpt_acquire_target(struct cam_et *target); static void xpt_release_target(struct cam_et *target); static struct cam_eb* xpt_find_bus(path_id_t path_id); static struct cam_et* xpt_find_target(struct cam_eb *bus, target_id_t target_id); static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static void xpt_config(void *arg); static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, u_int32_t new_priority); static xpt_devicefunc_t xptpassannouncefunc; static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); static void camisr_runqueue(void); static void xpt_done_process(struct ccb_hdr *ccb_h); static void xpt_done_td(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device); static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph); static xpt_busfunc_t xptedtbusfunc; static xpt_targetfunc_t xptedttargetfunc; static xpt_devicefunc_t xptedtdevicefunc; static xpt_periphfunc_t xptedtperiphfunc; static xpt_pdrvfunc_t xptplistpdrvfunc; static xpt_periphfunc_t xptplistperiphfunc; static int xptedtmatch(struct ccb_dev_match *cdm); static int xptperiphlistmatch(struct ccb_dev_match *cdm); static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg); static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg); static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg); static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg); static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static xpt_busfunc_t xptdefbusfunc; static xpt_targetfunc_t xptdeftargetfunc; static 
xpt_devicefunc_t xptdefdevicefunc; static xpt_periphfunc_t xptdefperiphfunc; static void xpt_finishconfig_task(void *context, int pending); static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); static __inline int device_is_queued(struct cam_ed *device); static __inline int xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) { int retval; mtx_assert(&devq->send_mtx, MA_OWNED); if ((dev->ccbq.queue.entries > 0) && (dev->ccbq.dev_openings > 0) && (dev->ccbq.queue.qfrozen_cnt == 0)) { /* * The priority of a device waiting for controller * resources is that of the highest priority CCB * enqueued. */ retval = xpt_schedule_dev(&devq->send_queue, &dev->devq_entry, CAMQ_GET_PRIO(&dev->ccbq.queue)); } else { retval = 0; } return (retval); } static __inline int device_is_queued(struct cam_ed *device) { return (device->devq_entry.index != CAM_UNQUEUED_INDEX); } static void xpt_periph_init() { make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) return(EPERM); /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { printf("%s: can't do nonblocking access\n", devtoname(dev)); return(ENODEV); } return(0); } static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return(0); } /* * Don't automatically grab the xpt softc lock here even though this is going * through the xpt device. The xpt device is really just a back door for * accessing other devices and SIMs, so the right thing to do is to grab * the appropriate SIM lock once the bus/SIM is located. */ static int xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); } return (error); } static int xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; error = 0; switch(cmd) { /* * For the transport layer CAMIOCOMMAND ioctl, we really only want * to accept CCB types that don't quite make sense to send through a * passthrough driver. XPT_PATH_INQ is an exception to this, as stated * in the CAM spec. 
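 *
 * From userland the flow looks roughly like this (illustrative, error
 * handling elided; in outline this is how camcontrol(8) rescans a bus):
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	memset(&ccb, 0, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb.ccb_h.path_id = bus;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.timeout = 5000;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */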
case CAMIOCOMMAND: { union ccb *ccb; union ccb *inccb; struct cam_eb *bus; inccb = (union ccb *)addr; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (inccb->ccb_h.func_code == XPT_SCSI_IO) inccb->csio.bio = NULL; #endif if (inccb->ccb_h.flags & CAM_UNLOCKED) return (EINVAL); bus = xpt_find_bus(inccb->ccb_h.path_id); if (bus == NULL) return (EINVAL); switch (inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; case XPT_SCAN_TGT: if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; default: break; } switch(inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: case XPT_PATH_INQ: case XPT_ENG_INQ: case XPT_SCAN_LUN: case XPT_SCAN_TGT: ccb = xpt_alloc_ccb(); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb->ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; xpt_free_ccb(ccb); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); xpt_path_lock(ccb->ccb_h.path); cam_periph_runccb(ccb, NULL, 0, 0, NULL); xpt_path_unlock(ccb->ccb_h.path); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); break; case XPT_DEBUG: { union ccb ccb; /* * This is an immediate CCB, so it's okay to * allocate it on the stack. */ /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb.ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); xpt_action(&ccb); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); break; } case XPT_DEV_MATCH: { struct cam_periph_map_info mapinfo; struct cam_path *old_path; /* * We can't deal with physical addresses for this * type of transaction. */ if ((inccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { error = EINVAL; break; } /* * Save this in case the caller had it set to * something in particular. */ old_path = inccb->ccb_h.path; /* * We really don't need a path for the matching * code. The path is needed because of the * debugging statements in xpt_action(). They * assume that the CCB has a valid path. */ inccb->ccb_h.path = xpt_periph->path; bzero(&mapinfo, sizeof(mapinfo)); /* * Map the pattern and match buffers into kernel * virtual address space. */ error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); if (error) { inccb->ccb_h.path = old_path; break; } /* * This is an immediate CCB, we can send it on directly. */ xpt_action(inccb); /* * Map the buffers back into user space. */ cam_periph_unmapmem(inccb, &mapinfo); inccb->ccb_h.path = old_path; error = 0; break; } default: error = ENOTSUP; break; } xpt_release_bus(bus); break; } /* * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input, * with the peripheral driver name and unit name filled in. The other * fields don't really matter as input. The passthrough driver name * ("pass"), and unit number are passed back in the ccb.
The current * device generation number, and the index into the device peripheral * driver list, and the status are also passed back. Note that * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is * (or rather should be) impossible for the device peripheral driver * list to change since we look at the whole thing in one pass, and * we do it with lock protection. * */ case CAMGETPASSTHRU: { union ccb *ccb; struct cam_periph *periph; struct periph_driver **p_drv; char *name; u_int unit; int base_periph_found; ccb = (union ccb *)addr; unit = ccb->cgdl.unit_number; name = ccb->cgdl.periph_name; base_periph_found = 0; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.bio = NULL; #endif /* * Sanity check -- make sure we don't get a null peripheral * driver name. */ if (*ccb->cgdl.periph_name == '\0') { error = EINVAL; break; } /* Keep the list from changing while we traverse it */ xpt_lock_buses(); /* first find our driver in the list of drivers */ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) if (strcmp((*p_drv)->driver_name, name) == 0) break; if (*p_drv == NULL) { xpt_unlock_buses(); ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; break; } /* * Run through every peripheral instance of this driver * and check to see whether it matches the unit passed * in by the user. If it does, get out of the loops and * find the passthrough driver associated with that * peripheral driver. */ for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; periph = TAILQ_NEXT(periph, unit_links)) { if (periph->unit_number == unit) break; } /* * If we found the peripheral driver that the user passed * in, go through all of the peripheral drivers for that * particular device and look for a passthrough driver. */ if (periph != NULL) { struct cam_ed *device; int i; base_periph_found = 1; device = periph->path->device; for (i = 0, periph = SLIST_FIRST(&device->periphs); periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++) { /* * Check to see whether we have a * passthrough device or not. */ if (strcmp(periph->periph_name, "pass") == 0) { /* * Fill in the getdevlist fields. */ strlcpy(ccb->cgdl.periph_name, periph->periph_name, sizeof(ccb->cgdl.periph_name)); ccb->cgdl.unit_number = periph->unit_number; if (SLIST_NEXT(periph, periph_links)) ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; else ccb->cgdl.status = CAM_GDEVLIST_LAST_DEVICE; ccb->cgdl.generation = device->generation; ccb->cgdl.index = i; /* * Fill in some CCB header fields * that the user may want. */ ccb->ccb_h.path_id = periph->path->bus->path_id; ccb->ccb_h.target_id = periph->path->target->target_id; ccb->ccb_h.target_lun = periph->path->device->lun_id; ccb->ccb_h.status = CAM_REQ_CMP; break; } } } /* * If the periph is null here, one of two things has * happened. The first possibility is that we couldn't * find the unit number of the particular peripheral driver * that the user is asking about. e.g. the user asks for * the passthrough driver for "da11". We find the list of * "da" peripherals all right, but there is no unit 11. * The other possibility is that we went through the list * of peripheral drivers attached to the device structure, * but didn't find one with the name "pass". Either way, * we return ENOENT, since we couldn't find something. 
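 * (Userland normally reaches this handler through cam(3): cam_open_device()
 * issues CAMGETPASSTHRU on /dev/xpt0 to translate a name such as "da11"
 * into the pass(4) instance that it then opens.)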
*/ if (periph == NULL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; /* * It is unfortunate that this is even necessary, * but there are many, many clueless users out there. * If this is true, the user is looking for the * passthrough driver, but doesn't have one in his * kernel. */ if (base_periph_found == 1) { printf("xptioctl: pass driver is not in the " "kernel\n"); printf("xptioctl: put \"device pass\" in " "your kernel config file\n"); } } xpt_unlock_buses(); break; } default: error = ENOTTY; break; } return(error); } static int cam_module_event_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if ((error = xpt_init(NULL)) != 0) return (error); break; case MOD_UNLOAD: return EBUSY; default: return EOPNOTSUPP; } return 0; } static struct xpt_proto * xpt_proto_find(cam_proto proto) { struct xpt_proto **pp; SET_FOREACH(pp, cam_xpt_proto_set) { if ((*pp)->proto == proto) return *pp; } return NULL; } static void xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) { if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } else { done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); } xpt_release_boot(); } /* thread to handle bus rescans */ static void xpt_scanner_thread(void *dummy) { union ccb *ccb; struct cam_path path; xpt_lock_buses(); for (;;) { if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, "-", 0); if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_unlock_buses(); /* * Since lock can be dropped inside and path freed * by completion callback even before return here, * take our own path copy for reference. */ xpt_copy_path(&path, ccb->ccb_h.path); xpt_path_lock(&path); xpt_action(ccb); xpt_path_unlock(&path); xpt_release_path(&path); xpt_lock_buses(); } } } void xpt_rescan(union ccb *ccb) { struct ccb_hdr *hdr; /* Prepare request */ if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_TGT; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_LUN; else { xpt_print(ccb->ccb_h.path, "illegal scan path\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, xpt_action_name(ccb->ccb_h.func_code))); ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; ccb->ccb_h.cbfcnp = xpt_rescan_done; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); /* Don't make duplicate entries for the same paths. 
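 * If an equivalent rescan is already queued, the request below is dropped
 * and its path and CCB are freed. For illustration, a typical producer of
 * these requests looks like this sketch (error handling trimmed; "path_id"
 * is a hypothetical bus to scan):
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	if (xpt_create_path(&ccb->ccb_h.path, NULL, path_id,
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);	// consumes both the CCB and the path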
*/ xpt_lock_buses(); if (ccb->ccb_h.ppriv_ptr1 == NULL) { TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); xpt_print(ccb->ccb_h.path, "rescan already queued\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } } } TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xsoftc.buses_to_config++; wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); } /* Functions accessed by the peripheral drivers */ static int xpt_init(void *dummy) { struct cam_sim *xpt_sim; struct cam_path *path; struct cam_devq *devq; cam_status status; int error, i; TAILQ_INIT(&xsoftc.xpt_busses); TAILQ_INIT(&xsoftc.ccb_scanq); STAILQ_INIT(&xsoftc.highpowerq); xsoftc.num_highpower = CAM_MAX_HIGHPOWER; mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); #ifdef CAM_BOOT_DELAY /* * Override this value at compile time to assist our users * who don't use loader to boot a kernel. */ xsoftc.boot_delay = CAM_BOOT_DELAY; #endif /* * The xpt layer is, itself, the equivalent of a SIM. * Allow 16 ccbs in the ccb pool for it. This should * give decent parallelism when we probe buses and * perform other XPT functions. */ devq = cam_simq_alloc(16); xpt_sim = cam_sim_alloc(xptaction, xptpoll, "xpt", /*softc*/NULL, /*unit*/0, /*mtx*/&xsoftc.xpt_lock, /*max_dev_transactions*/0, /*max_tagged_dev_transactions*/0, devq); if (xpt_sim == NULL) return (ENOMEM); mtx_lock(&xsoftc.xpt_lock); if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { mtx_unlock(&xsoftc.xpt_lock); printf("xpt_init: xpt_bus_register failed with status %#x," " failing attach\n", status); return (EINVAL); } mtx_unlock(&xsoftc.xpt_lock); /* * Looking at the XPT from the SIM layer, the XPT is * the equivalent of a peripheral driver. Allocate * a peripheral driver entry for us. */ if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { printf("xpt_init: xpt_create_path failed with status %#x," " failing attach\n", status); return (EINVAL); } xpt_path_lock(path); cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, path, NULL, 0, xpt_sim); xpt_path_unlock(path); xpt_free_path(path); if (cam_num_doneqs < 1) cam_num_doneqs = 1 + mp_ncpus / 6; else if (cam_num_doneqs > MAXCPU) cam_num_doneqs = MAXCPU; for (i = 0; i < cam_num_doneqs; i++) { mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, MTX_DEF); STAILQ_INIT(&cam_doneqs[i].cam_doneq); error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); if (error != 0) { cam_num_doneqs = i; break; } } if (cam_num_doneqs < 1) { printf("xpt_init: Cannot init completion queues " "- failing attach\n"); return (ENOMEM); } /* * Register a callback for when interrupts are enabled. 
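 * (The hook defers the initial bus configuration, xpt_config(), until
 * interrupts are live, since the probe CCBs issued during the scans
 * depend on interrupt-driven I/O.)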
*/ xsoftc.xpt_config_hook = (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), M_CAMXPT, M_NOWAIT | M_ZERO); if (xsoftc.xpt_config_hook == NULL) { printf("xpt_init: Cannot malloc config hook " "- failing attach\n"); return (ENOMEM); } xsoftc.xpt_config_hook->ich_func = xpt_config; if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { free (xsoftc.xpt_config_hook, M_CAMXPT); printf("xpt_init: config_intrhook_establish failed " "- failing attach\n"); } return (0); } static cam_status xptregister(struct cam_periph *periph, void *arg) { struct cam_sim *xpt_sim; if (periph == NULL) { printf("xptregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } xpt_sim = (struct cam_sim *)arg; xpt_sim->softc = periph; xpt_periph = periph; periph->softc = NULL; return(CAM_REQ_CMP); } int32_t xpt_add_periph(struct cam_periph *periph) { struct cam_ed *device; int32_t status; TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); device = periph->path->device; status = CAM_REQ_CMP; if (device != NULL) { mtx_lock(&device->target->bus->eb_mtx); device->generation++; SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); mtx_unlock(&device->target->bus->eb_mtx); atomic_add_32(&xsoftc.xpt_generation, 1); } return (status); } void xpt_remove_periph(struct cam_periph *periph) { struct cam_ed *device; device = periph->path->device; if (device != NULL) { mtx_lock(&device->target->bus->eb_mtx); device->generation++; SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); mtx_unlock(&device->target->bus->eb_mtx); atomic_add_32(&xsoftc.xpt_generation, 1); } } void xpt_announce_periph(struct cam_periph *periph, char *announce_string) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); proto = xpt_proto_find(path->device->protocol); if (proto) proto->ops->announce(path->device); else printf("%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ printf("%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ path->bus->xport->ops->announce(periph); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { printf("%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
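 * For illustration, the printf above produces a console line of the form
 * (the device and controller names are hypothetical):
 *
 *	da0 at ahc0 bus 0 scbus0 target 1 lun 0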
*/ if (announce_string != NULL) printf("%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, char *announce_string) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); periph->flags |= CAM_PERIPH_ANNOUNCED; /* Fall back to the non-sbuf method if necessary */ if (xsoftc.announce_nosbuf != 0) { xpt_announce_periph(periph, announce_string); return; } proto = xpt_proto_find(path->device->protocol); if (((proto != NULL) && (proto->ops->announce_sbuf == NULL)) || (path->bus->xport->ops->announce_sbuf == NULL)) { xpt_announce_periph(periph, announce_string); return; } sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); if (proto) proto->ops->announce_sbuf(path->device, sb); else sbuf_printf(sb, "%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) { /* Don't wrap the screen - print only the first 60 chars */ sbuf_printf(sb, "%s%d: Serial Number %.60s\n", periph->periph_name, periph->unit_number, path->device->serial_num); } /* Announce transport details. */ path->bus->xport->ops->announce_sbuf(periph, sb); /* Announce command queueing. */ if (path->device->inq_flags & SID_CmdQue || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { sbuf_printf(sb, "%s%d: Command Queueing enabled\n", periph->periph_name, periph->unit_number); } /* Announce caller's details if they've passed in. 
*/ if (announce_string != NULL) sbuf_printf(sb, "%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) { if (quirks != 0) { printf("%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, int quirks, char *bit_string) { if (xsoftc.announce_nosbuf != 0) { xpt_announce_quirks(periph, quirks, bit_string); return; } if (quirks != 0) { sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_denounce_periph(struct cam_periph *periph) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); proto = xpt_proto_find(path->device->protocol); if (proto) proto->ops->denounce(path->device); else printf("%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) printf(" s/n %.60s", path->device->serial_num); printf(" detached\n"); } void xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct cam_path *path = periph->path; struct xpt_proto *proto; cam_periph_assert(periph, MA_OWNED); /* Fall back to the non-sbuf method if necessary */ if (xsoftc.announce_nosbuf != 0) { xpt_denounce_periph(periph); return; } proto = xpt_proto_find(path->device->protocol); if ((proto != NULL) && (proto->ops->denounce_sbuf == NULL)) { xpt_denounce_periph(periph); return; } sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); if (proto) proto->ops->denounce_sbuf(path->device, sb); else sbuf_printf(sb, "%s%d: Unknown protocol device %d\n", periph->periph_name, periph->unit_number, path->device->protocol); if (path->device->serial_num_len > 0) sbuf_printf(sb, " s/n %.60s", path->device->serial_num); sbuf_printf(sb, " detached\n"); } int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) { int ret = -1, l, o; struct ccb_dev_advinfo cdai; struct scsi_vpd_id_descriptor *idd; xpt_path_assert(path, MA_OWNED); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = len; if (!strcmp(attr, "GEOM::ident")) cdai.buftype = CDAI_TYPE_SERIAL_NUM; else if (!strcmp(attr, "GEOM::physpath")) cdai.buftype = CDAI_TYPE_PHYS_PATH; else if (strcmp(attr, "GEOM::lunid") == 0 || strcmp(attr, "GEOM::lunname") == 0) { cdai.buftype = CDAI_TYPE_SCSI_DEVID; cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; } else goto out; cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO); if (cdai.buf == NULL) { ret = ENOMEM; goto out; } xpt_action((union ccb *)&cdai); /* can only be synchronous */ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.provsiz == 0) goto out; if 
(cdai.buftype == CDAI_TYPE_SCSI_DEVID) { if (strcmp(attr, "GEOM::lunid") == 0) { idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_naa); if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_eui64); if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_uuid); if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_md5); } else idd = NULL; if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_t10); if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_name); if (idd == NULL) goto out; ret = 0; if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { if (idd->length < len) { for (l = 0; l < idd->length; l++) buf[l] = idd->identifier[l] ? idd->identifier[l] : ' '; buf[l] = 0; } else ret = EFAULT; } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) { l = strnlen(idd->identifier, idd->length); if (l < len) { bcopy(idd->identifier, buf, l); buf[l] = 0; } else ret = EFAULT; } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) { if ((idd->length - 2) * 2 + 4 < len) { for (l = 2, o = 0; l < idd->length; l++) { if (l == 6 || l == 8 || l == 10 || l == 12) o += sprintf(buf + o, "-"); o += sprintf(buf + o, "%02x", idd->identifier[l]); } } else ret = EFAULT; } else { if (idd->length * 2 < len) { for (l = 0; l < idd->length; l++) sprintf(buf + l * 2, "%02x", idd->identifier[l]); } else ret = EFAULT; } } else { ret = 0; if (strlcpy(buf, cdai.buf, len) >= len) ret = EFAULT; } out: if (cdai.buf != NULL) free(cdai.buf, M_CAMXPT); return ret; } static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (bus == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this bus matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct bus_match_pattern *cur_pattern; /* * If the pattern in question isn't for a bus node, we * aren't interested. However, we do indicate to the * calling routine that we should continue descending the * tree, since the user wants to match against lower-level * EDT elements. */ if (patterns[i].type != DEV_MATCH_BUS) { if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.bus_pattern; /* * If they want to match any bus node, we give them any * device node. */ if (cur_pattern->flags == BUS_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * If we've already decided on an action, go ahead * and return. */ if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) return(retval); } /* * Not sure why someone would do this... 
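 * (A BUS_MATCH_NONE pattern can never match, so it is simply skipped.)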
*/ if (cur_pattern->flags == BUS_MATCH_NONE) continue; if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) && (cur_pattern->path_id != bus->path_id)) continue; if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) && (cur_pattern->bus_id != bus->sim->bus_id)) continue; if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) && (cur_pattern->unit_number != bus->sim->unit_number)) continue; if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this bus. So tell the caller to copy the * data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a non-bus matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a non-bus * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen anything other than bus matching patterns. So * tell the caller to stop descending the tree -- the user doesn't * want to match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (device == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this device matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct device_match_pattern *cur_pattern; struct scsi_vpd_device_id *device_id_page; /* * If the pattern in question isn't for a device node, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_DEVICE) { if ((patterns[i].type == DEV_MATCH_PERIPH) && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.device_pattern; /* Error out if mutually exclusive options are specified. */ if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) return(DM_RET_ERROR); /* * If they want to match any device node, we give them any * device node. */ if (cur_pattern->flags == DEV_MATCH_ANY) goto copy_dev_node; /* * Not sure why someone would do this... 
*/ if (cur_pattern->flags == DEV_MATCH_NONE) continue; if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) && (cur_pattern->path_id != device->target->bus->path_id)) continue; if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) && (cur_pattern->target_id != device->target->target_id)) continue; if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) && (cur_pattern->target_lun != device->lun_id)) continue; if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) && (cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)&cur_pattern->data.inq_pat, 1, sizeof(cur_pattern->data.inq_pat), scsi_static_inquiry_match) == NULL)) continue; device_id_page = (struct scsi_vpd_device_id *)device->device_id; if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN || scsi_devid_match((uint8_t *)device_id_page->desc_list, device->device_id_len - SVPD_DEVICE_ID_HDR_LEN, cur_pattern->data.devid_pat.id, cur_pattern->data.devid_pat.id_len) != 0)) continue; copy_dev_node: /* * If we get to this point, the user definitely wants * information on this device. So tell the caller to copy * the data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a peripheral matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a peripheral * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen any peripheral matching patterns. So tell the * caller to stop descending the tree -- the user doesn't want to * match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } /* * Match a single peripheral against any number of match patterns. */ static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph) { dev_match_ret retval; u_int i; /* * If we aren't given something to match against, that's an error. */ if (periph == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this peripheral matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_STOP | DM_RET_COPY); /* * There aren't any nodes below a peripheral node, so there's no * reason to descend the tree any further. */ retval = DM_RET_STOP; for (i = 0; i < num_patterns; i++) { struct periph_match_pattern *cur_pattern; /* * If the pattern in question isn't for a peripheral, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_PERIPH) continue; cur_pattern = &patterns[i].pattern.periph_pattern; /* * If they want to match on anything, then we will do so. */ if (cur_pattern->flags == PERIPH_MATCH_ANY) { /* set the copy flag */ retval |= DM_RET_COPY; /* * We've already set the return action to stop, * since there are no nodes below peripherals in * the tree. */ return(retval); } /* * Not sure why someone would do this... */ if (cur_pattern->flags == PERIPH_MATCH_NONE) continue; if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) && (cur_pattern->path_id != periph->path->bus->path_id)) continue; /* * For the target and lun id's, we have to make sure the * target and lun pointers aren't NULL. 
The xpt peripheral * has a wildcard target and device. */ if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) && ((periph->path->target == NULL) ||(cur_pattern->target_id != periph->path->target->target_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) && ((periph->path->device == NULL) || (cur_pattern->target_lun != periph->path->device->lun_id))) continue; if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) && (cur_pattern->unit_number != periph->unit_number)) continue; if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) && (strncmp(cur_pattern->periph_name, periph->periph_name, DEV_IDLEN) != 0)) continue; /* * If we get to this point, the user definitely wants * information on this peripheral. So tell the caller to * copy the data out. */ retval |= DM_RET_COPY; /* * The return action has already been set to stop, since * peripherals don't have any nodes below them in the EDT. */ return(retval); } /* * If we get to this point, the peripheral that was passed in * doesn't match any of the patterns. */ return(retval); } static int xptedtbusfunc(struct cam_eb *bus, void *arg) { struct ccb_dev_match *cdm; struct cam_et *target; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) retval = DM_RET_DESCEND; else retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); /* * If we got an error, bail out of the search. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this bus out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; cdm->pos.cookie.bus = bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_BUS; cdm->matches[j].result.bus_result.path_id = bus->path_id; cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; cdm->matches[j].result.bus_result.unit_number = bus->sim->unit_number; strlcpy(cdm->matches[j].result.bus_result.dev_name, bus->sim->sim_name, sizeof(cdm->matches[j].result.bus_result.dev_name)); } /* * If the user is only interested in buses, there's no * reason to descend to the next level in the tree. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a target generation recorded, check it to * make sure the target list hasn't changed. 
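 * (Generation counts are bumped whenever an entry is added to or removed
 * from the corresponding list -- see e.g. xpt_add_periph() above -- so a
 * saved position cookie can be detected as stale with a simple compare.)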
*/ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) { if ((cdm->pos.generations[CAM_TARGET_GENERATION] != bus->generation)) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return (0); } target = (struct cam_et *)cdm->pos.cookie.target; target->refcount++; } else target = NULL; mtx_unlock(&bus->eb_mtx); return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); } static int xptedttargetfunc(struct cam_et *target, void *arg) { struct ccb_dev_match *cdm; struct cam_eb *bus; struct cam_ed *device; cdm = (struct ccb_dev_match *)arg; bus = target->bus; /* * If there is a device list generation recorded, check it to * make sure the device list hasn't changed. */ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device != NULL)) { if (cdm->pos.generations[CAM_DEV_GENERATION] != target->generation) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } device = (struct cam_ed *)cdm->pos.cookie.device; device->refcount++; } else device = NULL; mtx_unlock(&bus->eb_mtx); return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); } static int xptedtdevicefunc(struct cam_ed *device, void *arg) { struct cam_eb *bus; struct cam_periph *periph; struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; bus = device->target->bus; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) retval = DM_RET_DESCEND; else retval = xptdevicematch(cdm->patterns, cdm->num_patterns, device); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this device out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
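 * Consumers are expected to loop on this, preserving cdm->pos between
 * calls; roughly (an illustrative sketch, not a complete caller):
 *
 *	do {
 *		xpt_action((union ccb *)cdm);	// XPT_DEV_MATCH
 *		... consume cdm->matches[0 .. cdm->num_matches - 1] ...
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);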
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; cdm->pos.cookie.bus = device->target->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = device->target; cdm->pos.generations[CAM_TARGET_GENERATION] = device->target->bus->generation; cdm->pos.cookie.device = device; cdm->pos.generations[CAM_DEV_GENERATION] = device->target->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_DEVICE; cdm->matches[j].result.device_result.path_id = device->target->bus->path_id; cdm->matches[j].result.device_result.target_id = device->target->target_id; cdm->matches[j].result.device_result.target_lun = device->lun_id; cdm->matches[j].result.device_result.protocol = device->protocol; bcopy(&device->inq_data, &cdm->matches[j].result.device_result.inq_data, sizeof(struct scsi_inquiry_data)); bcopy(&device->ident_data, &cdm->matches[j].result.device_result.ident_data, sizeof(struct ata_params)); bcopy(&device->mmc_ident_data, &cdm->matches[j].result.device_result.mmc_ident_data, sizeof(struct mmc_params)); /* Let the user know whether this device is unconfigured */ if (device->flags & CAM_DEV_UNCONFIGURED) cdm->matches[j].result.device_result.flags = DEV_RESULT_UNCONFIGURED; else cdm->matches[j].result.device_result.flags = DEV_RESULT_NOFLAG; } /* * If the user isn't interested in peripherals, don't descend * the tree any further. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a peripheral list generation recorded, make sure * it hasn't changed. */ xpt_lock_buses(); mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == device->target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != device->generation) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); } static int xptedtperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; size_t l; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | CAM_DEV_POS_PERIPH; cdm->pos.cookie.bus = periph->path->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = periph->path->target; cdm->pos.generations[CAM_TARGET_GENERATION] = periph->path->bus->generation; cdm->pos.cookie.device = periph->path->device; cdm->pos.generations[CAM_DEV_GENERATION] = periph->path->target->generation; cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = periph->path->device->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; l = sizeof(cdm->matches[j].result.periph_result.periph_name); strlcpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, l); } return(1); } static int xptedtmatch(struct ccb_dev_match *cdm) { struct cam_eb *bus; int ret; cdm->num_matches = 0; /* * Check the bus list generation. If it has changed, the user * needs to reset everything and start over. */ xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus != NULL)) { if (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } bus = (struct cam_eb *)cdm->pos.cookie.bus; bus->refcount++; } else bus = NULL; xpt_unlock_buses(); ret = xptbustraverse(bus, xptedtbusfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the EDT. It also means that one of the subroutines * has set the status field to the proper value. If we get back 1, * we've fully traversed the EDT and copied out any matching entries. */ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) { struct cam_periph *periph; struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != (*pdrv)->generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; xpt_unlock_buses(); return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); } static int xptplistperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. 
*/ if (retval & DM_RET_COPY) { int spaceleft, j; size_t l; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. */ if (spaceleft < sizeof(struct dev_match_result)) { struct periph_driver **pdrv; pdrv = NULL; bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | CAM_DEV_POS_PERIPH; /* * This may look a bit non-sensical, but it is * actually quite logical. There are very few * peripheral drivers, and bloating every peripheral * structure with a pointer back to its parent * peripheral driver linker set entry would cost * more in the long run than doing this quick lookup. */ for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { if (strcmp((*pdrv)->driver_name, periph->periph_name) == 0) break; } if (*pdrv == NULL) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } cdm->pos.cookie.pdrv = pdrv; /* * The periph generation slot does double duty, as * does the periph pointer slot. They are used for * both edt and pdrv lookups and positioning. */ cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = (*pdrv)->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; /* * The transport layer peripheral doesn't have a target or * lun. */ if (periph->path->target) cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; else cdm->matches[j].result.periph_result.target_id = CAM_TARGET_WILDCARD; if (periph->path->device) cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; else cdm->matches[j].result.periph_result.target_lun = CAM_LUN_WILDCARD; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; l = sizeof(cdm->matches[j].result.periph_result.periph_name); strlcpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, l); } return(1); } static int xptperiphlistmatch(struct ccb_dev_match *cdm) { int ret; cdm->num_matches = 0; /* * At this point in the edt traversal function, we check the bus * list generation to make sure that no buses have been added or * removed since the user last sent a XPT_DEV_MATCH ccb through. * For the peripheral driver list traversal function, however, we * don't have to worry about new peripheral driver types coming or * going; they're in a linker set, and therefore can't change * without a recompile. */ if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv != NULL)) ret = xptpdrvtraverse( (struct periph_driver **)cdm->pos.cookie.pdrv, xptplistpdrvfunc, cdm); else ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the peripheral driver tree. It also means that one of * the subroutines has set the status field to the proper value. If * we get back 1, we've fully traversed the EDT and copied out any * matching entries. 
*/ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) { struct cam_eb *bus, *next_bus; int retval; retval = 1; if (start_bus) bus = start_bus; else { xpt_lock_buses(); bus = TAILQ_FIRST(&xsoftc.xpt_busses); if (bus == NULL) { xpt_unlock_buses(); return (retval); } bus->refcount++; xpt_unlock_buses(); } for (; bus != NULL; bus = next_bus) { retval = tr_func(bus, arg); if (retval == 0) { xpt_release_bus(bus); break; } xpt_lock_buses(); next_bus = TAILQ_NEXT(bus, links); if (next_bus) next_bus->refcount++; xpt_unlock_buses(); xpt_release_bus(bus); } return(retval); } static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg) { struct cam_et *target, *next_target; int retval; retval = 1; if (start_target) target = start_target; else { mtx_lock(&bus->eb_mtx); target = TAILQ_FIRST(&bus->et_entries); if (target == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } target->refcount++; mtx_unlock(&bus->eb_mtx); } for (; target != NULL; target = next_target) { retval = tr_func(target, arg); if (retval == 0) { xpt_release_target(target); break; } mtx_lock(&bus->eb_mtx); next_target = TAILQ_NEXT(target, links); if (next_target) next_target->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_target(target); } return(retval); } static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_ed *device, *next_device; int retval; retval = 1; bus = target->bus; if (start_device) device = start_device; else { mtx_lock(&bus->eb_mtx); device = TAILQ_FIRST(&target->ed_entries); if (device == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } device->refcount++; mtx_unlock(&bus->eb_mtx); } for (; device != NULL; device = next_device) { mtx_lock(&device->device_mtx); retval = tr_func(device, arg); mtx_unlock(&device->device_mtx); if (retval == 0) { xpt_release_device(device); break; } mtx_lock(&bus->eb_mtx); next_device = TAILQ_NEXT(device, links); if (next_device) next_device->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_device(device); } return(retval); } static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_periph *periph, *next_periph; int retval; retval = 1; bus = device->target->bus; if (start_periph) periph = start_periph; else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); periph = SLIST_FIRST(&device->periphs); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = SLIST_NEXT(periph, periph_links); if (periph == NULL) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (retval); } periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { retval = tr_func(periph, arg); if (retval == 0) { cam_periph_release_locked(periph); break; } xpt_lock_buses(); mtx_lock(&bus->eb_mtx); next_periph = SLIST_NEXT(periph, periph_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = SLIST_NEXT(next_periph, periph_links); if (next_periph) next_periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cam_periph_release_locked(periph); } return(retval); } static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg) { struct periph_driver **pdrv; int retval; retval = 1; /* * We don't traverse the peripheral driver 
list like we do the * other lists, because it is a linker set, and therefore cannot be * changed during runtime. If the peripheral driver list is ever * re-done to be something other than a linker set (i.e. it can * change while the system is running), the list traversal should * be modified to work like the other traversal functions. */ for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); *pdrv != NULL; pdrv++) { retval = tr_func(pdrv, arg); if (retval == 0) return(retval); } return(retval); } static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; int retval; retval = 1; if (start_periph) periph = start_periph; else { xpt_lock_buses(); periph = TAILQ_FIRST(&(*pdrv)->units); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = TAILQ_NEXT(periph, unit_links); if (periph == NULL) { xpt_unlock_buses(); return (retval); } periph->refcount++; xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { cam_periph_lock(periph); retval = tr_func(periph, arg); cam_periph_unlock(periph); if (retval == 0) { cam_periph_release(periph); break; } xpt_lock_buses(); next_periph = TAILQ_NEXT(periph, unit_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = TAILQ_NEXT(next_periph, unit_links); if (next_periph) next_periph->refcount++; xpt_unlock_buses(); cam_periph_release(periph); } return(retval); } static int xptdefbusfunc(struct cam_eb *bus, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_BUS) { xpt_busfunc_t *tr_func; tr_func = (xpt_busfunc_t *)tr_config->tr_func; return(tr_func(bus, tr_config->tr_arg)); } else return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); } static int xptdeftargetfunc(struct cam_et *target, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_TARGET) { xpt_targetfunc_t *tr_func; tr_func = (xpt_targetfunc_t *)tr_config->tr_func; return(tr_func(target, tr_config->tr_arg)); } else return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); } static int xptdefdevicefunc(struct cam_ed *device, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_DEVICE) { xpt_devicefunc_t *tr_func; tr_func = (xpt_devicefunc_t *)tr_config->tr_func; return(tr_func(device, tr_config->tr_arg)); } else return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); } static int xptdefperiphfunc(struct cam_periph *periph, void *arg) { struct xpt_traverse_config *tr_config; xpt_periphfunc_t *tr_func; tr_config = (struct xpt_traverse_config *)arg; tr_func = (xpt_periphfunc_t *)tr_config->tr_func; /* * Unlike the other default functions, we don't check for depth * here. The peripheral driver level is the last level in the EDT, * so if we're here, we should execute the function in question. */ return(tr_func(periph, tr_config->tr_arg)); } /* * Execute the given function for every bus in the EDT. */ static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_BUS; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } /* * Execute the given function for every device in the EDT. 
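 * The callback returns 1 to continue the walk and 0 to stop it. For
 * illustration (a sketch with a hypothetical callback):
 *
 *	static int
 *	xpt_count_dev(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);	// keep traversing
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xpt_count_dev, &count);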
*/ static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_DEVICE; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } static int xptsetasyncfunc(struct cam_ed *device, void *arg) { struct cam_path path; struct ccb_getdev cgd; struct ccb_setasync *csa = (struct ccb_setasync *)arg; /* * Don't report unconfigured devices (Wildcard devs, * devices only for target mode, device instances * that have been invalidated but are waiting for * their last reference count to be released). */ if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) return (1); xpt_compile_path(&path, NULL, device->target->bus->path_id, device->target->target_id, device->lun_id); xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); csa->callback(csa->callback_arg, AC_FOUND_DEVICE, &path, &cgd); xpt_release_path(&path); return(1); } static int xptsetasyncbusfunc(struct cam_eb *bus, void *arg) { struct cam_path path; struct ccb_pathinq cpi; struct ccb_setasync *csa = (struct ccb_setasync *)arg; xpt_compile_path(&path, /*periph*/NULL, bus->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); xpt_path_lock(&path); xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); csa->callback(csa->callback_arg, AC_PATH_REGISTERED, &path, &cpi); xpt_path_unlock(&path); xpt_release_path(&path); return(1); } void xpt_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code))); start_ccb->ccb_h.status = CAM_REQ_INPROG; (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); } void xpt_action_default(union ccb *start_ccb) { struct cam_path *path; struct cam_sim *sim; struct mtx *mtx; path = start_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code))); switch (start_ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct cam_ed *device; /* * For the sake of compatibility with SCSI-1 * devices that may not understand the identify * message, we include lun information in the * second byte of all commands. SCSI-1 specifies * that luns are a 3 bit value and reserves only 3 * bits for lun information in the CDB. Later * revisions of the SCSI spec allow for more than 8 * luns, but have deprecated lun information in the * CDB. So, if the lun won't fit, we must omit it. * * Also be aware that during initial probing for devices, * the inquiry information is unknown but initialized to 0. * This means that this code will be exercised while probing * devices with an ANSI revision greater than 2. */ device = path->device; if (device->protocol_version <= SCSI_REV_2 && start_ccb->ccb_h.target_lun < 8 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { start_ccb->csio.cdb_io.cdb_bytes[1] |= start_ccb->ccb_h.target_lun << 5; } start_ccb->csio.scsi_status = SCSI_STATUS_OK; } /* FALLTHROUGH */ case XPT_TARGET_IO: case XPT_CONT_TARGET_IO: start_ccb->csio.sense_resid = 0; start_ccb->csio.resid = 0; /* FALLTHROUGH */ case XPT_ATA_IO: if (start_ccb->ccb_h.func_code == XPT_ATA_IO) start_ccb->ataio.resid = 0; /* FALLTHROUGH */ case XPT_NVME_IO: /* FALLTHROUGH */ case XPT_NVME_ADMIN: /* FALLTHROUGH */ case XPT_MMC_IO: /* XXX just like nvme_io?
*/ case XPT_RESET_DEV: case XPT_ENG_EXEC: case XPT_SMP_IO: { struct cam_devq *devq; devq = path->bus->sim->devq; mtx_lock(&devq->send_mtx); cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); if (xpt_schedule_devq(devq, path->device) != 0) xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); break; } case XPT_CALC_GEOMETRY: /* Filter out garbage */ if (start_ccb->ccg.block_size == 0 || start_ccb->ccg.volume_size == 0) { start_ccb->ccg.cylinders = 0; start_ccb->ccg.heads = 0; start_ccb->ccg.secs_per_track = 0; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #if defined(__sparc64__) /* * For sparc64, we may need to adjust the geometry of large * disks in order to fit the limitations of the 16-bit * fields of the VTOC8 disk label. */ if (scsi_da_bios_params(&start_ccb->ccg) != 0) { start_ccb->ccb_h.status = CAM_REQ_CMP; break; } #endif goto call_sim; case XPT_ABORT: { union ccb* abort_ccb; abort_ccb = start_ccb->cab.abort_ccb; if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { struct cam_ed *device; struct cam_devq *devq; device = abort_ccb->ccb_h.path->device; devq = device->sim->devq; mtx_lock(&devq->send_mtx); if (abort_ccb->ccb_h.pinfo.index > 0) { cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq_device(device, 1); mtx_unlock(&devq->send_mtx); xpt_done(abort_ccb); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } mtx_unlock(&devq->send_mtx); if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { /* * We've caught this ccb en route to * the SIM. Flag it for abort and the * SIM will do so just before starting * real work on the CCB. */ abort_ccb->ccb_h.status = CAM_REQ_ABORTED|CAM_DEV_QFRZN; xpt_freeze_devq(abort_ccb->ccb_h.path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; } } if (XPT_FC_IS_QUEUED(abort_ccb) && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { /* * It's already completed but waiting * for our SWI to get to it. */ start_ccb->ccb_h.status = CAM_UA_ABORT; break; } /* * If we weren't able to take care of the abort request * in the XPT, pass the request down to the SIM for processing.
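 * (At that point only the SIM, which may already have the CCB active on
 * the controller, is in a position to abort it.)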
*/ } /* FALLTHROUGH */ case XPT_ACCEPT_TARGET_IO: case XPT_EN_LUN: case XPT_IMMED_NOTIFY: case XPT_NOTIFY_ACK: case XPT_RESET_BUS: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: case XPT_GET_SIM_KNOB_OLD: case XPT_GET_SIM_KNOB: case XPT_SET_SIM_KNOB: case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: case XPT_PATH_INQ: call_sim: sim = path->bus->sim; mtx = sim->mtx; if (mtx && !mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); (*(sim->sim_action))(sim, start_ccb); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); if (mtx) mtx_unlock(mtx); break; case XPT_PATH_STATS: start_ccb->cpis.last_reset = path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GDEV_TYPE: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdev *cgd; cgd = &start_ccb->cgd; cgd->protocol = dev->protocol; cgd->inq_data = dev->inq_data; cgd->ident_data = dev->ident_data; cgd->inq_flags = dev->inq_flags; cgd->ccb_h.status = CAM_REQ_CMP; cgd->serial_num_len = dev->serial_num_len; if ((dev->serial_num_len > 0) && (dev->serial_num != NULL)) bcopy(dev->serial_num, cgd->serial_num, dev->serial_num_len); } break; } case XPT_GDEV_STATS: { struct ccb_getdevstats *cgds = &start_ccb->cgds; struct cam_ed *dev = path->device; struct cam_eb *bus = path->bus; struct cam_et *tar = path->target; struct cam_devq *devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cgds->dev_openings = dev->ccbq.dev_openings; cgds->dev_active = dev->ccbq.dev_active; cgds->allocated = dev->ccbq.allocated; cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; cgds->last_reset = tar->last_reset; cgds->maxtags = dev->maxtags; cgds->mintags = dev->mintags; if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) cgds->last_reset = bus->last_reset; mtx_unlock(&devq->send_mtx); cgds->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GDEVLIST: { struct cam_periph *nperiph; struct periph_list *periph_head; struct ccb_getdevlist *cgdl; u_int i; struct cam_ed *device; int found; found = 0; /* * Don't want anyone mucking with our data. */ device = path->device; periph_head = &device->periphs; cgdl = &start_ccb->cgdl; /* * Check and see if the list has changed since the user * last requested a list member. If so, tell them that the * list has changed, and therefore they need to start over * from the beginning. */ if ((cgdl->index != 0) && (cgdl->generation != device->generation)) { cgdl->status = CAM_GDEVLIST_LIST_CHANGED; break; } /* * Traverse the list of peripherals and attempt to find * the requested peripheral. 
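 * Callers advance through the list one entry per XPT_GDEVLIST call,
 * carrying index and generation forward; roughly (an illustrative
 * sketch):
 *
 *	cgdl->index = 0;
 *	cgdl->generation = 0;
 *	do {
 *		xpt_action((union ccb *)cgdl);
 *	} while (cgdl->status == CAM_GDEVLIST_MORE_DEVS);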
*/ for (nperiph = SLIST_FIRST(periph_head), i = 0; (nperiph != NULL) && (i <= cgdl->index); nperiph = SLIST_NEXT(nperiph, periph_links), i++) { if (i == cgdl->index) { strlcpy(cgdl->periph_name, nperiph->periph_name, sizeof(cgdl->periph_name)); cgdl->unit_number = nperiph->unit_number; found = 1; } } if (found == 0) { cgdl->status = CAM_GDEVLIST_ERROR; break; } if (nperiph == NULL) cgdl->status = CAM_GDEVLIST_LAST_DEVICE; else cgdl->status = CAM_GDEVLIST_MORE_DEVS; cgdl->index++; cgdl->generation = device->generation; cgdl->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEV_MATCH: { dev_pos_type position_type; struct ccb_dev_match *cdm; cdm = &start_ccb->cdm; /* * There are two ways of getting at information in the EDT. * The first way is via the primary EDT tree. It starts * with a list of buses, then a list of targets on a bus, * then devices/luns on a target, and then peripherals on a * device/lun. The "other" way is by the peripheral driver * lists. The peripheral driver lists are organized by * peripheral driver. (obviously) So it makes sense to * use the peripheral driver list if the user is looking * for something like "da1", or all "da" devices. If the * user is looking for something on a particular bus/target * or lun, it's generally better to go through the EDT tree. */ if (cdm->pos.position_type != CAM_DEV_POS_NONE) position_type = cdm->pos.position_type; else { u_int i; position_type = CAM_DEV_POS_NONE; for (i = 0; i < cdm->num_patterns; i++) { if ((cdm->patterns[i].type == DEV_MATCH_BUS) ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ position_type = CAM_DEV_POS_EDT; break; } } if (cdm->num_patterns == 0) position_type = CAM_DEV_POS_EDT; else if (position_type == CAM_DEV_POS_NONE) position_type = CAM_DEV_POS_PDRV; } switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); break; case CAM_DEV_POS_PDRV: xptperiphlistmatch(cdm); break; default: cdm->status = CAM_DEV_MATCH_ERROR; break; } if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SASYNC_CB: { struct ccb_setasync *csa; struct async_node *cur_entry; struct async_list *async_head; u_int32_t added; csa = &start_ccb->csa; added = csa->event_enable; async_head = &path->device->asyncs; /* * If there is already an entry for us, simply * update it. */ cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { if ((cur_entry->callback_arg == csa->callback_arg) && (cur_entry->callback == csa->callback)) break; cur_entry = SLIST_NEXT(cur_entry, links); } if (cur_entry != NULL) { /* * If the request has no flags set, * remove the entry. */ added &= ~cur_entry->event_enable; if (csa->event_enable == 0) { SLIST_REMOVE(async_head, cur_entry, async_node, links); xpt_release_device(path->device); free(cur_entry, M_CAMXPT); } else { cur_entry->event_enable = csa->event_enable; } csa->event_enable = added; } else { cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, M_NOWAIT); if (cur_entry == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } cur_entry->event_enable = csa->event_enable; cur_entry->event_lock = (path->bus->sim->mtx && mtx_owned(path->bus->sim->mtx)) ? 
1 : 0; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); xpt_acquire_device(path->device); } start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_REL_SIMQ: { struct ccb_relsim *crs; struct cam_ed *dev; crs = &start_ccb->crs; dev = path->device; if (dev == NULL) { crs->ccb_h.status = CAM_DEV_NOT_THERE; break; } if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { /* Don't ever go below one opening */ if (crs->openings > 0) { xpt_dev_ccbq_resize(path, crs->openings); if (bootverbose) { xpt_print(path, "number of openings is now %d\n", crs->openings); } } } mtx_lock(&dev->sim->devq->send_mtx); if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { /* * Just extend the old timeout and decrement * the freeze count so that a single timeout * is sufficient for releasing the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; callout_stop(&dev->callout); } else { start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } callout_reset_sbt(&dev->callout, SBT_1MS * crs->release_timeout, 0, xpt_release_devq_timeout, dev, 0); dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; } if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { /* * Decrement the freeze count so that a single * completion is still sufficient to unfreeze * the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_COMPLETE; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 || (dev->ccbq.dev_active == 0)) { start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } mtx_unlock(&dev->sim->devq->send_mtx); if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEBUG: { struct cam_path *oldpath; /* Check that all request bits are supported. 
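 *
 * CAM_DEBUG_COMPILE is the mask of debug flags compiled into this
 * kernel (all of them when built with "options CAMDEBUG"); anything
 * outside that mask is refused with CAM_FUNC_NOTAVAIL below rather
 * than silently ignored.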
*/ if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } cam_dflags = CAM_DEBUG_NONE; if (cam_dpath != NULL) { oldpath = cam_dpath; cam_dpath = NULL; xpt_free_path(oldpath); } if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, start_ccb->ccb_h.path_id, start_ccb->ccb_h.target_id, start_ccb->ccb_h.target_lun) != CAM_REQ_CMP) { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } else { cam_dflags = start_ccb->cdbg.flags; start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(cam_dpath, "debugging flags now %x\n", cam_dflags); } } else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_NOOP: if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) xpt_freeze_devq(path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_REPROBE_LUN: xpt_async(AC_INQ_CHANGED, path, NULL); start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(start_ccb); break; default: case XPT_SDEV_TYPE: case XPT_TERM_IO: case XPT_ENG_INQ: /* XXX Implement */ xpt_print(start_ccb->ccb_h.path, "%s: CCB type %#x %s not supported\n", __func__, start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code)); start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { xpt_done(start_ccb); } break; } CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func= %#x %s status %#x\n", start_ccb->ccb_h.func_code, xpt_action_name(start_ccb->ccb_h.func_code), start_ccb->ccb_h.status)); } void xpt_polled_action(union ccb *start_ccb) { u_int32_t timeout; struct cam_sim *sim; struct cam_devq *devq; struct cam_ed *dev; struct mtx *mtx; timeout = start_ccb->ccb_h.timeout * 10; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; mtx = sim->mtx; dev = start_ccb->ccb_h.path->device; mtx_unlock(&dev->device_mtx); /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. */ mtx_lock(&devq->send_mtx); dev->ccbq.dev_openings--; while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && (--timeout > 0)) { mtx_unlock(&devq->send_mtx); DELAY(100); if (mtx) mtx_lock(mtx); (*(sim->sim_poll))(sim); if (mtx) mtx_unlock(mtx); camisr_runqueue(); mtx_lock(&devq->send_mtx); } dev->ccbq.dev_openings++; mtx_unlock(&devq->send_mtx); if (timeout != 0) { xpt_action(start_ccb); while(--timeout > 0) { if (mtx) mtx_lock(mtx); (*(sim->sim_poll))(sim); if (mtx) mtx_unlock(mtx); camisr_runqueue(); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; DELAY(100); } if (timeout == 0) { /* * XXX Is it worth adding a sim_timeout entry * point so we can attempt recovery? If * this is only used for dumps, I don't think * it is. */ start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; } } else { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } mtx_lock(&dev->device_mtx); } /* * Schedule a peripheral driver to receive a ccb when its * target device has space for more transactions. */ void xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); cam_periph_assert(periph, MA_OWNED); if (new_priority < periph->scheduled_priority) { periph->scheduled_priority = new_priority; xpt_run_allocq(periph, 0); } } /* * Schedule a device to run on a given queue. * If the device was inserted as a new entry on the queue, * return 1 meaning the device queue should be run. If we * were already queued, implying someone else has already * started the queue, return 0 so the caller doesn't attempt * to run the queue. 
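 *
 * A hedged sketch of the expected caller pattern (locking elided;
 * xpt_schedule_devq(), used elsewhere in this file, wraps this rule
 * for the send queue):
 *
 *	if (xpt_schedule_dev(&devq->send_queue, &dev->devq_entry, prio))
 *		xpt_run_devq(devq);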
*/ static int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { int retval; u_int32_t old_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); old_priority = pinfo->priority; /* * Are we already queued? */ if (pinfo->index != CAM_UNQUEUED_INDEX) { /* Simply reorder based on new priority */ if (new_priority < old_priority) { camq_change_priority(queue, pinfo->index, new_priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("changed priority to %d\n", new_priority)); retval = 1; } else retval = 0; } else { /* New entry on the queue */ if (new_priority < old_priority) pinfo->priority = new_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("Inserting onto queue\n")); pinfo->generation = ++queue->generation; camq_insert(queue, pinfo); retval = 1; } return (retval); } static void xpt_run_allocq_task(void *context, int pending) { struct cam_periph *periph = context; cam_periph_lock(periph); periph->flags &= ~CAM_PERIPH_RUN_TASK; xpt_run_allocq(periph, 1); cam_periph_unlock(periph); cam_periph_release(periph); } static void xpt_run_allocq(struct cam_periph *periph, int sleep) { struct cam_ed *device; union ccb *ccb; uint32_t prio; cam_periph_assert(periph, MA_OWNED); if (periph->periph_allocating) return; periph->periph_allocating = 1; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); device = periph->path->device; ccb = NULL; restart: while ((prio = min(periph->scheduled_priority, periph->immediate_priority)) != CAM_PRIORITY_NONE && (periph->periph_allocated - (ccb != NULL ? 1 : 0) < device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { if (ccb == NULL && (ccb = xpt_get_ccb_nowait(periph)) == NULL) { if (sleep) { ccb = xpt_get_ccb(periph); goto restart; } if (periph->flags & CAM_PERIPH_RUN_TASK) break; cam_periph_doacquire(periph); periph->flags |= CAM_PERIPH_RUN_TASK; taskqueue_enqueue(xsoftc.xpt_taskq, &periph->periph_run_task); break; } xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); if (prio == periph->immediate_priority) { periph->immediate_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("waking cam_periph_getccb()\n")); SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, periph_links.sle); wakeup(&periph->ccb_list); } else { periph->scheduled_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("calling periph_start()\n")); periph->periph_start(periph, ccb); } ccb = NULL; } if (ccb != NULL) xpt_release_ccb(ccb); periph->periph_allocating = 0; } static void xpt_run_devq(struct cam_devq *devq) { struct mtx *mtx; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); devq->send_queue.qfrozen_cnt++; while ((devq->send_queue.entries > 0) && (devq->send_openings > 0) && (devq->send_queue.qfrozen_cnt <= 1)) { struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; struct xpt_proto *proto; device = (struct cam_ed *)camq_remove(&devq->send_queue, CAMQ_HEAD); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); if (work_ccb == NULL) { printf("device on run queue with no ccbs???\n"); continue; } if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { mtx_lock(&xsoftc.xpt_highpower_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we * don't have any available slots. Freeze * the device queue until we have a slot * available. */ xpt_freeze_devq_device(device, 1); STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); continue; } else { /* * Consume a high power slot while * this ccb runs. 
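 *
 * The slot taken here is handed back in xpt_done_process(), which
 * re-increments xsoftc.num_highpower and, if another device is parked
 * on xsoftc.highpowerq waiting for power, releases that device's
 * frozen queue so its deferred command can be retried.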
*/ xsoftc.num_highpower--; } mtx_unlock(&xsoftc.xpt_highpower_lock); } cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); devq->send_openings--; devq->send_active++; xpt_schedule_devq(devq, device); mtx_unlock(&devq->send_mtx); if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { /* * The client wants to freeze the queue * after this CCB is sent. */ xpt_freeze_devq(work_ccb->ccb_h.path, 1); } /* In Target mode, the peripheral driver knows best... */ if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((device->inq_flags & SID_CmdQue) != 0 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; else /* * Clear this in case of a retried CCB that * failed due to a rejected tag. */ work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } KASSERT(device == work_ccb->ccb_h.path->device, ("device (%p) / path->device (%p) mismatch", device, work_ccb->ccb_h.path->device)); proto = xpt_proto_find(device->protocol); if (proto && proto->ops->debug_out) proto->ops->debug_out(work_ccb); /* * Device queues can be shared among multiple SIM instances * that reside on different buses. Use the SIM from the * queued device, rather than the one from the calling bus. */ sim = device->sim; mtx = sim->mtx; if (mtx && !mtx_owned(mtx)) mtx_lock(mtx); else mtx = NULL; work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); (*(sim->sim_action))(sim, work_ccb); if (mtx) mtx_unlock(mtx); mtx_lock(&devq->send_mtx); } devq->send_queue.qfrozen_cnt--; } /* * This function merges stuff from the slave ccb into the master ccb, while * keeping important fields in the master ccb constant. */ void xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) { /* * Pull fields that are valid for peripheral drivers to set * into the master CCB along with the CCB "payload". 
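 *
 * The copy below leans on the layout of union ccb: &ccb_h + 1 is the
 * first byte past the common header, so a bcopy of
 * sizeof(union ccb) - sizeof(struct ccb_hdr) bytes moves the entire
 * function-specific payload while the rest of the master's header
 * (path, priority, and so on) is preserved apart from the fields
 * assigned explicitly.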
*/ master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); } void xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority, u_int32_t flags) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); ccb_h->pinfo.priority = priority; ccb_h->path = path; ccb_h->path_id = path->bus->path_id; if (path->target) ccb_h->target_id = path->target->target_id; else ccb_h->target_id = CAM_TARGET_WILDCARD; if (path->device) { ccb_h->target_lun = path->device->lun_id; ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; } else { ccb_h->target_lun = CAM_TARGET_WILDCARD; } ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; ccb_h->flags = flags; ccb_h->xflags = 0; } void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) { xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); } /* Path manipulation functions */ cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (path == NULL) { status = CAM_RESRC_UNAVAIL; return(status); } status = xpt_compile_path(path, perph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) { free(path, M_CAMPATH); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { return (xpt_create_path(new_path_ptr, periph, path_id, target_id, lun_id)); } cam_status xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; cam_status status; status = CAM_REQ_CMP; /* Completed without error */ target = NULL; /* Wildcarded */ device = NULL; /* Wildcarded */ /* * We will potentially modify the EDT, so block interrupts * that may attempt to create cam paths. */ bus = xpt_find_bus(path_id); if (bus == NULL) { status = CAM_PATH_INVALID; } else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ struct cam_et *new_target; new_target = xpt_alloc_target(bus, target_id); if (new_target == NULL) { status = CAM_RESRC_UNAVAIL; } else { target = new_target; } } xpt_unlock_buses(); if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { /* Create one */ struct cam_ed *new_device; new_device = (*(bus->xport->ops->alloc_device))(bus, target, lun_id); if (new_device == NULL) { status = CAM_RESRC_UNAVAIL; } else { device = new_device; } } } mtx_unlock(&bus->eb_mtx); } /* * Only touch the user's data if we are successful. 
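 *
 * A minimal sketch of typical consumer usage, via the malloc'ing
 * xpt_create_path() wrapper above (IDs and error handling are
 * illustrative only):
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id,
 *	    lun_id) != CAM_REQ_CMP)
 *		return (ENOMEM);
 *	... use the path ...
 *	xpt_free_path(path);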
*/ if (status == CAM_REQ_CMP) { new_path->periph = perph; new_path->bus = bus; new_path->target = target; new_path->device = device; CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); } else { if (device != NULL) xpt_release_device(device); if (target != NULL) xpt_release_target(target); if (bus != NULL) xpt_release_bus(bus); } return (status); } cam_status xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) { struct cam_path *new_path; new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (new_path == NULL) return(CAM_RESRC_UNAVAIL); xpt_copy_path(new_path, path); *new_path_ptr = new_path; return (CAM_REQ_CMP); } void xpt_copy_path(struct cam_path *new_path, struct cam_path *path) { *new_path = *path; if (path->bus != NULL) xpt_acquire_bus(path->bus); if (path->target != NULL) xpt_acquire_target(path->target); if (path->device != NULL) xpt_acquire_device(path->device); } void xpt_release_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); if (path->device != NULL) { xpt_release_device(path->device); path->device = NULL; } if (path->target != NULL) { xpt_release_target(path->target); path->target = NULL; } if (path->bus != NULL) { xpt_release_bus(path->bus); path->bus = NULL; } } void xpt_free_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); xpt_release_path(path); free(path, M_CAMPATH); } void xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) { xpt_lock_buses(); if (bus_ref) { if (path->bus) *bus_ref = path->bus->refcount; else *bus_ref = 0; } if (periph_ref) { if (path->periph) *periph_ref = path->periph->refcount; else *periph_ref = 0; } xpt_unlock_buses(); if (target_ref) { if (path->target) *target_ref = path->target->refcount; else *target_ref = 0; } if (device_ref) { if (path->device) *device_ref = path->device->refcount; else *device_ref = 0; } } /* * Return -1 for failure, 0 for exact match, 1 for match with wildcards * in path1, 2 for match with wildcards in path2. 
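 *
 * For example, comparing a path built with CAM_TARGET_WILDCARD and
 * CAM_LUN_WILDCARD against a fully specified path on the same bus
 * yields 1; swapping the arguments yields 2; two fully specified
 * paths on different buses yield -1.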
*/ int xpt_path_comp(struct cam_path *path1, struct cam_path *path2) { int retval = 0; if (path1->bus != path2->bus) { if (path1->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (path2->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path1->target != path2->target) { if (path1->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path1->device != path2->device) { if (path1->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->device->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } int xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) { int retval = 0; if (path->bus != dev->target->bus) { if (path->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path->target != dev->target) { if (path->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path->device != dev) { if (path->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } void xpt_print_path(struct cam_path *path) { struct sbuf sb; char buffer[XPT_PRINT_LEN]; sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); xpt_path_sbuf(path, &sb); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); sbuf_delete(&sb); } void xpt_print_device(struct cam_ed *device) { if (device == NULL) printf("(nopath): "); else { printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, device->sim->unit_number, device->sim->bus_id, device->target->target_id, (uintmax_t)device->lun_id); } } void xpt_print(struct cam_path *path, const char *fmt, ...) 
{ va_list ap; struct sbuf sb; char buffer[XPT_PRINT_LEN]; sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); xpt_path_sbuf(path, &sb); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); sbuf_delete(&sb); } int xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; int len; sbuf_new(&sb, str, str_len, 0); len = xpt_path_sbuf(path, &sb); sbuf_finish(&sb); return (len); } int xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) { if (path == NULL) sbuf_printf(sb, "(nopath): "); else { if (path->periph != NULL) sbuf_printf(sb, "(%s%d:", path->periph->periph_name, path->periph->unit_number); else sbuf_printf(sb, "(noperiph:"); if (path->bus != NULL) sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else sbuf_printf(sb, "nobus:"); if (path->target != NULL) sbuf_printf(sb, "%d:", path->target->target_id); else sbuf_printf(sb, "X:"); if (path->device != NULL) sbuf_printf(sb, "%jx): ", (uintmax_t)path->device->lun_id); else sbuf_printf(sb, "X): "); } return(sbuf_len(sb)); } path_id_t xpt_path_path_id(struct cam_path *path) { return(path->bus->path_id); } target_id_t xpt_path_target_id(struct cam_path *path) { if (path->target != NULL) return (path->target->target_id); else return (CAM_TARGET_WILDCARD); } lun_id_t xpt_path_lun_id(struct cam_path *path) { if (path->device != NULL) return (path->device->lun_id); else return (CAM_LUN_WILDCARD); } struct cam_sim * xpt_path_sim(struct cam_path *path) { return (path->bus->sim); } struct cam_periph* xpt_path_periph(struct cam_path *path) { return (path->periph); } /* * Release a CAM control block for the caller. Remit the cost of the structure * to the device referenced by the path. If this device had no 'credits' * and peripheral drivers have registered async callbacks for this notification, * call them now. */ void xpt_release_ccb(union ccb *free_ccb) { struct cam_ed *device; struct cam_periph *periph; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); device = free_ccb->ccb_h.path->device; periph = free_ccb->ccb_h.path->periph; xpt_free_ccb(free_ccb); periph->periph_allocated--; cam_ccbq_release_opening(&device->ccbq); xpt_run_allocq(periph, 0); } /* Functions accessed by SIM drivers */ static struct xpt_xport_ops xport_default_ops = { .alloc_device = xpt_alloc_device_default, .action = xpt_action_default, .async = xpt_dev_async_default, }; static struct xpt_xport xport_default = { .xport = XPORT_UNKNOWN, .name = "unknown", .ops = &xport_default_ops, }; CAM_XPT_XPORT(xport_default); /* * A sim structure, listing the SIM entry points and instance * identification info, is passed to xpt_bus_register to hook the SIM * into the CAM framework. xpt_bus_register creates a cam_eb entry * for this new bus and places it in the array of buses and assigns * it a path_id. The path_id may be influenced by "hard wiring" * information specified by the user. Once interrupt services are * available, the bus will be probed.
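 *
 * A hedged sketch of the attach-side usage in a SIM driver (softc
 * layout, callback names, and tag count are hypothetical):
 *
 *	sc->sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", sc,
 *	    device_get_unit(dev), &sc->mtx, 1, MYDRV_MAXTAGS, sc->devq);
 *	mtx_lock(&sc->mtx);
 *	if (xpt_bus_register(sc->sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sc->sim, TRUE);
 *		mtx_unlock(&sc->mtx);
 *		return (ENXIO);
 *	}
 *	mtx_unlock(&sc->mtx);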
*/ int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) { struct cam_eb *new_bus; struct cam_eb *old_bus; struct ccb_pathinq cpi; struct cam_path *path; cam_status status; sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), M_CAMXPT, M_NOWAIT|M_ZERO); if (new_bus == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); TAILQ_INIT(&new_bus->et_entries); cam_sim_hold(sim); new_bus->sim = sim; timevalclear(&new_bus->last_reset); new_bus->flags = 0; new_bus->refcount = 1; /* Held until a bus_deregister event */ new_bus->generation = 0; xpt_lock_buses(); sim->path_id = new_bus->path_id = xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); while (old_bus != NULL && old_bus->path_id < new_bus->path_id) old_bus = TAILQ_NEXT(old_bus, links); if (old_bus != NULL) TAILQ_INSERT_BEFORE(old_bus, new_bus, links); else TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); /* * Set a default transport so that a PATH_INQ can be issued to * the SIM. This will then allow for probing and attaching of * a more appropriate transport. */ new_bus->xport = &xport_default; status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { xpt_release_bus(new_bus); return (CAM_RESRC_UNAVAIL); } xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP) { struct xpt_xport **xpt; SET_FOREACH(xpt, cam_xpt_xport_set) { if ((*xpt)->xport == cpi.transport) { new_bus->xport = *xpt; break; } } if (new_bus->xport == NULL) { xpt_print(path, "No transport found for %d\n", cpi.transport); xpt_release_bus(new_bus); free(path, M_CAMXPT); return (CAM_RESRC_UNAVAIL); } } /* Notify interested parties */ if (sim->path_id != CAM_XPT_PATH_ID) { xpt_async(AC_PATH_REGISTERED, path, &cpi); if ((cpi.hba_misc & PIM_NOSCAN) == 0) { union ccb *scan_ccb; /* Initiate bus rescan. */ scan_ccb = xpt_alloc_ccb_nowait(); if (scan_ccb != NULL) { scan_ccb->ccb_h.path = path; scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else { xpt_print(path, "Can't allocate CCB to scan bus\n"); xpt_free_path(path); } } else xpt_free_path(path); } else xpt_free_path(path); return (CAM_SUCCESS); } int32_t xpt_bus_deregister(path_id_t pathid) { struct cam_path bus_path; cam_status status; status = xpt_compile_path(&bus_path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_async(AC_LOST_DEVICE, &bus_path, NULL); xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); /* Release the reference count held while registered. */ xpt_release_bus(bus_path.bus); xpt_release_path(&bus_path); return (CAM_REQ_CMP); } static path_id_t xptnextfreepathid(void) { struct cam_eb *bus; path_id_t pathid; const char *strval; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); pathid = 0; bus = TAILQ_FIRST(&xsoftc.xpt_busses); retry: /* Find an unoccupied pathid */ while (bus != NULL && bus->path_id <= pathid) { if (bus->path_id == pathid) pathid++; bus = TAILQ_NEXT(bus, links); } /* * Ensure that this pathid is not reserved for * a bus that may be registered in the future. 
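 *
 * The reservation checked here comes from static "wiring" in the
 * kernel config or /boot/device.hints, e.g. (unit numbers
 * illustrative):
 *
 *	hint.scbus.2.at="ahci0"
 *	hint.scbus.2.bus="0"
 *
 * xptpathid() below hands such a wired pathid to the matching SIM
 * when it registers.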
*/ if (resource_string_value("scbus", pathid, "at", &strval) == 0) { ++pathid; /* Start the search over */ goto retry; } return (pathid); } static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus) { path_id_t pathid; int i, dunit, val; char buf[32]; const char *dname; pathid = CAM_XPT_PATH_ID; snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) return (pathid); i = 0; while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { if (strcmp(dname, "scbus")) { /* Avoid a bit of foot shooting. */ continue; } if (dunit < 0) /* unwired?! */ continue; if (resource_int_value("scbus", dunit, "bus", &val) == 0) { if (sim_bus == val) { pathid = dunit; break; } } else if (sim_bus == 0) { /* Unspecified matches bus 0 */ pathid = dunit; break; } else { printf("Ambiguous scbus configuration for %s%d " "bus %d, cannot wire down. The kernel " "config entry for scbus%d should " "specify a controller bus.\n" "Scbus will be assigned dynamically.\n", sim_name, sim_unit, sim_bus, dunit); break; } } if (pathid == CAM_XPT_PATH_ID) pathid = xptnextfreepathid(); return (pathid); } static const char * xpt_async_string(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return ("AC_BUS_RESET"); case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); case AC_SCSI_AEN: return ("AC_SCSI_AEN"); case AC_SENT_BDR: return ("AC_SENT_BDR"); case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); case AC_CONTRACT: return ("AC_CONTRACT"); case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); } return ("AC_UNKNOWN"); } static int xpt_async_size(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return (0); case AC_UNSOL_RESEL: return (0); case AC_SCSI_AEN: return (0); case AC_SENT_BDR: return (0); case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); case AC_PATH_DEREGISTERED: return (0); case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); case AC_LOST_DEVICE: return (0); case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); case AC_INQ_CHANGED: return (0); case AC_GETDEV_CHANGED: return (0); case AC_CONTRACT: return (sizeof(struct ac_contract)); case AC_ADVINFO_CHANGED: return (-1); case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); } return (0); } static int xpt_async_process_dev(struct cam_ed *device, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; void *async_arg = ccb->casync.async_arg_ptr; u_int32_t async_code = ccb->casync.async_code; int relock; if (path->device != device && path->device->lun_id != CAM_LUN_WILDCARD && device->lun_id != CAM_LUN_WILDCARD) return (1); /* * The async callback could free the device. * If it is a broadcast async, it doesn't hold * device reference, so take our own reference. */ xpt_acquire_device(device); /* * If async for specific device is to be delivered to * the wildcard client, take the specific device lock. * XXX: We may need a way for client to specify it. 
*/ if ((device->lun_id == CAM_LUN_WILDCARD && path->device->lun_id != CAM_LUN_WILDCARD) || (device->target->target_id == CAM_TARGET_WILDCARD && path->target->target_id != CAM_TARGET_WILDCARD) || (device->target->bus->path_id == CAM_BUS_WILDCARD && path->target->bus->path_id != CAM_BUS_WILDCARD)) { mtx_unlock(&device->device_mtx); xpt_path_lock(path); relock = 1; } else relock = 0; (*(device->target->bus->xport->ops->async))(async_code, device->target->bus, device->target, device, async_arg); xpt_async_bcast(&device->asyncs, async_code, path, async_arg); if (relock) { xpt_path_unlock(path); mtx_lock(&device->device_mtx); } xpt_release_device(device); return (1); } static int xpt_async_process_tgt(struct cam_et *target, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; if (path->target != target && path->target->target_id != CAM_TARGET_WILDCARD && target->target_id != CAM_TARGET_WILDCARD) return (1); if (ccb->casync.async_code == AC_SENT_BDR) { /* Update our notion of when the last reset occurred */ microtime(&target->last_reset); } return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); } static void xpt_async_process(struct cam_periph *periph, union ccb *ccb) { struct cam_eb *bus; struct cam_path *path; void *async_arg; u_int32_t async_code; path = ccb->ccb_h.path; async_code = ccb->casync.async_code; async_arg = ccb->casync.async_arg_ptr; CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, ("xpt_async(%s)\n", xpt_async_string(async_code))); bus = path->bus; if (async_code == AC_BUS_RESET) { /* Update our notion of when the last reset occurred */ microtime(&bus->last_reset); } xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. */ if (bus != xpt_periph->path->bus) { xpt_path_lock(xpt_periph->path); xpt_async_process_dev(xpt_periph->path->device, ccb); xpt_path_unlock(xpt_periph->path); } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_release_devq(path, 1, TRUE); else xpt_release_simq(path->bus->sim, TRUE); if (ccb->casync.async_arg_size > 0) free(async_arg, M_CAMXPT); xpt_free_path(path); xpt_free_ccb(ccb); } static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; struct mtx *mtx; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { struct async_node *next_entry; /* * Grab the next list entry before we call the current * entry's callback. This is because the callback function * can delete its async callback entry. */ next_entry = SLIST_NEXT(cur_entry, links); if ((cur_entry->event_enable & async_code) != 0) { mtx = cur_entry->event_lock ? 
path->device->sim->mtx : NULL; if (mtx) mtx_lock(mtx); cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); if (mtx) mtx_unlock(mtx); } cur_entry = next_entry; } } void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) { union ccb *ccb; int size; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { xpt_print(path, "Can't allocate CCB to send %s\n", xpt_async_string(async_code)); return; } if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { xpt_print(path, "Can't allocate path to send %s\n", xpt_async_string(async_code)); xpt_free_ccb(ccb); return; } ccb->ccb_h.path->periph = NULL; ccb->ccb_h.func_code = XPT_ASYNC; ccb->ccb_h.cbfcnp = xpt_async_process; ccb->ccb_h.flags |= CAM_UNLOCKED; ccb->casync.async_code = async_code; ccb->casync.async_arg_size = 0; size = xpt_async_size(async_code); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_async: func %#x %s async_code %d %s\n", ccb->ccb_h.func_code, xpt_action_name(ccb->ccb_h.func_code), async_code, xpt_async_string(async_code))); if (size > 0 && async_arg != NULL) { ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); if (ccb->casync.async_arg_ptr == NULL) { xpt_print(path, "Can't allocate argument to send %s\n", xpt_async_string(async_code)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } memcpy(ccb->casync.async_arg_ptr, async_arg, size); ccb->casync.async_arg_size = size; } else if (size < 0) { ccb->casync.async_arg_ptr = async_arg; ccb->casync.async_arg_size = size; } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_freeze_devq(path, 1); else xpt_freeze_simq(path->bus->sim, 1); xpt_done(ccb); } static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; printf("%s called\n", __func__); } static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_freeze_devq_device(%d) %u->%u\n", count, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); freeze = (dev->ccbq.queue.qfrozen_cnt += count); /* Remove frozen device from sendq.
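 *
 * Freeze counts are cumulative rather than boolean: every
 * xpt_freeze_devq() must eventually be balanced by an
 * xpt_release_devq() for the same count before I/O flows again, e.g.
 * around an error-recovery action:
 *
 *	xpt_freeze_devq(path, 1);
 *	... recover ...
 *	xpt_release_devq(path, 1, TRUE);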
*/ if (device_is_queued(dev)) camq_remove(&devq->send_queue, dev->devq_entry.index); return (freeze); } u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count) { struct cam_ed *dev = path->device; struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); freeze = xpt_freeze_devq_device(dev, count); mtx_unlock(&devq->send_mtx); return (freeze); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = sim->devq; mtx_lock(&devq->send_mtx); freeze = (devq->send_queue.qfrozen_cnt += count); mtx_unlock(&devq->send_mtx); return (freeze); } static void xpt_release_devq_timeout(void *arg) { struct cam_ed *dev; struct cam_devq *devq; dev = (struct cam_ed *)arg; CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) xpt_run_devq(devq); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { struct cam_ed *dev; struct cam_devq *devq; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", count, run_queue)); dev = path->device; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); if (xpt_release_devq_device(dev, count, run_queue)) xpt_run_devq(dev->sim->devq); mtx_unlock(&devq->send_mtx); } static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) { mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); if (count > dev->ccbq.queue.qfrozen_cnt) { #ifdef INVARIANTS printf("xpt_release_devq(): requested %u > present %u\n", count, dev->ccbq.queue.qfrozen_cnt); #endif count = dev->ccbq.queue.qfrozen_cnt; } dev->ccbq.queue.qfrozen_cnt -= count; if (dev->ccbq.queue.qfrozen_cnt == 0) { /* * No longer need to wait for a successful * command completion. */ dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; /* * Remove any timeouts that might be scheduled * to release this queue. */ if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { callout_stop(&dev->callout); dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; } /* * Now that we are unfrozen schedule the * device so any pending transactions are * run. */ xpt_schedule_devq(dev->sim->devq, dev); } else run_queue = 0; return (run_queue); } void xpt_release_simq(struct cam_sim *sim, int run_queue) { struct cam_devq *devq; devq = sim->devq; mtx_lock(&devq->send_mtx); if (devq->send_queue.qfrozen_cnt <= 0) { #ifdef INVARIANTS printf("xpt_release_simq: requested 1 > present %u\n", devq->send_queue.qfrozen_cnt); #endif } else devq->send_queue.qfrozen_cnt--; if (devq->send_queue.qfrozen_cnt == 0) { /* * If there is a timeout scheduled to release this * sim queue, remove it. The queue frozen count is * already at 0. */ if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ callout_stop(&sim->callout); sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; } if (run_queue) { /* * Now that we are unfrozen run the send queue. */ xpt_run_devq(sim->devq); } } mtx_unlock(&devq->send_mtx); } /* * XXX Appears to be unused. 
*/ static void xpt_release_simq_timeout(void *arg) { struct cam_sim *sim; sim = (struct cam_sim *)arg; xpt_release_simq(sim, /* run_queue */ TRUE); } void xpt_done(union ccb *done_ccb) { struct cam_doneq *queue; int run, hash; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && done_ccb->csio.bio != NULL) biotrack(done_ccb->csio.bio, __func__); #endif CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done: func= %#x %s status %#x\n", done_ccb->ccb_h.func_code, xpt_action_name(done_ccb->ccb_h.func_code), done_ccb->ccb_h.status)); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + done_ccb->ccb_h.target_lun) % cam_num_doneqs; queue = &cam_doneqs[hash]; mtx_lock(&queue->cam_doneq_mtx); run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; mtx_unlock(&queue->cam_doneq_mtx); if (run) wakeup(&queue->cam_doneq); } void xpt_done_direct(union ccb *done_ccb) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); xpt_done_process(&done_ccb->ccb_h); } union ccb * xpt_alloc_ccb() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); return (new_ccb); } union ccb * xpt_alloc_ccb_nowait() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); return (new_ccb); } void xpt_free_ccb(union ccb *free_ccb) { free(free_ccb, M_CAMCCB); } /* Private XPT functions */ /* * Get a CAM control block for the caller. Charge the structure to the device * referenced by the path. If we don't have sufficient resources to allocate * more ccbs, we return NULL. 
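 *
 * Only the _nowait variant below can actually return NULL (M_NOWAIT
 * allocation); xpt_get_ccb() instead drops the periph lock and sleeps
 * in malloc(M_WAITOK) until memory is available.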
*/ static union ccb * xpt_get_ccb_nowait(struct cam_periph *periph) { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); if (new_ccb == NULL) return (NULL); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } static union ccb * xpt_get_ccb(struct cam_periph *periph) { union ccb *new_ccb; cam_periph_unlock(periph); new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); cam_periph_lock(periph); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } union ccb * cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) { struct ccb_hdr *ccb_h; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); cam_periph_assert(periph, MA_OWNED); while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || ccb_h->pinfo.priority != priority) { if (priority < periph->immediate_priority) { periph->immediate_priority = priority; xpt_run_allocq(periph, 0); } else cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, "cgticb", 0); } SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); return ((union ccb *)ccb_h); } static void xpt_acquire_bus(struct cam_eb *bus) { xpt_lock_buses(); bus->refcount++; xpt_unlock_buses(); } static void xpt_release_bus(struct cam_eb *bus) { xpt_lock_buses(); KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); if (--bus->refcount > 0) { xpt_unlock_buses(); return; } TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); KASSERT(TAILQ_EMPTY(&bus->et_entries), ("destroying bus, but target list is not empty")); cam_sim_release(bus->sim); mtx_destroy(&bus->eb_mtx); free(bus, M_CAMXPT); } static struct cam_et * xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *cur_target, *target; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); mtx_assert(&bus->eb_mtx, MA_OWNED); target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT|M_ZERO); if (target == NULL) return (NULL); TAILQ_INIT(&target->ed_entries); target->bus = bus; target->target_id = target_id; target->refcount = 1; target->generation = 0; target->luns = NULL; mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); timevalclear(&target->last_reset); /* * Hold a reference to our parent bus so it * will not go away before we do. 
*/ bus->refcount++; /* Insertion sort into our bus's target list */ cur_target = TAILQ_FIRST(&bus->et_entries); while (cur_target != NULL && cur_target->target_id < target_id) cur_target = TAILQ_NEXT(cur_target, links); if (cur_target != NULL) { TAILQ_INSERT_BEFORE(cur_target, target, links); } else { TAILQ_INSERT_TAIL(&bus->et_entries, target, links); } bus->generation++; return (target); } static void xpt_acquire_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); target->refcount++; mtx_unlock(&bus->eb_mtx); } static void xpt_release_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); if (--target->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&bus->et_entries, target, links); bus->generation++; mtx_unlock(&bus->eb_mtx); KASSERT(TAILQ_EMPTY(&target->ed_entries), ("destroying target, but device list is not empty")); xpt_release_bus(bus); mtx_destroy(&target->luns_mtx); if (target->luns) free(target->luns, M_CAMXPT); free(target, M_CAMXPT); } static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); device->mintags = 1; device->maxtags = 1; return (device); } static void xpt_destroy_device(void *context, int pending) { struct cam_ed *device = context; mtx_lock(&device->device_mtx); mtx_destroy(&device->device_mtx); free(device, M_CAMDEV); } struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *cur_device, *device; struct cam_devq *devq; cam_status status; mtx_assert(&bus->eb_mtx, MA_OWNED); /* Make space for us in the device queue on our bus */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); status = cam_devq_resize(devq, devq->send_queue.array_size + 1); mtx_unlock(&devq->send_mtx); if (status != CAM_REQ_CMP) return (NULL); device = (struct cam_ed *)malloc(sizeof(*device), M_CAMDEV, M_NOWAIT|M_ZERO); if (device == NULL) return (NULL); cam_init_pinfo(&device->devq_entry); device->target = target; device->lun_id = lun_id; device->sim = bus->sim; if (cam_ccbq_init(&device->ccbq, bus->sim->max_dev_openings) != 0) { free(device, M_CAMDEV); return (NULL); } SLIST_INIT(&device->asyncs); SLIST_INIT(&device->periphs); device->generation = 0; device->flags = CAM_DEV_UNCONFIGURED; device->tag_delay_count = 0; device->tag_saved_openings = 0; device->refcount = 1; mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); callout_init_mtx(&device->callout, &devq->send_mtx, 0); TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); /* * Hold a reference to our parent bus so it * will not go away before we do. 
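 *
 * (Despite the wording above, the reference taken here is on the
 * parent target; the target in turn holds a reference on its bus, so
 * the whole chain stays alive until xpt_release_device() unwinds it.)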
*/ target->refcount++; cur_device = TAILQ_FIRST(&target->ed_entries); while (cur_device != NULL && cur_device->lun_id < lun_id) cur_device = TAILQ_NEXT(cur_device, links); if (cur_device != NULL) TAILQ_INSERT_BEFORE(cur_device, device, links); else TAILQ_INSERT_TAIL(&target->ed_entries, device, links); target->generation++; return (device); } void xpt_acquire_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; mtx_lock(&bus->eb_mtx); device->refcount++; mtx_unlock(&bus->eb_mtx); } void xpt_release_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; struct cam_devq *devq; mtx_lock(&bus->eb_mtx); if (--device->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&device->target->ed_entries, device,links); device->target->generation++; mtx_unlock(&bus->eb_mtx); /* Release our slot in the devq */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cam_devq_resize(devq, devq->send_queue.array_size - 1); mtx_unlock(&devq->send_mtx); KASSERT(SLIST_EMPTY(&device->periphs), ("destroying device, but periphs list is not empty")); KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, ("destroying device while still queued for ccbs")); if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) callout_stop(&device->callout); xpt_release_target(device->target); cam_ccbq_fini(&device->ccbq); /* * Free allocated memory. free(9) does nothing if the * supplied pointer is NULL, so it is safe to call without * checking. */ free(device->supported_vpds, M_CAMXPT); free(device->device_id, M_CAMXPT); free(device->ext_inq, M_CAMXPT); free(device->physpath, M_CAMXPT); free(device->rcap_buf, M_CAMXPT); free(device->serial_num, M_CAMXPT); taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); } u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) { int result; struct cam_ed *dev; dev = path->device; mtx_lock(&dev->sim->devq->send_mtx); result = cam_ccbq_resize(&dev->ccbq, newopenings); mtx_unlock(&dev->sim->devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (dev->inq_flags & SID_CmdQue) != 0) dev->tag_saved_openings = newopenings; return (result); } static struct cam_eb * xpt_find_bus(path_id_t path_id) { struct cam_eb *bus; xpt_lock_buses(); for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); bus != NULL; bus = TAILQ_NEXT(bus, links)) { if (bus->path_id == path_id) { bus->refcount++; break; } } xpt_unlock_buses(); return (bus); } static struct cam_et * xpt_find_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; mtx_assert(&bus->eb_mtx, MA_OWNED); for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = TAILQ_NEXT(target, links)) { if (target->target_id == target_id) { target->refcount++; break; } } return (target); } static struct cam_ed * xpt_find_device(struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; mtx_assert(&target->bus->eb_mtx, MA_OWNED); for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = TAILQ_NEXT(device, links)) { if (device->lun_id == lun_id) { device->refcount++; break; } } return (device); } void xpt_start_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; int newopenings; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; xpt_freeze_devq(path, /*count*/1); device->inq_flags |= SID_CmdQue; if (device->tag_saved_openings != 0) newopenings = device->tag_saved_openings; else newopenings = min(device->maxtags, sim->max_tagged_dev_openings); 
xpt_dev_ccbq_resize(path, newopenings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } void xpt_stop_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; device->tag_delay_count = 0; xpt_freeze_devq(path, /*count*/1); device->inq_flags &= ~SID_CmdQue; xpt_dev_ccbq_resize(path, sim->max_dev_openings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } static void xpt_boot_delay(void *arg) { xpt_release_boot(); } static void xpt_config(void *arg) { /* * Now that interrupts are enabled, go find our devices */ if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) printf("xpt_config: failed to create taskqueue thread.\n"); /* Setup debugging path */ if (cam_dflags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN) != CAM_REQ_CMP) { printf("xpt_config: xpt_create_path() failed for debug" " target %d:%d:%d, debugging disabled\n", CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); cam_dflags = CAM_DEBUG_NONE; } } else cam_dpath = NULL; periphdriver_init(1); xpt_hold_boot(); callout_init(&xsoftc.boot_callout, 1); callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, xpt_boot_delay, NULL, 0); /* Fire up rescan thread. */ if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, "cam", "scanner")) { printf("xpt_config: failed to create rescan thread.\n"); } } void xpt_hold_boot(void) { xpt_lock_buses(); xsoftc.buses_to_config++; xpt_unlock_buses(); } void xpt_release_boot(void) { xpt_lock_buses(); xsoftc.buses_to_config--; if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { struct xpt_task *task; xsoftc.buses_config_done = 1; xpt_unlock_buses(); /* Call manually because we don't have any buses */ task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); if (task != NULL) { TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); taskqueue_enqueue(taskqueue_thread, &task->task); } } else xpt_unlock_buses(); } /* * If the given device only has one peripheral attached to it, and if that * peripheral is the passthrough driver, announce it. This ensures that the * user sees some sort of announcement for every peripheral in their system. */ static int xptpassannouncefunc(struct cam_ed *device, void *arg) { struct cam_periph *periph; int i; for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++); periph = SLIST_FIRST(&device->periphs); if ((i == 1) && (strncmp(periph->periph_name, "pass", 4) == 0)) xpt_announce_periph(periph, NULL); return(1); } static void xpt_finishconfig_task(void *context, int pending) { periphdriver_init(2); /* * Check for devices with no "standard" peripheral driver * attached. For any devices like that, announce the * passthrough driver so the user will see something. */ if (!bootverbose) xpt_for_all_devices(xptpassannouncefunc, NULL); /* Release our hook so that the boot can continue.
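 *
 * The hook was established with config_intrhook_establish() earlier
 * in boot; while it is outstanding the kernel holds off mounting
 * root, so disestablishing it below is what finally lets the boot
 * proceed once the initial bus scans have settled.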
*/ config_intrhook_disestablish(xsoftc.xpt_config_hook); free(xsoftc.xpt_config_hook, M_CAMXPT); xsoftc.xpt_config_hook = NULL; free(context, M_CAMXPT); } cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path) { struct ccb_setasync csa; cam_status status; int xptpath = 0; if (path == NULL) { status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_path_lock(path); xptpath = 1; } xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = event; csa.callback = cbfunc; csa.callback_arg = cbarg; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, ("xpt_register_async: func %p\n", cbfunc)); if (xptpath) { xpt_path_unlock(path); xpt_free_path(path); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_FOUND_DEVICE)) { /* * Get this peripheral up to date with all * the currently existing devices. */ xpt_for_all_devices(xptsetasyncfunc, &csa); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_PATH_REGISTERED)) { /* * Get this peripheral up to date with all * the currently existing buses. */ xpt_for_all_busses(xptsetasyncbusfunc, &csa); } return (status); } static void xptaction(struct cam_sim *sim, union ccb *work_ccb) { CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); switch (work_ccb->ccb_h.func_code) { /* Common cases first */ case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi; cpi = &work_ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "", HBA_IDLEN); strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 0; cpi->protocol = PROTO_UNSPECIFIED; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->transport = XPORT_UNSPECIFIED; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(work_ccb); break; } default: work_ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(work_ccb); break; } } /* * The xpt as a "controller" has no interrupt sources, so polling * is a no-op. */ static void xptpoll(struct cam_sim *sim) { } void xpt_lock_buses(void) { mtx_lock(&xsoftc.xpt_topo_lock); } void xpt_unlock_buses(void) { mtx_unlock(&xsoftc.xpt_topo_lock); } struct mtx * xpt_path_mtx(struct cam_path *path) { return (&path->device->device_mtx); } static void xpt_done_process(struct ccb_hdr *ccb_h) { struct cam_sim *sim; struct cam_devq *devq; struct mtx *mtx = NULL; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct ccb_scsiio *csio; if (ccb_h->func_code == XPT_SCSI_IO) { csio = &((union ccb *)ccb_h)->csio; if (csio->bio != NULL) biotrack(csio->bio, __func__); } #endif if (ccb_h->flags & CAM_HIGH_POWER) { struct highpowerlist *hphead; struct cam_ed *device; mtx_lock(&xsoftc.xpt_highpower_lock); hphead = &xsoftc.highpowerq; device = STAILQ_FIRST(hphead); /* * Increment the count since this command is done. */ xsoftc.num_highpower++; /* * Any high powered commands queued up? 
*/ if (device != NULL) { STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); mtx_lock(&device->sim->devq->send_mtx); xpt_release_devq_device(device, /*count*/1, /*runqueue*/TRUE); mtx_unlock(&device->sim->devq->send_mtx); } else mtx_unlock(&xsoftc.xpt_highpower_lock); } sim = ccb_h->path->bus->sim; if (ccb_h->status & CAM_RELEASE_SIMQ) { xpt_release_simq(sim, /*run_queue*/FALSE); ccb_h->status &= ~CAM_RELEASE_SIMQ; } if ((ccb_h->flags & CAM_DEV_QFRZDIS) && (ccb_h->status & CAM_DEV_QFRZN)) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); ccb_h->status &= ~CAM_DEV_QFRZN; } devq = sim->devq; if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { struct cam_ed *dev = ccb_h->path->device; mtx_lock(&devq->send_mtx); devq->send_active--; devq->send_openings++; cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 && (dev->ccbq.dev_active == 0))) { dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (!device_is_queued(dev)) (void)xpt_schedule_devq(devq, dev); xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); } } if ((ccb_h->flags & CAM_UNLOCKED) == 0) { if (mtx == NULL) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); } } else { if (mtx != NULL) { mtx_unlock(mtx); mtx = NULL; } } /* Call the peripheral driver's callback */ ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); if (mtx != NULL) mtx_unlock(mtx); } void xpt_done_td(void *arg) { struct cam_doneq *queue = arg; struct ccb_hdr *ccb_h; STAILQ_HEAD(, ccb_hdr) doneq; STAILQ_INIT(&doneq); mtx_lock(&queue->cam_doneq_mtx); while (1) { while (STAILQ_EMPTY(&queue->cam_doneq)) { queue->cam_doneq_sleep = 1; msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, PRIBIO, "-", 0); queue->cam_doneq_sleep = 0; } STAILQ_CONCAT(&doneq, &queue->cam_doneq); mtx_unlock(&queue->cam_doneq_mtx); THREAD_NO_SLEEPING(); while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); xpt_done_process(ccb_h); } THREAD_SLEEPING_OK(); mtx_lock(&queue->cam_doneq_mtx); } } static void camisr_runqueue(void) { struct ccb_hdr *ccb_h; struct cam_doneq *queue; int i; /* Process global queues. 
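 *
 * Completions were spread across the cam_doneqs[] array by a simple
 * (path_id + target_id + lun) hash in xpt_done(), so draining every
 * queue here catches all pending CCBs regardless of where they
 * landed.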
*/ for (i = 0; i < cam_num_doneqs; i++) { queue = &cam_doneqs[i]; mtx_lock(&queue->cam_doneq_mtx); while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); mtx_unlock(&queue->cam_doneq_mtx); xpt_done_process(ccb_h); mtx_lock(&queue->cam_doneq_mtx); } mtx_unlock(&queue->cam_doneq_mtx); } } struct kv { uint32_t v; const char *name; }; static struct kv map[] = { { XPT_NOOP, "XPT_NOOP" }, { XPT_SCSI_IO, "XPT_SCSI_IO" }, { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, { XPT_GDEVLIST, "XPT_GDEVLIST" }, { XPT_PATH_INQ, "XPT_PATH_INQ" }, { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, { XPT_DEBUG, "XPT_DEBUG" }, { XPT_PATH_STATS, "XPT_PATH_STATS" }, { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, { XPT_ASYNC, "XPT_ASYNC" }, { XPT_ABORT, "XPT_ABORT" }, { XPT_RESET_BUS, "XPT_RESET_BUS" }, { XPT_RESET_DEV, "XPT_RESET_DEV" }, { XPT_TERM_IO, "XPT_TERM_IO" }, { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, { XPT_ATA_IO, "XPT_ATA_IO" }, { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, { XPT_NVME_IO, "XPT_NVME_IO" }, { XPT_MMC_IO, "XPT_MMC_IO" }, { XPT_SMP_IO, "XPT_SMP_IO" }, { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, { XPT_ENG_INQ, "XPT_ENG_INQ" }, { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, { XPT_EN_LUN, "XPT_EN_LUN" }, { XPT_TARGET_IO, "XPT_TARGET_IO" }, { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, { 0, 0 } }; const char * xpt_action_name(uint32_t action) { static char buffer[32]; /* Only for unknown messages -- racy */ struct kv *walker = map; while (walker->name != NULL) { if (walker->v == action) return (walker->name); walker++; } snprintf(buffer, sizeof(buffer), "%#x", action); return (buffer); } Index: head/sys/cam/cam_xpt.h =================================================================== --- head/sys/cam/cam_xpt.h (revision 326264) +++ head/sys/cam/cam_xpt.h (revision 326265) @@ -1,148 +1,150 @@ /*- * Data structures and definitions for dealing with the * Common Access Method Transport (xpt) layer. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_XPT_H #define _CAM_CAM_XPT_H 1 #ifdef _KERNEL #include #endif /* Forward Declarations */ union ccb; struct cam_periph; struct cam_ed; struct cam_sim; struct sbuf; /* * Definition of a CAM path. Paths are created from bus, target, and lun ids * via xpt_create_path and allow for reference to devices without recurring * lookups in the edt. */ struct cam_path; /* Path functions */ #ifdef _KERNEL /* * Definition of an async handler callback block. These are used to add * SIMs and peripherals to the async callback lists. */ struct async_node { SLIST_ENTRY(async_node) links; u_int32_t event_enable; /* Async Event enables */ u_int32_t event_lock; /* Take SIM lock for handlers. */ void (*callback)(void *arg, u_int32_t code, struct cam_path *path, void *args); void *callback_arg; }; SLIST_HEAD(async_list, async_node); SLIST_HEAD(periph_list, cam_periph); void xpt_action(union ccb *new_ccb); void xpt_action_default(union ccb *new_ccb); union ccb *xpt_alloc_ccb(void); union ccb *xpt_alloc_ccb_nowait(void); void xpt_free_ccb(union ccb *free_ccb); void xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority, u_int32_t flags); void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority); void xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb); cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id); cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id); int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path); void xpt_free_path(struct cam_path *path); void xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref); int xpt_path_comp(struct cam_path *path1, struct cam_path *path2); int xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev); void xpt_print_path(struct cam_path *path); void xpt_print_device(struct cam_ed *device); void xpt_print(struct cam_path *path, const char *fmt, ...); int xpt_path_string(struct cam_path *path, char *str, size_t str_len); int xpt_path_sbuf(struct cam_path *path, struct sbuf *sb); path_id_t xpt_path_path_id(struct cam_path *path); target_id_t xpt_path_target_id(struct cam_path *path); lun_id_t xpt_path_lun_id(struct cam_path *path); struct cam_sim *xpt_path_sim(struct cam_path *path); struct cam_periph *xpt_path_periph(struct cam_path *path); void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg); void xpt_rescan(union ccb *ccb); void xpt_hold_boot(void); void xpt_release_boot(void); void xpt_lock_buses(void); void xpt_unlock_buses(void); struct mtx * xpt_path_mtx(struct cam_path *path); #define xpt_path_lock(path) mtx_lock(xpt_path_mtx(path)) #define xpt_path_unlock(path) mtx_unlock(xpt_path_mtx(path)) #define xpt_path_assert(path, what) mtx_assert(xpt_path_mtx(path), 
(what)) #define xpt_path_owned(path) mtx_owned(xpt_path_mtx(path)) #define xpt_path_sleep(path, chan, priority, wmesg, timo) \ msleep((chan), xpt_path_mtx(path), (priority), (wmesg), (timo)) cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path); cam_status xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id); cam_status xpt_clone_path(struct cam_path **new_path, struct cam_path *path); void xpt_copy_path(struct cam_path *new_path, struct cam_path *path); void xpt_release_path(struct cam_path *path); const char * xpt_action_name(uint32_t action); #endif /* _KERNEL */ #endif /* _CAM_CAM_XPT_H */ Index: head/sys/cam/cam_xpt_internal.h =================================================================== --- head/sys/cam/cam_xpt_internal.h (revision 326264) +++ head/sys/cam/cam_xpt_internal.h (revision 326265) @@ -1,217 +1,219 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright 2009 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _CAM_CAM_XPT_INTERNAL_H #define _CAM_CAM_XPT_INTERNAL_H 1 #include /* Forward Declarations */ struct cam_eb; struct cam_et; struct cam_ed; typedef struct cam_ed * (*xpt_alloc_device_func)(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); typedef void (*xpt_release_device_func)(struct cam_ed *device); typedef void (*xpt_action_func)(union ccb *start_ccb); typedef void (*xpt_dev_async_func)(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); typedef void (*xpt_announce_periph_func)(struct cam_periph *periph); typedef void (*xpt_announce_periph_sbuf_func)(struct cam_periph *periph, struct sbuf *sbuf); struct xpt_xport_ops { xpt_alloc_device_func alloc_device; xpt_release_device_func reldev; xpt_action_func action; xpt_dev_async_func async; xpt_announce_periph_func announce; xpt_announce_periph_sbuf_func announce_sbuf; }; struct xpt_xport { cam_xport xport; const char *name; struct xpt_xport_ops *ops; }; SET_DECLARE(cam_xpt_xport_set, struct xpt_xport); #define CAM_XPT_XPORT(data) \ DATA_SET(cam_xpt_xport_set, data) typedef void (*xpt_proto_announce_func)(struct cam_ed *); typedef void (*xpt_proto_announce_sbuf_func)(struct cam_ed *, struct sbuf *); typedef void (*xpt_proto_debug_out_func)(union ccb *); struct xpt_proto_ops { xpt_proto_announce_func announce; xpt_proto_announce_sbuf_func announce_sbuf; xpt_proto_announce_func denounce; xpt_proto_announce_sbuf_func denounce_sbuf; xpt_proto_debug_out_func debug_out; }; struct xpt_proto { cam_proto proto; const char *name; struct xpt_proto_ops *ops; }; SET_DECLARE(cam_xpt_proto_set, struct xpt_proto); #define CAM_XPT_PROTO(data) \ DATA_SET(cam_xpt_proto_set, data) /* * The CAM EDT (Existing Device Table) contains the device information for * all devices for all buses in the system. The table contains a * cam_ed structure for each device on the bus. */ struct cam_ed { cam_pinfo devq_entry; TAILQ_ENTRY(cam_ed) links; struct cam_et *target; struct cam_sim *sim; lun_id_t lun_id; struct cam_ccbq ccbq; /* Queue of pending ccbs */ struct async_list asyncs; /* Async callback info for this B/T/L */ struct periph_list periphs; /* All attached devices */ u_int generation; /* Generation number */ void *quirk; /* Oddities about this device */ u_int maxtags; u_int mintags; cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; struct scsi_inquiry_data inq_data; uint8_t *supported_vpds; uint8_t supported_vpds_len; uint32_t device_id_len; uint8_t *device_id; uint32_t ext_inq_len; uint8_t *ext_inq; uint8_t physpath_len; uint8_t *physpath; /* physical path string form */ uint32_t rcap_len; uint8_t *rcap_buf; struct ata_params ident_data; struct mmc_params mmc_ident_data; u_int8_t inq_flags; /* * Current settings for inquiry flags. * This allows us to override settings * like disconnection and tagged * queuing for a device. 
*/ u_int8_t queue_flags; /* Queue flags from the control page */ u_int8_t serial_num_len; u_int8_t *serial_num; u_int32_t flags; #define CAM_DEV_UNCONFIGURED 0x01 #define CAM_DEV_REL_TIMEOUT_PENDING 0x02 #define CAM_DEV_REL_ON_COMPLETE 0x04 #define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08 #define CAM_DEV_TAG_AFTER_COUNT 0x20 #define CAM_DEV_INQUIRY_DATA_VALID 0x40 #define CAM_DEV_IN_DV 0x80 #define CAM_DEV_DV_HIT_BOTTOM 0x100 #define CAM_DEV_IDENTIFY_DATA_VALID 0x200 u_int32_t tag_delay_count; #define CAM_TAG_DELAY_COUNT 5 u_int32_t tag_saved_openings; u_int32_t refcount; struct callout callout; STAILQ_ENTRY(cam_ed) highpowerq_entry; struct mtx device_mtx; struct task device_destroy_task; const struct nvme_controller_data *nvme_cdata; const struct nvme_namespace_data *nvme_data; }; /* * Each target is represented by an ET (Existing Target). These * entries are created when a target is successfully probed with an * identify, and removed when a device fails to respond after a number * of retries, or a bus rescan finds the device missing. */ struct cam_et { TAILQ_HEAD(, cam_ed) ed_entries; TAILQ_ENTRY(cam_et) links; struct cam_eb *bus; target_id_t target_id; u_int32_t refcount; u_int generation; struct timeval last_reset; u_int rpl_size; struct scsi_report_luns_data *luns; struct mtx luns_mtx; /* Protection for luns field. */ }; /* * Each bus is represented by an EB (Existing Bus). These entries * are created by calls to xpt_bus_register and deleted by calls to * xpt_bus_deregister. */ struct cam_eb { TAILQ_HEAD(, cam_et) et_entries; TAILQ_ENTRY(cam_eb) links; path_id_t path_id; struct cam_sim *sim; struct timeval last_reset; u_int32_t flags; #define CAM_EB_RUNQ_SCHEDULED 0x01 u_int32_t refcount; u_int generation; device_t parent_dev; struct xpt_xport *xport; struct mtx eb_mtx; /* Bus topology mutex. */ }; struct cam_path { struct cam_periph *periph; struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; }; struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); void xpt_acquire_device(struct cam_ed *device); void xpt_release_device(struct cam_ed *device); u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings); void xpt_start_tags(struct cam_path *path); void xpt_stop_tags(struct cam_path *path); MALLOC_DECLARE(M_CAMXPT); #endif Index: head/sys/cam/cam_xpt_periph.h =================================================================== --- head/sys/cam/cam_xpt_periph.h (revision 326264) +++ head/sys/cam/cam_xpt_periph.h (revision 326265) @@ -1,60 +1,62 @@ /*- * Data structures and definitions for dealing with the * Common Access Method Transport (xpt) layer from peripheral * drivers. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_XPT_PERIPH_H #define _CAM_CAM_XPT_PERIPH_H 1 #include #include /* Functions accessed by the peripheral drivers */ #ifdef _KERNEL void xpt_polled_action(union ccb *ccb); void xpt_release_ccb(union ccb *released_ccb); void xpt_schedule(struct cam_periph *perph, u_int32_t new_priority); int32_t xpt_add_periph(struct cam_periph *periph); void xpt_remove_periph(struct cam_periph *periph); void xpt_announce_periph(struct cam_periph *periph, char *announce_string); void xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, char *announce_string); void xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string); void xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, int quirks, char *bit_string); void xpt_denounce_periph(struct cam_periph *periph); void xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); #endif #endif /* _CAM_CAM_XPT_PERIPH_H */ Index: head/sys/cam/cam_xpt_sim.h =================================================================== --- head/sys/cam/cam_xpt_sim.h (revision 326264) +++ head/sys/cam/cam_xpt_sim.h (revision 326265) @@ -1,53 +1,55 @@ /*- * Data structures and definitions for dealing with the * Common Access Method Transport (xpt) layer. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _CAM_CAM_XPT_SIM_H #define _CAM_CAM_XPT_SIM_H 1 #include #include /* Functions accessed by SIM drivers */ #ifdef _KERNEL int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus); int32_t xpt_bus_deregister(path_id_t path_id); u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count); void xpt_release_simq(struct cam_sim *sim, int run_queue); u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count); void xpt_release_devq(struct cam_path *path, u_int count, int run_queue); void xpt_done(union ccb *done_ccb); void xpt_done_direct(union ccb *done_ccb); #endif #endif /* _CAM_CAM_XPT_SIM_H */ Index: head/sys/cam/ctl/ctl.c =================================================================== --- head/sys/cam/ctl/ctl.c (revision 326264) +++ head/sys/cam/ctl/ctl.c (revision 326265) @@ -1,13590 +1,13592 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id$ */ /* * CAM Target Layer, a SCSI device emulation subsystem. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctl_softc *control_softc = NULL; /* * Template mode pages. */ /* * Note that these are default values only. The actual values will be * filled in when the user does a mode sense. 
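* Each page therefore comes in two flavors below: a _default template holding the power-on values, and a _changeable template whose non-zero bits mark the fields an initiator is permitted to modify through MODE SELECT.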
*/ const static struct scsi_da_rw_recovery_page rw_er_page_default = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_PER, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_format_page format_page_default = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ SFP_HSEC, /*reserved*/ {0, 0, 0} }; const static struct scsi_format_page format_page_changeable = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {0, 0}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ 0, /*reserved*/ {0, 0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_default = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ CTL_DEFAULT_HEADS, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ SRDP_RPL_DISABLED, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, CTL_DEFAULT_ROTATION_RATE & 0xff}, /*reserved2*/ {0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ 0, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ 0, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {0, 0}, /*reserved2*/ {0, 0} }; const static struct scsi_da_verify_recovery_page verify_er_page_default = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/0, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/SMS_VER_PER, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_caching_page caching_page_default = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_DISC | SCP_WCE, /*ret_priority*/ 0, 
/*disable_pf_transfer_len*/ {0xff, 0xff}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0xff, 0xff}, /*max_pf_ceiling*/ {0xff, 0xff}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_caching_page caching_page_changeable = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_WCE | SCP_RCD, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0, 0}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0, 0}, /*max_pf_ceiling*/ {0, 0}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_control_page control_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/0, /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, /*eca_and_aen*/0, /*flags4*/SCP_TAS, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_control_page control_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/SCP_DSENSE, /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, /*eca_and_aen*/SCP_SWP, /*flags4*/0, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) const static struct scsi_control_ext_page control_ext_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0 }; const static struct scsi_control_ext_page control_ext_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0xff }; const static struct scsi_info_exceptions_page ie_page_default = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC, /*mrie*/SIEP_MRIE_NO, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 1} }; const static struct scsi_info_exceptions_page ie_page_changeable = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | SIEP_FLAGS_LOGERR, /*mrie*/0x0f, /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, /*report_count*/{0xff, 0xff, 0xff, 0xff} }; #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0x01, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0x02, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf1, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf2, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/SLBPP_SITUA, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, 
/*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct scsi_cddvd_capabilities_page cddvd_page_default = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0x3f, /*caps2*/0x00, /*caps3*/0xf0, /*caps4*/0x00, /*caps5*/0x29, /*caps6*/0x00, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{8, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0, /*caps2*/0, /*caps3*/0, /*caps4*/0, /*caps5*/0, /*caps6*/0, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{0, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); static int worker_threads = -1; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, &worker_threads, 1, "Number of worker threads"); static int ctl_debug = CTL_DEBUG_NONE; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, &ctl_debug, 0, "Enabled debug flags"); static int ctl_lun_map_size = 1024; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); #ifdef CTL_TIME_IO static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, &ctl_time_io_secs, 0, "Log requests taking more than this many seconds"); #endif /* * Maximum number of LUNs we support. MUST be a power of 2. */ #define CTL_DEFAULT_MAX_LUNS 1024 static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS; TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns); SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN, &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs"); /* * Maximum number of ports registered at one time. */ #define CTL_DEFAULT_MAX_PORTS 256 static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS; TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports); SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN, &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports"); /* * Maximum number of initiators we support.
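* Unlike the LUN and port limits above, this is not a tunable of its own: it is defined below as CTL_MAX_INIT_PER_PORT times ctl_max_ports, so raising kern.cam.ctl.max_ports raises it as well.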
*/ #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports) /* * Supported pages (0x00), Serial number (0x80), Device ID (0x83), * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) */ #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param); static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); static int ctl_init(void); static int ctl_shutdown(void); static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries); static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, struct ctl_be_lun *be_lun); static int ctl_free_lun(struct ctl_lun *lun); static void ctl_create_lun(struct ctl_be_lun *be_lun); static int ctl_do_mode_select(union ctl_io *io); static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param); static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg); static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); static int ctl_inquiry_std(struct ctl_scsiio *ctsio); static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq); static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io); static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *starting_io); static int ctl_check_blocked(struct ctl_lun *lun); static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio); static void ctl_failover_lun(union ctl_io *io); static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio); static int ctl_scsiio(struct ctl_scsiio *ctsio); static int ctl_target_reset(union ctl_io *io); static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type); static int ctl_lun_reset(union
ctl_io *io); static int ctl_abort_task(union ctl_io *io); static int ctl_abort_task_set(union ctl_io *io); static int ctl_query_task(union ctl_io *io, int task_set); static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type); static int ctl_i_t_nexus_reset(union ctl_io *io); static int ctl_query_async_event(union ctl_io *io); static void ctl_run_task(union ctl_io *io); #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg); static void ctl_done_timer_wakeup(void *arg); #endif /* CTL_IO_DELAY */ static void ctl_send_datamove_done(union ctl_io *io, int have_lock); static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); static void ctl_datamove_remote_write(union ctl_io *io); static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_sgl_setup(union ctl_io *io); static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback); static void ctl_datamove_remote_read(union ctl_io *io); static void ctl_datamove_remote(union ctl_io *io); static void ctl_process_done(union ctl_io *io); static void ctl_lun_thread(void *arg); static void ctl_thresh_thread(void *arg); static void ctl_work_thread(void *arg); static void ctl_enqueue_incoming(union ctl_io *io); static void ctl_enqueue_rtr(union ctl_io *io); static void ctl_enqueue_done(union ctl_io *io); static void ctl_enqueue_isc(union ctl_io *io); static const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); static const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio); static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry); static int ctl_ha_init(void); static int ctl_ha_shutdown(void); static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); /* * Load the serialization table. This isn't very pretty, but is probably * the easiest way to do it. */ #include "ctl_ser_table.c" /* * We only need to define open, close and ioctl routines for this driver. 
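* All SCSI commands enter CTL through registered frontend ports rather than through this node, so the cdevsw below carries no read or write methods; the character device (/dev/cam/ctl) is purely a management and ioctl interface.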
*/ static struct cdevsw ctl_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ctl_open, .d_close = ctl_close, .d_ioctl = ctl_ioctl, .d_name = "ctl", }; MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t ctl_moduledata = { "ctl", ctl_module_event_handler, NULL }; DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); MODULE_VERSION(ctl, 1); static struct ctl_frontend ha_frontend = { .name = "ha", .init = ctl_ha_init, .shutdown = ctl_ha_shutdown, }; static int ctl_ha_init(void) { struct ctl_softc *softc = control_softc; if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &softc->othersc_pool) != 0) return (ENOMEM); if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { ctl_pool_free(softc->othersc_pool); return (EIO); } if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) != CTL_HA_STATUS_SUCCESS) { ctl_ha_msg_destroy(softc); ctl_pool_free(softc->othersc_pool); return (EIO); } return (0); }; static int ctl_ha_shutdown(void) { struct ctl_softc *softc = control_softc; struct ctl_port *port; ctl_ha_msg_shutdown(softc); if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) return (EIO); if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) return (EIO); ctl_pool_free(softc->othersc_pool); while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { ctl_port_deregister(port); free(port->port_name, M_CTL); free(port, M_CTL); } return (0); }; static void ctl_ha_datamove(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_sg_entry *sgl; union ctl_ha_msg msg; uint32_t sg_entries_sent; int do_sg_copy, i, j; memset(&msg.dt, 0, sizeof(msg.dt)); msg.hdr.msg_type = CTL_MSG_DATAMOVE; msg.hdr.original_sc = io->io_hdr.original_sc; msg.hdr.serializing_sc = io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.dt.flags = io->io_hdr.flags; /* * We convert everything into a S/G list here. We can't * pass by reference, only by value between controllers. * So we can't pass a pointer to the S/G list, only as many * S/G entries as we can fit in here. If it's possible for * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, * then we need to break this up into multiple transfers. */ if (io->scsiio.kern_sg_entries == 0) { msg.dt.kern_sg_entries = 1; #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[0].addr = (void *)vtophys(io->scsiio.kern_data_ptr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; #endif msg.dt.sg_list[0].len = io->scsiio.kern_data_len; do_sg_copy = 0; } else { msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; do_sg_copy = 1; } msg.dt.kern_data_len = io->scsiio.kern_data_len; msg.dt.kern_total_len = io->scsiio.kern_total_len; msg.dt.kern_data_resid = io->scsiio.kern_data_resid; msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; msg.dt.sg_sequence = 0; /* * Loop until we've sent all of the S/G entries. On the * other end, we'll recompose these S/G entries into one * contiguous list before processing. 
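* For example, with room for N = sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0]) entries per message, a local list of 2N+3 entries goes out as three messages carrying N, N and 3 entries respectively, with sg_sequence running 0 through 2 and sg_last set only on the final message.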
*/ for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0])), msg.dt.kern_sg_entries - sg_entries_sent); if (do_sg_copy != 0) { sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; for (i = sg_entries_sent, j = 0; i < sg_entries_sent + msg.dt.cur_sg_entries; i++, j++) { #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[j].addr = sgl[i].addr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[j].addr = (void *)vtophys(sgl[i].addr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[j].addr = sgl[i].addr; #endif msg.dt.sg_list[j].len = sgl[i].len; } } sg_entries_sent += msg.dt.cur_sg_entries; msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.dt) - sizeof(msg.dt.sg_list) + sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, M_WAITOK) > CTL_HA_STATUS_SUCCESS) { io->io_hdr.port_status = 31341; io->scsiio.be_move_done(io); return; } msg.dt.sent_sg_entries = sg_entries_sent; } /* * Officially hand over the request from us to the peer. * If failover has just happened, then we must return an error. * If failover happens just after, then it is not our problem. */ if (lun) mtx_lock(&lun->lun_lock); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { if (lun) mtx_unlock(&lun->lun_lock); io->io_hdr.port_status = 31342; io->scsiio.be_move_done(io); return; } io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; if (lun) mtx_unlock(&lun->lun_lock); } static void ctl_ha_done(union ctl_io *io) { union ctl_ha_msg msg; if (io->io_hdr.io_type == CTL_IO_SCSI) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.original_sc = io->io_hdr.original_sc; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.scsi_status = io->scsiio.scsi_status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.sense_len = io->scsiio.sense_len; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); } ctl_free_io(io); } static void ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.original_sc == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.original_sc->scsiio; ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctsio->io_hdr.status = msg_info->hdr.status; ctsio->scsi_status = msg_info->scsi.scsi_status; ctsio->sense_len = msg_info->scsi.sense_len; memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, msg_info->scsi.sense_len); ctl_enqueue_isc((union ctl_io *)ctsio); } static void ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what?
*/ return; } ctsio = &msg_info->hdr.serializing_sc->scsiio; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctl_enqueue_isc((union ctl_io *)ctsio); } void ctl_isc_announce_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg *msg; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&lun->lun_lock); i = sizeof(msg->lun); if (lun->lun_devid) i += lun->lun_devid->len; i += sizeof(pr_key) * lun->pr_key_count; alloc: mtx_unlock(&lun->lun_lock); msg = malloc(i, M_CTL, M_WAITOK); mtx_lock(&lun->lun_lock); k = sizeof(msg->lun); if (lun->lun_devid) k += lun->lun_devid->len; k += sizeof(pr_key) * lun->pr_key_count; if (i < k) { free(msg, M_CTL); i = k; goto alloc; } bzero(&msg->lun, sizeof(msg->lun)); msg->hdr.msg_type = CTL_MSG_LUN_SYNC; msg->hdr.nexus.targ_lun = lun->lun; msg->hdr.nexus.targ_mapped_lun = lun->lun; msg->lun.flags = lun->flags; msg->lun.pr_generation = lun->pr_generation; msg->lun.pr_res_idx = lun->pr_res_idx; msg->lun.pr_res_type = lun->pr_res_type; msg->lun.pr_key_count = lun->pr_key_count; i = 0; if (lun->lun_devid) { msg->lun.lun_devid_len = lun->lun_devid->len; memcpy(&msg->lun.data[i], lun->lun_devid->data, msg->lun.lun_devid_len); i += msg->lun.lun_devid_len; } for (k = 0; k < CTL_MAX_INITIATORS; k++) { if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) continue; pr_key.pr_iid = k; memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); i += sizeof(pr_key); } mtx_unlock(&lun->lun_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i, M_WAITOK); free(msg, M_CTL); if (lun->flags & CTL_LUN_PRIMARY_SC) { for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } } } void ctl_isc_announce_port(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; i = sizeof(msg->port) + strlen(port->port_name) + 1; if (port->lun_map) i += port->lun_map_size * sizeof(uint32_t); if (port->port_devid) i += port->port_devid->len; if (port->target_devid) i += port->target_devid->len; if (port->init_devid) i += port->init_devid->len; msg = malloc(i, M_CTL, M_WAITOK); bzero(&msg->port, sizeof(msg->port)); msg->hdr.msg_type = CTL_MSG_PORT_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->port.port_type = port->port_type; msg->port.physical_port = port->physical_port; msg->port.virtual_port = port->virtual_port; msg->port.status = port->status; i = 0; msg->port.name_len = sprintf(&msg->port.data[i], "%d:%s", softc->ha_id, port->port_name) + 1; i += msg->port.name_len; if (port->lun_map) { msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); memcpy(&msg->port.data[i], port->lun_map, msg->port.lun_map_len); i += msg->port.lun_map_len; } if (port->port_devid) { msg->port.port_devid_len = port->port_devid->len; memcpy(&msg->port.data[i], port->port_devid->data, msg->port.port_devid_len); i += msg->port.port_devid_len; } if (port->target_devid) { msg->port.target_devid_len = port->target_devid->len; memcpy(&msg->port.data[i], port->target_devid->data, msg->port.target_devid_len); i += msg->port.target_devid_len; } if (port->init_devid) { msg->port.init_devid_len = port->init_devid->len; memcpy(&msg->port.data[i], port->init_devid->data, msg->port.init_devid_len); i += msg->port.init_devid_len; } ctl_ha_msg_send(CTL_HA_CHAN_CTL,
&msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); } void ctl_isc_announce_iid(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i, l; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&softc->ctl_lock); i = sizeof(msg->iid); l = 0; if (port->wwpn_iid[iid].name) l = strlen(port->wwpn_iid[iid].name) + 1; i += l; msg = malloc(i, M_CTL, M_NOWAIT); if (msg == NULL) { mtx_unlock(&softc->ctl_lock); return; } bzero(&msg->iid, sizeof(msg->iid)); msg->hdr.msg_type = CTL_MSG_IID_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->hdr.nexus.initid = iid; msg->iid.in_use = port->wwpn_iid[iid].in_use; msg->iid.name_len = l; msg->iid.wwpn = port->wwpn_iid[iid].wwpn; if (port->wwpn_iid[iid].name) strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); mtx_unlock(&softc->ctl_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); free(msg, M_CTL); } void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg msg; u_int i; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == page && lun->mode_pages.index[i].subpage == subpage) break; } if (i == CTL_NUM_MODE_PAGES) return; /* Don't try to replicate pages not present on this device. */ if (lun->mode_pages.index[i].page_data == NULL) return; bzero(&msg.mode, sizeof(msg.mode)); msg.hdr.msg_type = CTL_MSG_MODE_SYNC; msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.mode.page_code = page; msg.mode.subpage = subpage; msg.mode.page_len = lun->mode_pages.index[i].page_len; memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, msg.mode.page_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), M_WAITOK); } static void ctl_isc_ha_link_up(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_ha_msg msg; int i; /* Announce this node's parameters to the peer for validation.
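* The peer's ctl_isc_login() cross-checks CTL_HA_VERSION, ha_mode and the LUN/port/initiator limits, requires the two ha_id values to differ, and aborts the HA channel on any mismatch.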
*/ msg.login.msg_type = CTL_MSG_LOGIN; msg.login.version = CTL_HA_VERSION; msg.login.ha_mode = softc->ha_mode; msg.login.ha_id = softc->ha_id; msg.login.max_luns = ctl_max_luns; msg.login.max_ports = ctl_max_ports; msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), M_WAITOK); STAILQ_FOREACH(port, &softc->port_list, links) { ctl_isc_announce_port(port); for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use) ctl_isc_announce_iid(port, i); } } STAILQ_FOREACH(lun, &softc->lun_list, links) ctl_isc_announce_lun(lun); } static void ctl_isc_ha_link_down(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_io *io; int i; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); io = ctl_alloc_io(softc->othersc_pool); mtx_lock(&softc->ctl_lock); ctl_zero_io(io); io->io_hdr.msg_type = CTL_MSG_FAILOVER; io->io_hdr.nexus.targ_mapped_lun = lun->lun; ctl_enqueue_isc(io); } STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port >= softc->port_min && port->targ_port < softc->port_max) continue; port->status &= ~CTL_PORT_STATUS_ONLINE; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { port->wwpn_iid[i].in_use = 0; free(port->wwpn_iid[i].name, M_CTL); port->wwpn_iid[i].name = NULL; } } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); mtx_lock(&softc->ctl_lock); if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); if (msg->ua.ua_all) { if (msg->ua.ua_set) ctl_est_ua_all(lun, iid, msg->ua.ua_type); else ctl_clr_ua_all(lun, iid, msg->ua.ua_type); } else { if (msg->ua.ua_set) ctl_est_ua(lun, iid, msg->ua.ua_type); else ctl_clr_ua(lun, iid, msg->ua.ua_type); } mtx_unlock(&lun->lun_lock); } static void ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; ctl_lun_flags oflags; uint32_t targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; if (msg->lun.lun_devid_len != i || (i > 0 && memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { mtx_unlock(&lun->lun_lock); printf("%s: Received conflicting HA LUN %d\n", __func__, targ_lun); return; } else { /* Record whether peer is primary. 
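* The peer's role is mirrored into CTL_LUN_PEER_SC_PRIMARY below, and any resulting flags change is surfaced to initiators as a CTL_UA_ASYM_ACC_CHANGE unit attention.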
*/ oflags = lun->flags; if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_DISABLED) == 0) lun->flags |= CTL_LUN_PEER_SC_PRIMARY; else lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; if (oflags != lun->flags) ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); /* If peer is primary and we are not -- use data */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { lun->pr_generation = msg->lun.pr_generation; lun->pr_res_idx = msg->lun.pr_res_idx; lun->pr_res_type = msg->lun.pr_res_type; lun->pr_key_count = msg->lun.pr_key_count; for (k = 0; k < CTL_MAX_INITIATORS; k++) ctl_clr_prkey(lun, k); for (k = 0; k < msg->lun.pr_key_count; k++) { memcpy(&pr_key, &msg->lun.data[i], sizeof(pr_key)); ctl_alloc_prkey(lun, pr_key.pr_iid); ctl_set_prkey(lun, pr_key.pr_iid, pr_key.pr_key); i += sizeof(pr_key); } } mtx_unlock(&lun->lun_lock); CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", __func__, targ_lun, (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? "primary" : "secondary")); /* If we are primary but peer doesn't know -- notify */ if ((lun->flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) ctl_isc_announce_lun(lun); } } static void ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; struct ctl_lun *lun; int i, new; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 1; port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); port->frontend = &ha_frontend; port->targ_port = msg->hdr.nexus.targ_port; port->fe_datamove = ctl_ha_datamove; port->fe_done = ctl_ha_done; } else if (port->frontend == &ha_frontend) { CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 0; } else { printf("%s: Received conflicting HA port %d\n", __func__, msg->hdr.nexus.targ_port); return; } port->port_type = msg->port.port_type; port->physical_port = msg->port.physical_port; port->virtual_port = msg->port.virtual_port; port->status = msg->port.status; i = 0; free(port->port_name, M_CTL); port->port_name = strndup(&msg->port.data[i], msg->port.name_len, M_CTL); i += msg->port.name_len; if (msg->port.lun_map_len != 0) { if (port->lun_map == NULL || port->lun_map_size * sizeof(uint32_t) < msg->port.lun_map_len) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(msg->port.lun_map_len, M_CTL, M_WAITOK); } memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); i += msg->port.lun_map_len; } else { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; } if (msg->port.port_devid_len != 0) { if (port->port_devid == NULL || port->port_devid->len < msg->port.port_devid_len) { free(port->port_devid, M_CTL); port->port_devid = malloc(sizeof(struct ctl_devid) + msg->port.port_devid_len, M_CTL, M_WAITOK); } memcpy(port->port_devid->data, &msg->port.data[i], msg->port.port_devid_len); port->port_devid->len = msg->port.port_devid_len; i += msg->port.port_devid_len; } else { free(port->port_devid, M_CTL); port->port_devid = NULL; } if (msg->port.target_devid_len != 0) { if (port->target_devid == NULL || port->target_devid->len < msg->port.target_devid_len) { free(port->target_devid, M_CTL); port->target_devid = malloc(sizeof(struct ctl_devid) + msg->port.target_devid_len, M_CTL, M_WAITOK); } memcpy(port->target_devid->data, &msg->port.data[i], msg->port.target_devid_len); 
port->target_devid->len = msg->port.target_devid_len; i += msg->port.target_devid_len; } else { free(port->target_devid, M_CTL); port->target_devid = NULL; } if (msg->port.init_devid_len != 0) { if (port->init_devid == NULL || port->init_devid->len < msg->port.init_devid_len) { free(port->init_devid, M_CTL); port->init_devid = malloc(sizeof(struct ctl_devid) + msg->port.init_devid_len, M_CTL, M_WAITOK); } memcpy(port->init_devid->data, &msg->port.data[i], msg->port.init_devid_len); port->init_devid->len = msg->port.init_devid_len; i += msg->port.init_devid_len; } else { free(port->init_devid, M_CTL); port->init_devid = NULL; } if (new) { if (ctl_port_register(port) != 0) { printf("%s: ctl_port_register() failed with error\n", __func__); } } mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; int iid; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { printf("%s: Received IID for unknown port %d\n", __func__, msg->hdr.nexus.targ_port); return; } iid = msg->hdr.nexus.initid; if (port->wwpn_iid[iid].in_use != 0 && msg->iid.in_use == 0) ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); port->wwpn_iid[iid].in_use = msg->iid.in_use; port->wwpn_iid[iid].wwpn = msg->iid.wwpn; free(port->wwpn_iid[iid].name, M_CTL); if (msg->iid.name_len) { port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], msg->iid.name_len, M_CTL); } else port->wwpn_iid[iid].name = NULL; } static void ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { if (msg->login.version != CTL_HA_VERSION) { printf("CTL HA peers have different versions %d != %d\n", msg->login.version, CTL_HA_VERSION); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_mode != softc->ha_mode) { printf("CTL HA peers have different ha_mode %d != %d\n", msg->login.ha_mode, softc->ha_mode); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_id == softc->ha_id) { printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.max_luns != ctl_max_luns || msg->login.max_ports != ctl_max_ports || msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { printf("CTL HA peers have different limits\n"); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } } static void ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; u_int i; uint32_t initidx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == msg->mode.page_code && lun->mode_pages.index[i].subpage == msg->mode.subpage) break; } if (i == CTL_NUM_MODE_PAGES) { mtx_unlock(&lun->lun_lock); return; } memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, lun->mode_pages.index[i].page_len); initidx = ctl_get_initindex(&msg->hdr.nexus); if (initidx != -1) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); } /* * ISC (Inter Shelf 
Communication) event handler. Events from the HA * subsystem come in here. */ static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) { struct ctl_softc *softc = control_softc; union ctl_io *io; struct ctl_prio *presio; ctl_ha_status isc_status; CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); if (event == CTL_HA_EVT_MSG_RECV) { union ctl_ha_msg *msg, msgbuf; if (param > sizeof(msgbuf)) msg = malloc(param, M_CTL, M_WAITOK); else msg = &msgbuf; isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, M_WAITOK); if (isc_status != CTL_HA_STATUS_SUCCESS) { printf("%s: Error receiving message: %d\n", __func__, isc_status); if (msg != &msgbuf) free(msg, M_CTL); return; } CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); switch (msg->hdr.msg_type) { case CTL_MSG_SERIALIZE: io = ctl_alloc_io(softc->othersc_pool); ctl_zero_io(io); // populate ctsio from msg io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.msg_type = CTL_MSG_SERIALIZE; io->io_hdr.original_sc = msg->hdr.original_sc; io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | CTL_FLAG_IO_ACTIVE; /* * If we're in serialization-only mode, we don't * want to go through full done processing. Thus * the COPY flag. * * XXX KDM add another flag that is more specific. */ if (softc->ha_mode != CTL_HA_MODE_XFER) io->io_hdr.flags |= CTL_FLAG_INT_COPY; io->io_hdr.nexus = msg->hdr.nexus; #if 0 printf("port %u, iid %u, lun %u\n", io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_lun); #endif io->scsiio.tag_num = msg->scsi.tag_num; io->scsiio.tag_type = msg->scsi.tag_type; #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ io->scsiio.cdb_len = msg->scsi.cdb_len; memcpy(io->scsiio.cdb, msg->scsi.cdb, CTL_MAX_CDBLEN); if (softc->ha_mode == CTL_HA_MODE_XFER) { const struct ctl_cmd_entry *entry; entry = ctl_get_cmd_entry(&io->scsiio, NULL); io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; io->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; } ctl_enqueue_isc(io); break; /* Performed on the Originating SC, XFER mode only */ case CTL_MSG_DATAMOVE: { struct ctl_sg_entry *sgl; int i, j; io = msg->hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM do something here */ break; } io->io_hdr.msg_type = CTL_MSG_DATAMOVE; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* * Keep track of this, we need to send it back over * when the datamove is complete. 
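		 * (In XFER mode the remote S/G list can arrive split across
		 * several CTL_MSG_DATAMOVE messages; sg_sequence and sg_last
		 * below mark the first and final pieces.)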
*/
		io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
		if (msg->hdr.status == CTL_SUCCESS)
			io->io_hdr.status = msg->hdr.status;

		if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
			getbinuptime(&io->io_hdr.dma_start_bt);
#endif
			i = msg->dt.kern_sg_entries +
			    msg->dt.kern_data_len / CTL_HA_DATAMOVE_SEGMENT + 1;
			sgl = malloc(sizeof(*sgl) * i, M_CTL,
			    M_WAITOK | M_ZERO);
			io->io_hdr.remote_sglist = sgl;
			io->io_hdr.local_sglist =
			    &sgl[msg->dt.kern_sg_entries];

			io->scsiio.kern_data_ptr = (uint8_t *)sgl;

			io->scsiio.kern_sg_entries = msg->dt.kern_sg_entries;
			io->scsiio.rem_sg_entries = msg->dt.kern_sg_entries;
			io->scsiio.kern_data_len = msg->dt.kern_data_len;
			io->scsiio.kern_total_len = msg->dt.kern_total_len;
			io->scsiio.kern_data_resid = msg->dt.kern_data_resid;
			io->scsiio.kern_rel_offset = msg->dt.kern_rel_offset;
			io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
			io->io_hdr.flags |= msg->dt.flags &
			    CTL_FLAG_BUS_ADDR;
		} else
			sgl = (struct ctl_sg_entry *)
				io->scsiio.kern_data_ptr;

		for (i = msg->dt.sent_sg_entries, j = 0;
		     i < (msg->dt.sent_sg_entries +
		     msg->dt.cur_sg_entries); i++, j++) {
			sgl[i].addr = msg->dt.sg_list[j].addr;
			sgl[i].len = msg->dt.sg_list[j].len;

#if 0
			printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
			    __func__, sgl[i].addr, sgl[i].len, j, i);
#endif
		}

		/*
		 * If this is the last piece of the I/O, we've got
		 * the full S/G list.  Queue processing in the thread.
		 * Otherwise wait for the next piece.
		 */
		if (msg->dt.sg_last != 0)
			ctl_enqueue_isc(io);
		break;
	}
	/* Performed on the Serializing (primary) SC, XFER mode only */
	case CTL_MSG_DATAMOVE_DONE: {
		if (msg->hdr.serializing_sc == NULL) {
			printf("%s: serializing_sc == NULL!\n", __func__);
			/* XXX KDM now what? */
			break;
		}
		/*
		 * We grab the sense information here in case
		 * there was a failure, so we can return status
		 * back to the initiator.
		 */
		io = msg->hdr.serializing_sc;
		io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
		io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
		io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
		io->io_hdr.port_status = msg->scsi.port_status;
		io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
		if (msg->hdr.status != CTL_STATUS_NONE) {
			io->io_hdr.status = msg->hdr.status;
			io->scsiio.scsi_status = msg->scsi.scsi_status;
			io->scsiio.sense_len = msg->scsi.sense_len;
			memcpy(&io->scsiio.sense_data,
			    &msg->scsi.sense_data, msg->scsi.sense_len);
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
		}
		ctl_enqueue_isc(io);
		break;
	}

	/* Performed on the Originating SC, SER_ONLY mode */
	case CTL_MSG_R2R:
		io = msg->hdr.original_sc;
		if (io == NULL) {
			printf("%s: original_sc == NULL!\n", __func__);
			break;
		}
		io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
		io->io_hdr.msg_type = CTL_MSG_R2R;
		io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
		ctl_enqueue_isc(io);
		break;

	/*
	 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
	 * mode.  Performed on the Originating (i.e. secondary) SC in
	 * XFER mode.
	 */
	case CTL_MSG_FINISH_IO:
		if (softc->ha_mode == CTL_HA_MODE_XFER)
			ctl_isc_handler_finish_xfer(softc, msg);
		else
			ctl_isc_handler_finish_ser_only(softc, msg);
		break;

	/* Performed on the Originating SC */
	case CTL_MSG_BAD_JUJU:
		io = msg->hdr.original_sc;
		if (io == NULL) {
			printf("%s: Bad JUJU!, original_sc is NULL!\n",
			    __func__);
			break;
		}
		ctl_copy_sense_data(msg, io);
		/*
		 * IO should have already been cleaned up on other
		 * SC so clear this flag so we won't send a message
		 * back to finish the IO there.
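		 * (Clearing CTL_FLAG_SENT_2OTHER_SC below keeps the
		 * completion path from sending a CTL_MSG_FINISH_IO back
		 * for an I/O the peer has already cleaned up.)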
*/ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* io = msg->hdr.serializing_sc; */ io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_enqueue_isc(io); break; /* Handle resets sent from the other side */ case CTL_MSG_MANAGE_TASKS: { struct ctl_taskio *taskio; taskio = (struct ctl_taskio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)taskio); taskio->io_hdr.io_type = CTL_IO_TASK; taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; taskio->io_hdr.nexus = msg->hdr.nexus; taskio->task_action = msg->task.task_action; taskio->tag_num = msg->task.tag_num; taskio->tag_type = msg->task.tag_type; #ifdef CTL_TIME_IO taskio->io_hdr.start_time = time_uptime; getbinuptime(&taskio->io_hdr.start_bt); #endif /* CTL_TIME_IO */ ctl_run_task((union ctl_io *)taskio); break; } /* Persistent Reserve action which needs attention */ case CTL_MSG_PERS_ACTION: presio = (struct ctl_prio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)presio); presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; presio->io_hdr.nexus = msg->hdr.nexus; presio->pr_msg = msg->pr; ctl_enqueue_isc((union ctl_io *)presio); break; case CTL_MSG_UA: ctl_isc_ua(softc, msg, param); break; case CTL_MSG_PORT_SYNC: ctl_isc_port_sync(softc, msg, param); break; case CTL_MSG_LUN_SYNC: ctl_isc_lun_sync(softc, msg, param); break; case CTL_MSG_IID_SYNC: ctl_isc_iid_sync(softc, msg, param); break; case CTL_MSG_LOGIN: ctl_isc_login(softc, msg, param); break; case CTL_MSG_MODE_SYNC: ctl_isc_mode_sync(softc, msg, param); break; default: printf("Received HA message of unknown type %d\n", msg->hdr.msg_type); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); break; } if (msg != &msgbuf) free(msg, M_CTL); } else if (event == CTL_HA_EVT_LINK_CHANGE) { printf("CTL: HA link status changed from %d to %d\n", softc->ha_link, param); if (param == softc->ha_link) return; if (softc->ha_link == CTL_HA_LINK_ONLINE) { softc->ha_link = param; ctl_isc_ha_link_down(softc); } else { softc->ha_link = param; if (softc->ha_link == CTL_HA_LINK_ONLINE) ctl_isc_ha_link_up(softc); } return; } else { printf("ctl_isc_event_handler: Unknown event %d\n", event); return; } } static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) { memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, src->scsi.sense_len); dest->scsiio.scsi_status = src->scsi.scsi_status; dest->scsiio.sense_len = src->scsi.sense_len; dest->io_hdr.status = src->hdr.status; } static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) { memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, src->scsiio.sense_len); dest->scsi.scsi_status = src->scsiio.scsi_status; dest->scsi.sense_len = src->scsiio.sense_len; dest->hdr.status = src->io_hdr.status; } void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; } void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) { int i; mtx_assert(&lun->lun_lock, MA_OWNED); if (lun->pending_ua[port] == NULL) return; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port * CTL_MAX_INIT_PER_PORT + i == except) continue; lun->pending_ua[port][i] |= ua; } } void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { 
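	/*
	 * Pending unit attentions are tracked per LUN in a two-level
	 * array: pending_ua[port][init] holds a ctl_ua_type bit mask,
	 * and a flat initiator index decomposes as
	 * initidx = port * CTL_MAX_INIT_PER_PORT + init (see
	 * ctl_get_initindex()).  The disabled fragment below merely
	 * restates that mapping; it is illustrative only.
	 */
#if 0
	/* Illustrative only: split a flat initidx back into (port, init). */
	u_int port = initidx / CTL_MAX_INIT_PER_PORT;
	u_int init = initidx % CTL_MAX_INIT_PER_PORT;
#endif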
struct ctl_softc *softc = lun->ctl_softc; int i; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) ctl_est_ua_port(lun, i, except, ua); } void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; } void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] &= ~ua; } } } void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } } static int ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) { struct ctl_softc *softc = (struct ctl_softc *)arg1; struct ctl_lun *lun; struct ctl_lun_req ireq; int error, value; value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); mtx_lock(&softc->ctl_lock); if (value == 0) softc->flags |= CTL_FLAG_ACTIVE_SHELF; else softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_unlock(&softc->ctl_lock); bzero(&ireq, sizeof(ireq)); ireq.reqtype = CTL_LUNREQ_MODIFY; ireq.reqdata.modify.lun_id = lun->lun; lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, curthread); if (ireq.status != CTL_LUN_OK) { printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", __func__, ireq.status, ireq.error_str); } mtx_lock(&softc->ctl_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_init(void) { struct make_dev_args args; struct ctl_softc *softc; int i, error; softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK | M_ZERO); make_dev_args_init(&args); args.mda_devsw = &ctl_cdevsw; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = softc; error = make_dev_s(&args, &softc->dev, "cam/ctl"); if (error != 0) { free(softc, M_DEVBUF); control_softc = NULL; return (error); } sysctl_ctx_init(&softc->sysctl_ctx); softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", CTLFLAG_RD, 0, "CAM Target Layer"); if (softc->sysctl_tree == NULL) { printf("%s: unable to allocate sysctl tree\n", __func__); destroy_dev(softc->dev); free(softc, M_DEVBUF); control_softc = NULL; return (ENOMEM); } mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->flags = 0; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", ctl_max_luns, CTL_DEFAULT_MAX_LUNS); ctl_max_luns = 
CTL_DEFAULT_MAX_LUNS; } softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_lun_mask = malloc(sizeof(uint32_t) * ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", ctl_max_ports, CTL_DEFAULT_MAX_PORTS); ctl_max_ports = CTL_DEFAULT_MAX_PORTS; } softc->ctl_port_mask = malloc(sizeof(uint32_t) * ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* * In Copan's HA scheme, the "master" and "slave" roles are * figured out through the slot the controller is in. Although it * is an active/active system, someone has to be in charge. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, "HA head ID (0 - no HA)"); if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { softc->flags |= CTL_FLAG_ACTIVE_SHELF; softc->is_single = 1; softc->port_cnt = ctl_max_ports; softc->port_min = 0; } else { softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; softc->port_min = (softc->ha_id - 1) * softc->port_cnt; } softc->port_max = softc->port_min + softc->port_cnt; softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, "HA link state (0 - offline, 1 - unknown, 2 - online)"); STAILQ_INIT(&softc->lun_list); STAILQ_INIT(&softc->pending_lun_queue); STAILQ_INIT(&softc->fe_list); STAILQ_INIT(&softc->port_list); STAILQ_INIT(&softc->be_list); ctl_tpc_init(softc); if (worker_threads <= 0) worker_threads = max(1, mp_ncpus / 4); if (worker_threads > CTL_MAX_THREADS) worker_threads = CTL_MAX_THREADS; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); thr->ctl_softc = softc; STAILQ_INIT(&thr->incoming_queue); STAILQ_INIT(&thr->rtr_queue); STAILQ_INIT(&thr->done_queue); STAILQ_INIT(&thr->isc_queue); error = kproc_kthread_add(ctl_work_thread, thr, &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if (error != 0) { printf("error creating CTL work thread!\n"); return (error); } } error = kproc_kthread_add(ctl_lun_thread, softc, &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun"); if (error != 0) { printf("error creating CTL lun thread!\n"); return (error); } error = kproc_kthread_add(ctl_thresh_thread, softc, &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); return (error); } SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); if (softc->is_single == 0) { if (ctl_frontend_register(&ha_frontend) != 0) softc->is_single = 1; } return (0); } static int ctl_shutdown(void) { struct ctl_softc *softc = control_softc; int i; if (softc->is_single == 0) ctl_frontend_deregister(&ha_frontend); destroy_dev(softc->dev); /* Shutdown CTL threads. 
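 * Each worker is woken and polled until its thread pointer clears; the
 * same wakeup-and-pause pattern is repeated for the lun and thresh
 * threads below.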
*/
	softc->shutdown = 1;
	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];
		while (thr->thread != NULL) {
			wakeup(thr);
			if (thr->thread != NULL)
				pause("CTL thr shutdown", 1);
		}
		mtx_destroy(&thr->queue_lock);
	}
	while (softc->lun_thread != NULL) {
		wakeup(&softc->pending_lun_queue);
		if (softc->lun_thread != NULL)
			pause("CTL thr shutdown", 1);
	}
	while (softc->thresh_thread != NULL) {
		wakeup(softc->thresh_thread);
		if (softc->thresh_thread != NULL)
			pause("CTL thr shutdown", 1);
	}

	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);

	free(softc->ctl_luns, M_DEVBUF);
	free(softc->ctl_lun_mask, M_DEVBUF);
	free(softc->ctl_port_mask, M_DEVBUF);
	free(softc->ctl_ports, M_DEVBUF);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(softc, M_DEVBUF);
	control_softc = NULL;
	return (0);
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (ctl_shutdown());
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	int last;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u >= maximum %u!\n",
		    __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	last = (--port->wwpn_iid[iid].in_use == 0);
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);
	if (last)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	ctl_isc_announce_iid(port, iid);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = port->ctl_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u >= maximum %u!\n",
		    __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time =
					    port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
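		 * A rebooted initiator logging back in with the same WWPN
		 * or name lands here and simply falls through to "take"
		 * below, reusing its old slot.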
*/ if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { #if 0 printf("%s: port %d iid %u WWPN %#jx arrived" " again\n", __func__, port->targ_port, iid, (uintmax_t)wwpn); #endif goto take; } if (name != NULL && port->wwpn_iid[iid].name != NULL && strcmp(name, port->wwpn_iid[iid].name) == 0) { #if 0 printf("%s: port %d iid %u name '%s' arrived" " again\n", __func__, port->targ_port, iid, name); #endif goto take; } /* * This is an error, but what do we do about it? The * driver is telling us we have a new WWPN for this * initiator ID, so we pretty much need to use it. */ printf("%s: port %d iid %u WWPN %#jx '%s' arrived," " but WWPN %#jx '%s' is still at that address\n", __func__, port->targ_port, iid, wwpn, name, (uintmax_t)port->wwpn_iid[iid].wwpn, port->wwpn_iid[iid].name); } take: free(port->wwpn_iid[iid].name, M_CTL); port->wwpn_iid[iid].name = name; port->wwpn_iid[iid].wwpn = wwpn; port->wwpn_iid[iid].in_use++; mtx_unlock(&softc->ctl_lock); ctl_isc_announce_iid(port, iid); return (iid); } static int ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) { int len; switch (port->port_type) { case CTL_PORT_FC: { struct scsi_transportid_fcp *id = (struct scsi_transportid_fcp *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_FC; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); return (sizeof(*id)); } case CTL_PORT_ISCSI: { struct scsi_transportid_iscsi_port *id = (struct scsi_transportid_iscsi_port *)buf; if (port->wwpn_iid[iid].name == NULL) return (0); memset(id, 0, 256); id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | SCSI_PROTO_ISCSI; len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; len = roundup2(min(len, 252), 4); scsi_ulto2b(len, id->additional_length); return (sizeof(*id) + len); } case CTL_PORT_SAS: { struct scsi_transportid_sas *id = (struct scsi_transportid_sas *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SAS; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); return (sizeof(*id)); } default: { struct scsi_transportid_spi *id = (struct scsi_transportid_spi *)buf; memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SPI; scsi_ulto2b(iid, id->scsi_addr); scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); return (sizeof(*id)); } } } /* * Serialize a command that went down the "wrong" side, and so was sent to * this controller for execution. The logic is a little different than the * standard case in ctl_scsiio_precheck(). Errors in this case need to get * sent back to the other side, but in the success case, we execute the * command on this side (XFER mode) or tell the other side to execute it * (SER_ONLY mode). */ static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); union ctl_ha_msg msg_info; struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t targ_lun; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; /* Make sure that we know about this port. */ if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 1); goto badjuju; } /* Make sure that we know about this LUN. */ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); /* * The other node would not send this request to us unless * received announce that we are primary node for this LUN. 
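	 * (That announcement is ctl_isc_announce_lun(), sent when a LUN
	 * becomes primary; see the lun_sync handling above.)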
 * If this LUN does not exist now, it is probably the result of
	 * a race, so respond to the initiator in the most opaque way.
	 */
		ctl_set_busy(ctsio);
		goto badjuju;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	/*
	 * If the LUN is invalid, pretend that it doesn't exist.
	 * It will go away as soon as all pending I/Os have completed.
	 */
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_busy(ctsio);
		goto badjuju;
	}

	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		goto badjuju;
	}

	CTL_LUN(ctsio) = lun;
	CTL_BACKEND_LUN(ctsio) = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
	    (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
	    ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
		    blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		goto badjuju;
	case CTL_ACTION_OVERLAP_TAG:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		goto badjuju;
	case CTL_ACTION_ERROR:
	default:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);

		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
		    /*retry_count*/ 0);
badjuju:
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		ctl_free_io((union ctl_io *)ctsio);
		break;
	}
}

/*
 * Fill in the Order Of Arrival (OOA) queue entries for one LUN; the caller
 * passes in the running fill count and the destination entry array.
 */
static void
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	    (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	    ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
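		 * For example, with alloc_num = 8 and 11 commands queued,
		 * entries 0-7 are filled and *cur_fill_num still runs to
		 * 11, so the caller can report dropped_num = 3.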
*/ if (*cur_fill_num >= ooa_hdr->alloc_num) continue; entry = &kern_entries[*cur_fill_num]; entry->tag_num = io->scsiio.tag_num; entry->lun_num = lun->lun; #ifdef CTL_TIME_IO entry->start_bt = io->io_hdr.start_bt; #endif bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); entry->cdb_len = io->scsiio.cdb_len; if (io->io_hdr.flags & CTL_FLAG_BLOCKED) entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; if (io->io_hdr.flags & CTL_FLAG_ABORT) entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; } mtx_unlock(&lun->lun_lock); } static void * ctl_copyin_alloc(void *user_addr, unsigned int len, char *error_str, size_t error_str_len) { void *kptr; kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); if (copyin(user_addr, kptr, len) != 0) { snprintf(error_str, error_str_len, "Error copying %d bytes " "from user address %p to kernel address %p", len, user_addr, kptr); free(kptr, M_CTL); return (NULL); } return (kptr); } static void ctl_free_args(int num_args, struct ctl_be_arg *args) { int i; if (args == NULL) return; for (i = 0; i < num_args; i++) { free(args[i].kname, M_CTL); free(args[i].kvalue, M_CTL); } free(args, M_CTL); } static struct ctl_be_arg * ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, char *error_str, size_t error_str_len) { struct ctl_be_arg *args; int i; args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), error_str, error_str_len); if (args == NULL) goto bailout; for (i = 0; i < num_args; i++) { args[i].kname = NULL; args[i].kvalue = NULL; } for (i = 0; i < num_args; i++) { uint8_t *tmpptr; if (args[i].namelen == 0) { snprintf(error_str, error_str_len, "Argument %d " "name length is zero", i); goto bailout; } args[i].kname = ctl_copyin_alloc(args[i].name, args[i].namelen, error_str, error_str_len); if (args[i].kname == NULL) goto bailout; if (args[i].kname[args[i].namelen - 1] != '\0') { snprintf(error_str, error_str_len, "Argument %d " "name is not NUL-terminated", i); goto bailout; } if (args[i].flags & CTL_BEARG_RD) { if (args[i].vallen == 0) { snprintf(error_str, error_str_len, "Argument %d " "value length is zero", i); goto bailout; } tmpptr = ctl_copyin_alloc(args[i].value, args[i].vallen, error_str, error_str_len); if (tmpptr == NULL) goto bailout; if ((args[i].flags & CTL_BEARG_ASCII) && (tmpptr[args[i].vallen - 1] != '\0')) { snprintf(error_str, error_str_len, "Argument " "%d value is not NUL-terminated", i); free(tmpptr, M_CTL); goto bailout; } args[i].kvalue = tmpptr; } else { args[i].kvalue = malloc(args[i].vallen, M_CTL, M_WAITOK | M_ZERO); } } return (args); bailout: ctl_free_args(num_args, args); return (NULL); } static void ctl_copyout_args(int num_args, struct ctl_be_arg *args) { int i; for (i = 0; i < num_args; i++) { if (args[i].flags & CTL_BEARG_WR) copyout(args[i].kvalue, args[i].value, args[i].vallen); } } /* * Escape characters that are illegal or not recommended in XML. 
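 * For example, a serial number such as "a<b&c" is emitted as
 * "a&lt;b&amp;c" so the generated XML stays well-formed.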
*/
int
ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
{
	char *end = str + size;
	int retval;

	retval = 0;

	for (; *str && str < end; str++) {
		switch (*str) {
		case '&':
			retval = sbuf_printf(sb, "&amp;");
			break;
		case '>':
			retval = sbuf_printf(sb, "&gt;");
			break;
		case '<':
			retval = sbuf_printf(sb, "&lt;");
			break;
		default:
			retval = sbuf_putc(sb, *str);
			break;
		}

		if (retval != 0)
			break;
	}

	return (retval);
}

static void
ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
{
	struct scsi_vpd_id_descriptor *desc;
	int i;

	if (id == NULL || id->len < 4)
		return;
	desc = (struct scsi_vpd_id_descriptor *)id->data;
	switch (desc->id_type & SVPD_ID_TYPE_MASK) {
	case SVPD_ID_TYPE_T10:
		sbuf_printf(sb, "t10.");
		break;
	case SVPD_ID_TYPE_EUI64:
		sbuf_printf(sb, "eui.");
		break;
	case SVPD_ID_TYPE_NAA:
		sbuf_printf(sb, "naa.");
		break;
	case SVPD_ID_TYPE_SCSI_NAME:
		break;
	}
	switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
	case SVPD_ID_CODESET_BINARY:
		for (i = 0; i < desc->length; i++)
			sbuf_printf(sb, "%02x", desc->identifier[i]);
		break;
	case SVPD_ID_CODESET_ASCII:
		sbuf_printf(sb, "%.*s", (int)desc->length,
		    (char *)desc->identifier);
		break;
	case SVPD_ID_CODESET_UTF8:
		sbuf_printf(sb, "%s", (char *)desc->identifier);
		break;
	}
}

static int
ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	  struct thread *td)
{
	struct ctl_softc *softc = dev->si_drv1;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	retval = 0;

	switch (cmd) {
	case CTL_IO:
		retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
		break;
	case CTL_ENABLE_PORT:
	case CTL_DISABLE_PORT:
	case CTL_SET_PORT_WWNS: {
		struct ctl_port *port;
		struct ctl_port_entry *entry;

		entry = (struct ctl_port_entry *)addr;

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			int action, done;

			if (port->targ_port < softc->port_min ||
			    port->targ_port >= softc->port_max)
				continue;

			action = 0;
			done = 0;
			if ((entry->port_type == CTL_PORT_NONE)
			 && (entry->targ_port == port->targ_port)) {
				/*
				 * If the user only wants to enable or
				 * disable or set WWNs on a specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & port->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					    "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action == 0)
				continue;

			/*
			 * XXX KDM we have to drop the lock here, because
			 * the online/offline operations can potentially
			 * block.  We need to reference count the frontends
			 * so they can't go away.
			 */
			if (cmd == CTL_ENABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_online(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_DISABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_offline(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_SET_PORT_WWNS) {
				ctl_port_set_wwns(port,
				    (entry->flags & CTL_PORT_WWNN_VALID) ?
				    1 : 0, entry->wwnn,
				    (entry->flags & CTL_PORT_WWPN_VALID) ?
1 : 0, entry->wwpn); } if (done != 0) break; } mtx_unlock(&softc->ctl_lock); break; } case CTL_GET_OOA: { struct ctl_ooa *ooa_hdr; struct ctl_ooa_entry *entries; uint32_t cur_fill_num; ooa_hdr = (struct ctl_ooa *)addr; if ((ooa_hdr->alloc_len == 0) || (ooa_hdr->alloc_num == 0)) { printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " "must be non-zero\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num); retval = EINVAL; break; } if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * sizeof(struct ctl_ooa_entry))) { printf("%s: CTL_GET_OOA: alloc len %u must be alloc " "num %d * sizeof(struct ctl_ooa_entry) %zd\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); retval = EINVAL; break; } entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); if (entries == NULL) { printf("%s: could not allocate %d bytes for OOA " "dump\n", __func__, ooa_hdr->alloc_len); retval = ENOMEM; break; } mtx_lock(&softc->ctl_lock); if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && (ooa_hdr->lun_num >= ctl_max_luns || softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { mtx_unlock(&softc->ctl_lock); free(entries, M_CTL); printf("%s: CTL_GET_OOA: invalid LUN %ju\n", __func__, (uintmax_t)ooa_hdr->lun_num); retval = EINVAL; break; } cur_fill_num = 0; if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { STAILQ_FOREACH(lun, &softc->lun_list, links) { ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); } } else { lun = softc->ctl_luns[ooa_hdr->lun_num]; ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); } mtx_unlock(&softc->ctl_lock); ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); ooa_hdr->fill_len = ooa_hdr->fill_num * sizeof(struct ctl_ooa_entry); retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); if (retval != 0) { printf("%s: error copying out %d bytes for OOA dump\n", __func__, ooa_hdr->fill_len); } getbinuptime(&ooa_hdr->cur_bt); if (cur_fill_num > ooa_hdr->alloc_num) { ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; } else { ooa_hdr->dropped_num = 0; ooa_hdr->status = CTL_OOA_OK; } free(entries, M_CTL); break; } case CTL_DELAY_IO: { struct ctl_io_delay_info *delay_info; delay_info = (struct ctl_io_delay_info *)addr; #ifdef CTL_IO_DELAY mtx_lock(&softc->ctl_lock); if (delay_info->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); delay_info->status = CTL_DELAY_STATUS_OK; switch (delay_info->delay_type) { case CTL_DELAY_TYPE_CONT: case CTL_DELAY_TYPE_ONESHOT: break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; break; } switch (delay_info->delay_loc) { case CTL_DELAY_LOC_DATAMOVE: lun->delay_info.datamove_type = delay_info->delay_type; lun->delay_info.datamove_delay = delay_info->delay_secs; break; case CTL_DELAY_LOC_DONE: lun->delay_info.done_type = delay_info->delay_type; lun->delay_info.done_delay = delay_info->delay_secs; break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; break; } mtx_unlock(&lun->lun_lock); #else delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; #endif /* CTL_IO_DELAY */ break; } #ifdef CTL_LEGACY_STATS case CTL_GETSTATS: { struct ctl_stats *stats = (struct ctl_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. 
*/ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (stats->fill_len + sizeof(lun->legacy_stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], sizeof(lun->legacy_stats)); if (retval != 0) break; stats->fill_len += sizeof(lun->legacy_stats); } stats->num_luns = softc->num_luns; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } #endif /* CTL_LEGACY_STATS */ case CTL_ERROR_INJECT: { struct ctl_error_desc *err_desc, *new_err_desc; err_desc = (struct ctl_error_desc *)addr; new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, M_WAITOK | M_ZERO); bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); mtx_lock(&softc->ctl_lock); if (err_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); free(new_err_desc, M_CTL); printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", __func__, (uintmax_t)err_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * We could do some checking here to verify the validity * of the request, but given the complexity of error * injection requests, the checking logic would be fairly * complex. * * For now, if the request is invalid, it just won't get * executed and might get deleted. */ STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); /* * XXX KDM check to make sure the serial number is unique, * in case we somehow manage to wrap. That shouldn't * happen for a very long time, but it's the right thing to * do. */ new_err_desc->serial = lun->error_serial; err_desc->serial = lun->error_serial; lun->error_serial++; mtx_unlock(&lun->lun_lock); break; } case CTL_ERROR_INJECT_DELETE: { struct ctl_error_desc *delete_desc, *desc, *desc2; int delete_done; delete_desc = (struct ctl_error_desc *)addr; delete_done = 0; mtx_lock(&softc->ctl_lock); if (delete_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", __func__, (uintmax_t)delete_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { if (desc->serial != delete_desc->serial) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); delete_done = 1; } mtx_unlock(&lun->lun_lock); if (delete_done == 0) { printf("%s: CTL_ERROR_INJECT_DELETE: can't find " "error serial %ju on LUN %u\n", __func__, delete_desc->serial, delete_desc->lun_id); retval = EINVAL; break; } break; } case CTL_DUMP_STRUCTS: { int j, k; struct ctl_port *port; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); printf("CTL Persistent Reservation information start:\n"); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_DISABLED) != 0) { mtx_unlock(&lun->lun_lock); continue; } for (j = 0; j < ctl_max_ports; j++) { if (lun->pr_keys[j] == NULL) continue; for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ if (lun->pr_keys[j][k] == 0) continue; printf(" LUN %ju port %d iid %d key " "%#jx\n", lun->lun, j, k, (uintmax_t)lun->pr_keys[j][k]); } } mtx_unlock(&lun->lun_lock); } printf("CTL Persistent Reservation information end\n"); printf("CTL Ports:\n"); STAILQ_FOREACH(port, &softc->port_list, links) { printf(" Port %d '%s' Frontend 
'%s' Type %u pp %d vp %d WWNN "
			    "%#jx WWPN %#jx\n", port->targ_port,
			    port->port_name, port->frontend->name,
			    port->port_type, port->physical_port,
			    port->virtual_port, (uintmax_t)port->wwnn,
			    (uintmax_t)port->wwpn);
			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
				if (port->wwpn_iid[j].in_use == 0 &&
				    port->wwpn_iid[j].wwpn == 0 &&
				    port->wwpn_iid[j].name == NULL)
					continue;

				printf("    iid %u use %d WWPN %#jx '%s'\n",
				    j, port->wwpn_iid[j].in_use,
				    (uintmax_t)port->wwpn_iid[j].wwpn,
				    port->wwpn_iid[j].name);
			}
		}
		printf("CTL Port information end\n");
		mtx_unlock(&softc->ctl_lock);
		/*
		 * XXX KDM calling this without a lock.  We'd likely want
		 * to drop the lock before calling the frontend's dump
		 * routine anyway.
		 */
		printf("CTL Frontends:\n");
		STAILQ_FOREACH(fe, &softc->fe_list, links) {
			printf("  Frontend '%s'\n", fe->name);
			if (fe->fe_dump != NULL)
				fe->fe_dump();
		}
		printf("CTL Frontend information end\n");
		break;
	}
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;
		struct ctl_backend_driver *backend;

		lun_req = (struct ctl_lun_req *)addr;

		backend = ctl_backend_find(lun_req->backend);
		if (backend == NULL) {
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str,
			    sizeof(lun_req->error_str),
			    "Backend \"%s\" not found.", lun_req->backend);
			break;
		}
		if (lun_req->num_be_args > 0) {
			lun_req->kern_be_args = ctl_copyin_args(
			    lun_req->num_be_args, lun_req->be_args,
			    lun_req->error_str,
			    sizeof(lun_req->error_str));
			if (lun_req->kern_be_args == NULL) {
				lun_req->status = CTL_LUN_ERROR;
				break;
			}
		}

		retval = backend->ioctl(dev, cmd, addr, flag, td);

		if (lun_req->num_be_args > 0) {
			ctl_copyout_args(lun_req->num_be_args,
			    lun_req->kern_be_args);
			ctl_free_args(lun_req->num_be_args,
			    lun_req->kern_be_args);
		}
		break;
	}
	case CTL_LUN_LIST: {
		struct sbuf *sb;
		struct ctl_lun_list *list;
		struct ctl_option *opt;

		list = (struct ctl_lun_list *)addr;

		/*
		 * Allocate a fixed length sbuf here, based on the length
		 * of the user's buffer.  We could allocate an auto-extending
		 * buffer, and then tell the user how much larger our
		 * amount of data is than his buffer, but that presents
		 * some problems:
		 *
		 * 1.  The sbuf(9) routines use a blocking malloc, and so
		 *     we can't hold a lock while calling them with an
		 *     auto-extending buffer.
		 *
		 * 2.  There is not currently a LUN reference counting
		 *     mechanism, outside of outstanding transactions on
		 *     the LUN's OOA queue.  So a LUN could go away on us
		 *     while we're getting the LUN number, backend-specific
		 *     information, etc.  Thus, given the way things
		 *     currently work, we need to hold the CTL lock while
		 *     grabbing LUN information.
		 *
		 * So, from the user's standpoint, the best thing to do is
		 * allocate what he thinks is a reasonable buffer length,
		 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
		 * double the buffer length and try again.  (And repeat
		 * that until he succeeds.)
		 */
		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
		if (sb == NULL) {
			list->status = CTL_LUN_LIST_ERROR;
			snprintf(list->error_str, sizeof(list->error_str),
			    "Unable to allocate %d bytes for LUN list",
			    list->alloc_len);
			break;
		}

		sbuf_printf(sb, "<ctllunlist>\n");

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			mtx_lock(&lun->lun_lock);
			retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
			    (uintmax_t)lun->lun);

			/*
			 * Bail out as soon as we see that we've overfilled
			 * the buffer.
			 */
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<backend_type>%s"
			    "</backend_type>\n",
			    (lun->backend == NULL) ?  "none" :
			    lun->backend->name);

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<lun_type>%d"
			    "</lun_type>\n", lun->be_lun->lun_type);

			if (retval != 0)
				break;

			if (lun->backend == NULL) {
				retval = sbuf_printf(sb, "</lun>\n");
				if (retval != 0)
					break;
				continue;
			}

			retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
			    (lun->be_lun->maxlba > 0) ?
			    lun->be_lun->maxlba + 1 : 0);

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<blocksize>%u"
			    "</blocksize>\n", lun->be_lun->blocksize);

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<serial_number>");

			if (retval != 0)
				break;

			retval = ctl_sbuf_printf_esc(sb,
			    lun->be_lun->serial_num,
			    sizeof(lun->be_lun->serial_num));

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "</serial_number>\n");

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<device_id>");

			if (retval != 0)
				break;

			retval = ctl_sbuf_printf_esc(sb,
			    lun->be_lun->device_id,
			    sizeof(lun->be_lun->device_id));

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "</device_id>\n");

			if (retval != 0)
				break;

			if (lun->backend->lun_info != NULL) {
				retval = lun->backend->lun_info(
				    lun->be_lun->be_lun, sb);
				if (retval != 0)
					break;
			}
			STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
				retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
				    opt->name, opt->value, opt->name);
				if (retval != 0)
					break;
			}

			retval = sbuf_printf(sb, "</lun>\n");

			if (retval != 0)
				break;
			mtx_unlock(&lun->lun_lock);
		}
		if (lun != NULL)
			mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);

		if ((retval != 0)
		 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
			retval = 0;
			sbuf_delete(sb);
			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
			snprintf(list->error_str, sizeof(list->error_str),
			    "Out of space, %d bytes is too small",
			    list->alloc_len);
			break;
		}

		sbuf_finish(sb);

		retval = copyout(sbuf_data(sb), list->lun_xml,
		    sbuf_len(sb) + 1);

		list->fill_len = sbuf_len(sb) + 1;
		list->status = CTL_LUN_LIST_OK;
		sbuf_delete(sb);
		break;
	}
	case CTL_ISCSI: {
		struct ctl_iscsi *ci;
		struct ctl_frontend *fe;

		ci = (struct ctl_iscsi *)addr;

		fe = ctl_frontend_find("iscsi");
		if (fe == NULL) {
			ci->status = CTL_ISCSI_ERROR;
			snprintf(ci->error_str, sizeof(ci->error_str),
			    "Frontend \"iscsi\" not found.");
			break;
		}

		retval = fe->ioctl(dev, cmd, addr, flag, td);
		break;
	}
	case CTL_PORT_REQ: {
		struct ctl_req *req;
		struct ctl_frontend *fe;

		req = (struct ctl_req *)addr;

		fe = ctl_frontend_find(req->driver);
		if (fe == NULL) {
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Frontend \"%s\" not found.", req->driver);
			break;
		}
		if (req->num_args > 0) {
			req->kern_args = ctl_copyin_args(req->num_args,
			    req->args, req->error_str,
			    sizeof(req->error_str));
			if (req->kern_args == NULL) {
				req->status = CTL_LUN_ERROR;
				break;
			}
		}

		if (fe->ioctl)
			retval = fe->ioctl(dev, cmd, addr, flag, td);
		else
			retval = ENODEV;

		if (req->num_args > 0) {
			ctl_copyout_args(req->num_args, req->kern_args);
			ctl_free_args(req->num_args, req->kern_args);
		}
		break;
	}
	case CTL_PORT_LIST: {
		struct sbuf *sb;
		struct ctl_port *port;
		struct ctl_lun_list *list;
		struct ctl_option *opt;
		int j;
		uint32_t plun;

		list = (struct ctl_lun_list *)addr;

		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
		if (sb == NULL) {
			list->status = CTL_LUN_LIST_ERROR;
			snprintf(list->error_str, sizeof(list->error_str),
			    "Unable to allocate %d bytes for LUN list",
			    list->alloc_len);
			break;
		}

		sbuf_printf(sb, "<ctlportlist>\n");

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
			    (uintmax_t)port->targ_port);

			/*
			 * Bail out as soon as we see that we've overfilled
			 * the buffer.
			 */
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<frontend_type>%s"
			    "</frontend_type>\n", port->frontend->name);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<port_type>%d"
			    "</port_type>\n", port->port_type);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<online>%s</online>\n",
			    (port->status & CTL_PORT_STATUS_ONLINE) ?
			    "YES" : "NO");
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<port_name>%s"
			    "</port_name>\n", port->port_name);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<physical_port>%d"
			    "</physical_port>\n", port->physical_port);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<virtual_port>%d"
			    "</virtual_port>\n", port->virtual_port);
			if (retval != 0)
				break;

			if (port->target_devid != NULL) {
				sbuf_printf(sb, "\t<target>");
				ctl_id_sbuf(port->target_devid, sb);
				sbuf_printf(sb, "</target>\n");
			}

			if (port->port_devid != NULL) {
				sbuf_printf(sb, "\t<port>");
				ctl_id_sbuf(port->port_devid, sb);
				sbuf_printf(sb, "</port>\n");
			}

			if (port->port_info != NULL) {
				retval = port->port_info(port->onoff_arg, sb);
				if (retval != 0)
					break;
			}
			STAILQ_FOREACH(opt, &port->options, links) {
				retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
				    opt->name, opt->value, opt->name);
				if (retval != 0)
					break;
			}

			if (port->lun_map != NULL) {
				sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
				for (j = 0; j < port->lun_map_size; j++) {
					plun = ctl_lun_map_from_port(port, j);
					if (plun == UINT32_MAX)
						continue;
					sbuf_printf(sb,
					    "\t<lun id=\"%u\">%u</lun>\n",
					    j, plun);
				}
			}

			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
				if (port->wwpn_iid[j].in_use == 0 ||
				    (port->wwpn_iid[j].wwpn == 0 &&
				     port->wwpn_iid[j].name == NULL))
					continue;

				if (port->wwpn_iid[j].name != NULL)
					retval = sbuf_printf(sb,
					    "\t<initiator id=\"%u\">%s"
					    "</initiator>\n",
					    j, port->wwpn_iid[j].name);
				else
					retval = sbuf_printf(sb,
					    "\t<initiator id=\"%u\">naa.%08jx"
					    "</initiator>\n",
					    j, port->wwpn_iid[j].wwpn);
				if (retval != 0)
					break;
			}
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "</targ_port>\n");
			if (retval != 0)
				break;
		}
		mtx_unlock(&softc->ctl_lock);

		if ((retval != 0)
		 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
			retval = 0;
			sbuf_delete(sb);
			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
			snprintf(list->error_str, sizeof(list->error_str),
			    "Out of space, %d bytes is too small",
			    list->alloc_len);
			break;
		}

		sbuf_finish(sb);

		retval = copyout(sbuf_data(sb), list->lun_xml,
		    sbuf_len(sb) + 1);

		list->fill_len = sbuf_len(sb) + 1;
		list->status = CTL_LUN_LIST_OK;
		sbuf_delete(sb);
		break;
	}
	case CTL_LUN_MAP: {
		struct ctl_lun_map *lm = (struct ctl_lun_map *)addr;
		struct ctl_port *port;

		mtx_lock(&softc->ctl_lock);
		if (lm->port < softc->port_min ||
		    lm->port >= softc->port_max ||
		    (port = softc->ctl_ports[lm->port]) == NULL) {
			mtx_unlock(&softc->ctl_lock);
			return (ENXIO);
		}
		if (port->status & CTL_PORT_STATUS_ONLINE) {
			STAILQ_FOREACH(lun, &softc->lun_list, links) {
				if (ctl_lun_map_to_port(port, lun->lun) ==
				    UINT32_MAX)
					continue;
				mtx_lock(&lun->lun_lock);
				ctl_est_ua_port(lun, lm->port, -1,
				    CTL_UA_LUN_CHANGE);
				mtx_unlock(&lun->lun_lock);
			}
		}
		mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
		if (lm->plun != UINT32_MAX) {
			if (lm->lun == UINT32_MAX)
				retval = ctl_lun_map_unset(port, lm->plun);
			else if (lm->lun < ctl_max_luns &&
			    softc->ctl_luns[lm->lun] != NULL)
				retval = ctl_lun_map_set(port, lm->plun,
				    lm->lun);
			else
				return (ENXIO);
		} else {
			if (lm->lun == UINT32_MAX)
				retval = ctl_lun_map_deinit(port);
			else
				retval = ctl_lun_map_init(port);
		}
		if (port->status & CTL_PORT_STATUS_ONLINE)
			ctl_isc_announce_port(port);
		break;
	}
	case CTL_GET_LUN_STATS: {
		struct ctl_get_io_stats *stats =
		    (struct ctl_get_io_stats *)addr;
		int i;

		/*
		 * XXX KDM no locking here.  If the LUN list changes,
		 * things can blow up.
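		 * Callers page through the list with first_item and grow
		 * the buffer and retry when status comes back
		 * CTL_SS_NEED_MORE_SPACE.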
*/ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (lun->lun < stats->first_item) continue; if (stats->fill_len + sizeof(lun->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&lun->stats, &stats->stats[i++], sizeof(lun->stats)); if (retval != 0) break; stats->fill_len += sizeof(lun->stats); } stats->num_items = softc->num_luns; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } case CTL_GET_PORT_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. */ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < stats->first_item) continue; if (stats->fill_len + sizeof(port->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&port->stats, &stats->stats[i++], sizeof(port->stats)); if (retval != 0) break; stats->fill_len += sizeof(port->stats); } stats->num_items = softc->num_ports; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } default: { /* XXX KDM should we fix this? */ #if 0 struct ctl_backend_driver *backend; unsigned int type; int found; found = 0; /* * We encode the backend type as the ioctl type for backend * ioctls. So parse it out here, and then search for a * backend of this type. */ type = _IOC_TYPE(cmd); STAILQ_FOREACH(backend, &softc->be_list, links) { if (backend->type == type) { found = 1; break; } } if (found == 0) { printf("ctl: unknown ioctl command %#lx or backend " "%d\n", cmd, type); retval = EINVAL; break; } retval = backend->ioctl(dev, cmd, addr, flag, td); #endif retval = ENOTTY; break; } } return (retval); } uint32_t ctl_get_initindex(struct ctl_nexus *nexus) { return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); } int ctl_lun_map_init(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; int size = ctl_lun_map_size; uint32_t i; if (port->lun_map == NULL || port->lun_map_size < size) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(size * sizeof(uint32_t), M_CTL, M_NOWAIT); } if (port->lun_map == NULL) return (ENOMEM); for (i = 0; i < size; i++) port->lun_map[i] = UINT32_MAX; port->lun_map_size = size; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_disable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_disable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_deinit(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; if (port->lun_map == NULL) return (0); port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_enable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_enable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) { int status; uint32_t old; if (port->lun_map == NULL) { status = ctl_lun_map_init(port); if (status != 0) return (status); } if (plun >= port->lun_map_size) return (EINVAL); old = port->lun_map[plun]; port->lun_map[plun] = glun; if ((port->status & 
	    CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
		if (port->lun_enable != NULL)
			port->lun_enable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

int
ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
	uint32_t old;

	if (port->lun_map == NULL || plun >= port->lun_map_size)
		return (0);
	old = port->lun_map[plun];
	port->lun_map[plun] = UINT32_MAX;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
		if (port->lun_disable != NULL)
			port->lun_disable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

uint32_t
ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
{

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	if (lun_id >= port->lun_map_size)
		return (UINT32_MAX);
	return (port->lun_map[lun_id]);
}

uint32_t
ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
{
	uint32_t i;

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	for (i = 0; i < port->lun_map_size; i++) {
		if (port->lun_map[i] == lun_id)
			return (i);
	}
	return (UINT32_MAX);
}

uint32_t
ctl_decode_lun(uint64_t encoded)
{
	uint8_t lun[8];
	uint32_t result = 0xffffffff;

	be64enc(lun, encoded);
	switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
	case RPL_LUNDATA_ATYP_PERIPH:
		if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
		    lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
			result = lun[1];
		break;
	case RPL_LUNDATA_ATYP_FLAT:
		if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
		    lun[6] == 0 && lun[7] == 0)
			result = ((lun[0] & 0x3f) << 8) + lun[1];
		break;
	case RPL_LUNDATA_ATYP_EXTLUN:
		switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
		case 0x02:
			switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
			case 0x00:
				result = lun[1];
				break;
			case 0x10:
				result = (lun[1] << 16) + (lun[2] << 8) +
				    lun[3];
				break;
			case 0x20:
				if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
					result = (lun[2] << 24) +
					    (lun[3] << 16) + (lun[4] << 8) +
					    lun[5];
				break;
			}
			break;
		case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
			result = 0xffffffff;
			break;
		}
		break;
	}
	return (result);
}

uint64_t
ctl_encode_lun(uint32_t decoded)
{
	uint64_t l = decoded;

	if (l <= 0xff)
		return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
	if (l <= 0x3fff)
		return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
	if (l <= 0xffffff)
		return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
		    (l << 32));
	return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
}

int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
	int i;

	for (i = first; i < last; i++) {
		if ((mask[i / 32] & (1 << (i % 32))) == 0)
			return (i);
	}
	return (-1);
}

int
ctl_set_mask(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) != 0)
		return (-1);
	else
		mask[chunk] |= (1 << piece);

	return (0);
}

int
ctl_clear_mask(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) == 0)
		return (-1);
	else
		mask[chunk] &= ~(1 << piece);

	return (0);
}

int
ctl_is_set(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) == 0)
		return (0);
	else
		return (1);
}

static uint64_t
ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return (0);
	return (t[residx % CTL_MAX_INIT_PER_PORT]);
}

static void
ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *t;

	t =
lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return;
	t[residx % CTL_MAX_INIT_PER_PORT] = 0;
}

static void
ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *p;
	u_int i;

	i = residx/CTL_MAX_INIT_PER_PORT;
	if (lun->pr_keys[i] != NULL)
		return;
	mtx_unlock(&lun->lun_lock);
	p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
	    M_WAITOK | M_ZERO);
	mtx_lock(&lun->lun_lock);
	if (lun->pr_keys[i] == NULL)
		lun->pr_keys[i] = p;
	else
		free(p, M_CTL);
}

static void
ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	KASSERT(t != NULL, ("prkey %d is not allocated", residx));
	t[residx % CTL_MAX_INIT_PER_PORT] = key;
}

/*
 * ctl_softc, pool_name, total_ctl_io are passed in.
 * npool is passed out.
 */
int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
		uint32_t total_ctl_io, void **npool)
{
	struct ctl_io_pool *pool;

	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
	    M_NOWAIT | M_ZERO);
	if (pool == NULL)
		return (ENOMEM);

	snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
	pool->ctl_softc = ctl_softc;
#ifdef IO_POOLS
	pool->zone = uma_zsecond_create(pool->name, NULL,
	    NULL, NULL, NULL, ctl_softc->io_zone);
	/* uma_prealloc(pool->zone, total_ctl_io); */
#else
	pool->zone = ctl_softc->io_zone;
#endif

	*npool = pool;
	return (0);
}

void
ctl_pool_free(struct ctl_io_pool *pool)
{

	if (pool == NULL)
		return;

#ifdef IO_POOLS
	uma_zdestroy(pool->zone);
#endif
	free(pool, M_CTL);
}

union ctl_io *
ctl_alloc_io(void *pool_ref)
{
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
	union ctl_io *io;

	io = uma_zalloc(pool->zone, M_WAITOK);
	if (io != NULL) {
		io->io_hdr.pool = pool_ref;
		CTL_SOFTC(io) = pool->ctl_softc;
	}
	return (io);
}

union ctl_io *
ctl_alloc_io_nowait(void *pool_ref)
{
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
	union ctl_io *io;

	io = uma_zalloc(pool->zone, M_NOWAIT);
	if (io != NULL) {
		io->io_hdr.pool = pool_ref;
		CTL_SOFTC(io) = pool->ctl_softc;
	}
	return (io);
}

void
ctl_free_io(union ctl_io *io)
{
	struct ctl_io_pool *pool;

	if (io == NULL)
		return;

	pool = (struct ctl_io_pool *)io->io_hdr.pool;
	uma_zfree(pool->zone, io);
}

void
ctl_zero_io(union ctl_io *io)
{
	struct ctl_io_pool *pool;

	if (io == NULL)
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool = io->io_hdr.pool;
	memset(io, 0, sizeof(*io));
	io->io_hdr.pool = pool;
	CTL_SOFTC(io) = pool->ctl_softc;
}

int
ctl_expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoq(buf, &endptr, 0);

	switch (tolower((unsigned char)*endptr)) {
	case 'e':
		shift = 60;
		break;
	case 'p':
		shift = 50;
		break;
	case 't':
		shift = 40;
		break;
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		*num = number;
		return (0);
	default:
		/* Unrecognized unit. */
		return (-1);
	}

	if ((number << shift) >> shift != number) {
		/* Overflow */
		return (-1);
	}
	*num = number << shift;
	return (0);
}

/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular LUN.
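 * (Today the tables are simply seeded from page_index_template and the
 * static default and changeable page templates copied in below.)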
*/ static int ctl_init_page_index(struct ctl_lun *lun) { int i, page_code; struct ctl_page_index *page_index; const char *value; uint64_t ival; memcpy(&lun->mode_pages.index, page_index_template, sizeof(page_index_template)); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; page_code = page_index->page_code & SMPH_PC_MASK; switch (page_code) { case SMS_RW_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], &rw_er_page_changeable, sizeof(rw_er_page_changeable)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], &rw_er_page_default, sizeof(rw_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rw_er_page; break; } case SMS_FORMAT_DEVICE_PAGE: { struct scsi_format_page *format_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Sectors per track are set above. Bytes per * sector need to be set here on a per-LUN basis. */ memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[ CTL_PAGE_CHANGEABLE], &format_page_changeable, sizeof(format_page_changeable)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], &format_page_default, sizeof(format_page_default)); format_page = &lun->mode_pages.format_page[ CTL_PAGE_CURRENT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_DEFAULT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_SAVED]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); page_index->page_data = (uint8_t *)lun->mode_pages.format_page; break; } case SMS_RIGID_DISK_PAGE: { struct scsi_rigid_disk_page *rigid_disk_page; uint32_t sectors_per_cylinder; uint64_t cylinders; #ifndef __XSCALE__ int shift; #endif /* !__XSCALE__ */ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Rotation rate and sectors per track are set * above. We calculate the cylinders here based on * capacity. Due to the number of heads and * sectors per track we're using, smaller arrays * may turn out to have 0 cylinders. Linux and * FreeBSD don't pay attention to these mode pages * to figure out capacity, but Solaris does. It * seems to deal with 0 cylinders just fine, and * works out a fake geometry based on the capacity. 
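 * As a worked example (with hypothetical numbers; the real ones come
 * from CTL_DEFAULT_HEADS and CTL_DEFAULT_SECTORS_PER_TRACK): with 255
 * heads and 63 sectors per track, sectors_per_cylinder is 16065, so
 * any LUN smaller than 16065 blocks reports 0 cylinders.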
*/ memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT], &rigid_disk_page_default, sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, sizeof(rigid_disk_page_changeable)); sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * CTL_DEFAULT_HEADS; /* * The divide method here will be more accurate, * probably, but results in floating point being * used in the kernel on i386 (__udivdi3()). On the * XScale, though, __udivdi3() is implemented in * software. * * The shift method for cylinder calculation is * accurate if sectors_per_cylinder is a power of * 2. Otherwise it might be slightly off -- you * might have a bit of a truncation problem. */ #ifdef __XSCALE__ cylinders = (lun->be_lun->maxlba + 1) / sectors_per_cylinder; #else for (shift = 31; shift > 0; shift--) { if (sectors_per_cylinder & (1 << shift)) break; } cylinders = (lun->be_lun->maxlba + 1) >> shift; #endif /* * We've basically got 3 bytes, or 24 bits for the * cylinder size in the mode page. If we're over, * just round down to 2^24. */ if (cylinders > 0xffffff) cylinders = 0xffffff; rigid_disk_page = &lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT]; scsi_ulto3b(cylinders, rigid_disk_page->cylinders); if ((value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) { scsi_ulto2b(strtol(value, NULL, 0), rigid_disk_page->rotation_rate); } memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rigid_disk_page; break; } case SMS_VERIFY_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], &verify_er_page_changeable, sizeof(verify_er_page_changeable)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], &verify_er_page_default, sizeof(verify_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.verify_er_page; break; } case SMS_CACHING_PAGE: { struct scsi_caching_page *caching_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], &caching_page_default, sizeof(caching_page_default)); memcpy(&lun->mode_pages.caching_page[ CTL_PAGE_CHANGEABLE], &caching_page_changeable, sizeof(caching_page_changeable)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], &caching_page_default, sizeof(caching_page_default)); caching_page = &lun->mode_pages.caching_page[ CTL_PAGE_SAVED]; value = ctl_get_opt(&lun->be_lun->options, "writecache"); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 &= ~SCP_WCE; value = ctl_get_opt(&lun->be_lun->options, "readcache"); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 |= SCP_RCD; memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], &lun->mode_pages.caching_page[CTL_PAGE_SAVED], sizeof(caching_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.caching_page; break; 
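        /*
         * Note the convention shared by the pages above: four copies of
         * each page are kept (current, changeable, default and saved),
         * and page_index->page_data points at that array, so callers can
         * pick the copy they need by page control value (see
         * ctl_default_page_handler() below).
         */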
} case SMS_CONTROL_MODE_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: { struct scsi_control_page *control_page; memcpy(&lun->mode_pages.control_page[ CTL_PAGE_DEFAULT], &control_page_default, sizeof(control_page_default)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CHANGEABLE], &control_page_changeable, sizeof(control_page_changeable)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_SAVED], &control_page_default, sizeof(control_page_default)); control_page = &lun->mode_pages.control_page[ CTL_PAGE_SAVED]; value = ctl_get_opt(&lun->be_lun->options, "reordering"); if (value != NULL && strcmp(value, "unrestricted") == 0) { control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; } memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_page[ CTL_PAGE_SAVED], sizeof(control_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_page; break; } case 0x01: memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_DEFAULT], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CHANGEABLE], &control_ext_page_changeable, sizeof(control_ext_page_changeable)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], sizeof(control_ext_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_ext_page; break; default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_INFO_EXCEPTIONS_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[ CTL_PAGE_CHANGEABLE], &ie_page_changeable, sizeof(ie_page_changeable)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], &ie_page_default, sizeof(ie_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.ie_page; break; case 0x02: { struct ctl_logical_block_provisioning_page *page; memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], &lbp_page_default, sizeof(lbp_page_default)); memcpy(&lun->mode_pages.lbp_page[ CTL_PAGE_CHANGEABLE], &lbp_page_changeable, sizeof(lbp_page_changeable)); memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], &lbp_page_default, sizeof(lbp_page_default)); page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; value = ctl_get_opt(&lun->be_lun->options, "avail-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[0].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[0].count); } value = ctl_get_opt(&lun->be_lun->options, "used-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[1].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[1].count); } value = ctl_get_opt(&lun->be_lun->options, "pool-avail-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[2].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 
512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[2].count); } value = ctl_get_opt(&lun->be_lun->options, "pool-used-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[3].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[3].count); } memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], sizeof(lbp_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.lbp_page; break; } default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_CDDVD_CAPS_PAGE:{ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[ CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, sizeof(cddvd_page_changeable)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], sizeof(cddvd_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.cddvd_page; break; } default: panic("invalid page code value %#x", page_code); } } return (CTL_RETVAL_COMPLETE); } static int ctl_init_log_page_index(struct ctl_lun *lun) { struct ctl_page_index *page_index; int i, j, k, prev; memcpy(&lun->log_pages.index, log_page_index_template, sizeof(log_page_index_template)); prev = -1; for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && lun->backend->lun_attr == NULL) continue; if (page_index->page_code != prev) { lun->log_pages.pages_page[j] = page_index->page_code; prev = page_index->page_code; j++; } lun->log_pages.subpages_page[k*2] = page_index->page_code; lun->log_pages.subpages_page[k*2+1] = page_index->subpage; k++; } lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; lun->log_pages.index[0].page_len = j; lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; lun->log_pages.index[1].page_len = k * 2; lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page; lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page); return (CTL_RETVAL_COMPLETE); } static int hex2bin(const char *str, uint8_t *buf, int buf_size) { int i; u_char c; memset(buf, 0, buf_size); while (isspace(str[0])) str++; if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; buf_size *= 2; for (i = 0; str[i] != 0 && i < buf_size; i++) { while (str[i] == '-') /* Skip dashes in UUIDs. */ str++; c = str[i]; if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 
'A' - 10 : 'a' - 10; else break; if (c >= 16) break; if ((i & 1) == 0) buf[i / 2] |= (c << 4); else buf[i / 2] |= c; } return ((i + 1) / 2); } /* * LUN allocation. * * Requirements: * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he * wants us to allocate the LUN and he can block. * - ctl_softc is always set * - be_lun is set if the LUN has a backend (needed for disk LUNs) * * Returns 0 for success, non-zero (errno) for failure. */ static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, struct ctl_be_lun *const be_lun) { struct ctl_lun *nlun, *lun; struct scsi_vpd_id_descriptor *desc; struct scsi_vpd_id_t10 *t10id; const char *eui, *naa, *scsiname, *uuid, *vendor, *value; int lun_number, lun_malloced; int devidlen, idlen1, idlen2 = 0, len; if (be_lun == NULL) return (EINVAL); /* * We currently only support Direct Access or Processor LUN types. */ switch (be_lun->lun_type) { case T_DIRECT: case T_PROCESSOR: case T_CDROM: break; case T_SEQUENTIAL: case T_CHANGER: default: be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); break; } if (ctl_lun == NULL) { lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); lun_malloced = 1; } else { lun_malloced = 0; lun = ctl_lun; } memset(lun, 0, sizeof(*lun)); if (lun_malloced) lun->flags = CTL_LUN_MALLOCED; lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* Generate LUN ID. */ devidlen = max(CTL_DEVID_MIN_LEN, strnlen(be_lun->device_id, CTL_DEVID_LEN)); idlen1 = sizeof(*t10id) + devidlen; len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; scsiname = ctl_get_opt(&be_lun->options, "scsiname"); if (scsiname != NULL) { idlen2 = roundup2(strlen(scsiname) + 1, 4); len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; } eui = ctl_get_opt(&be_lun->options, "eui"); if (eui != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } naa = ctl_get_opt(&be_lun->options, "naa"); if (naa != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } uuid = ctl_get_opt(&be_lun->options, "uuid"); if (uuid != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 18; } lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; desc->proto_codeset = SVPD_ID_CODESET_ASCII; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; desc->length = idlen1; t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; memset(t10id->vendor, ' ', sizeof(t10id->vendor)); if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); } else { strncpy(t10id->vendor, vendor, min(sizeof(t10id->vendor), strlen(vendor))); } strncpy((char *)t10id->vendor_spec_id, (char *)be_lun->device_id, devidlen); if (scsiname != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen2; strlcpy(desc->identifier, scsiname, idlen2); } if (eui != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; desc->length = hex2bin(eui, 
desc->identifier, 16); desc->length = desc->length > 12 ? 16 : (desc->length > 8 ? 12 : 8); len -= 16 - desc->length; } if (naa != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_NAA; desc->length = hex2bin(naa, desc->identifier, 16); desc->length = desc->length > 8 ? 16 : 8; len -= 16 - desc->length; } if (uuid != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_UUID; desc->identifier[0] = 0x10; hex2bin(uuid, &desc->identifier[2], 16); desc->length = 18; } lun->lun_devid->len = len; mtx_lock(&ctl_softc->ctl_lock); /* * See if the caller requested a particular LUN number. If so, see * if it is available. Otherwise, allocate the first available LUN. */ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { if ((be_lun->req_lun_id > (ctl_max_luns - 1)) || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { mtx_unlock(&ctl_softc->ctl_lock); if (be_lun->req_lun_id > (ctl_max_luns - 1)) { printf("ctl: requested LUN ID %d is higher " "than ctl_max_luns - 1 (%d)\n", be_lun->req_lun_id, ctl_max_luns - 1); } else { /* * XXX KDM return an error, or just assign * another LUN ID in this case?? */ printf("ctl: requested LUN ID %d is already " "in use\n", be_lun->req_lun_id); } fail: free(lun->lun_devid, M_CTL); if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); return (ENOSPC); } lun_number = be_lun->req_lun_id; } else { lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); if (lun_number == -1) { mtx_unlock(&ctl_softc->ctl_lock); printf("ctl: can't allocate LUN, out of LUNs\n"); goto fail; } } ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); mtx_unlock(&ctl_softc->ctl_lock); mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); lun->lun = lun_number; lun->be_lun = be_lun; /* * The processor LUN is always enabled. Disk LUNs come on line * disabled, and must be enabled by the backend. */ lun->flags |= CTL_LUN_DISABLED; lun->backend = be_lun->be; be_lun->ctl_lun = lun; be_lun->lun_id = lun_number; atomic_add_int(&be_lun->be->num_luns, 1); if (be_lun->flags & CTL_LUN_FLAG_EJECTED) lun->flags |= CTL_LUN_EJECTED; if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) lun->flags |= CTL_LUN_NO_MEDIA; if (be_lun->flags & CTL_LUN_FLAG_STOPPED) lun->flags |= CTL_LUN_STOPPED; if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) lun->flags |= CTL_LUN_PRIMARY_SC; value = ctl_get_opt(&be_lun->options, "removable"); if (value != NULL) { if (strcmp(value, "on") == 0) lun->flags |= CTL_LUN_REMOVABLE; } else if (be_lun->lun_type == T_CDROM) lun->flags |= CTL_LUN_REMOVABLE; lun->ctl_softc = ctl_softc; #ifdef CTL_TIME_IO lun->last_busy = getsbinuptime(); #endif TAILQ_INIT(&lun->ooa_queue); TAILQ_INIT(&lun->blocked_queue); STAILQ_INIT(&lun->error_list); lun->ie_reported = 1; callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); ctl_tpc_lun_init(lun); if (lun->flags & CTL_LUN_REMOVABLE) { lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, M_CTL, M_WAITOK); } /* * Initialize the mode and log page index. 
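 * ctl_init_page_index() seeds the per-LUN copies of the mode pages
 * from the static templates and applies the LUN options;
 * ctl_init_log_page_index() builds the lists of log pages and
 * subpages supported by this LUN type.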
*/ ctl_init_page_index(lun); ctl_init_log_page_index(lun); /* Setup statistics gathering */ #ifdef CTL_LEGACY_STATS lun->legacy_stats.device_type = be_lun->lun_type; lun->legacy_stats.lun_number = lun_number; lun->legacy_stats.blocksize = be_lun->blocksize; if (be_lun->blocksize == 0) lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; lun->legacy_stats.ports = malloc(sizeof(struct ctl_lun_io_port_stats) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); for (len = 0; len < ctl_max_ports; len++) lun->legacy_stats.ports[len].targ_port = len; #endif /* CTL_LEGACY_STATS */ lun->stats.item = lun_number; /* * Now, before we insert this lun on the lun list, set the lun * inventory changed UA for all other luns. */ mtx_lock(&ctl_softc->ctl_lock); STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); ctl_softc->ctl_luns[lun_number] = lun; ctl_softc->num_luns++; mtx_unlock(&ctl_softc->ctl_lock); lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); return (0); } /* * Delete a LUN. * Assumptions: * - LUN has already been marked invalid and any pending I/O has been taken * care of. */ static int ctl_free_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; struct ctl_lun *nlun; int i; KASSERT(TAILQ_EMPTY(&lun->ooa_queue), ("Freeing a LUN %p with outstanding I/O!\n", lun)); mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(softc->ctl_lun_mask, lun->lun); softc->ctl_luns[lun->lun] = NULL; softc->num_luns--; STAILQ_FOREACH(nlun, &softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } mtx_unlock(&softc->ctl_lock); /* * Tell the backend to free resources, if this LUN has a backend. */ atomic_subtract_int(&lun->be_lun->be->num_luns, 1); lun->be_lun->lun_shutdown(lun->be_lun->be_lun); lun->ie_reportcnt = UINT32_MAX; callout_drain(&lun->ie_callout); ctl_tpc_lun_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); for (i = 0; i < ctl_max_ports; i++) free(lun->pending_ua[i], M_CTL); free(lun->pending_ua, M_DEVBUF); for (i = 0; i < ctl_max_ports; i++) free(lun->pr_keys[i], M_CTL); free(lun->pr_keys, M_DEVBUF); free(lun->write_buffer, M_CTL); free(lun->prevent, M_CTL); if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); return (0); } static void ctl_create_lun(struct ctl_be_lun *be_lun) { /* * ctl_alloc_lun() should handle all potential failure cases. */ ctl_alloc_lun(control_softc, NULL, be_lun); } int ctl_add_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc = control_softc; mtx_lock(&softc->ctl_lock); STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); mtx_unlock(&softc->ctl_lock); wakeup(&softc->pending_lun_queue); return (0); } int ctl_enable_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc; struct ctl_port *port, *nport; struct ctl_lun *lun; int retval; lun = (struct ctl_lun *)be_lun->ctl_lun; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_DISABLED) == 0) { /* * eh? Why did we get called if the LUN is already * enabled? 
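 * Nothing to do in that case; just drop the locks and return
 * success, so a redundant enable request from a backend stays
 * harmless.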
*/ mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); return (0); } lun->flags &= ~CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_enable == NULL) continue; /* * Drop the lock while we call the FETD's enable routine. * This can lead to a callback into CTL (at least in the * case of the internal initiator frontend. */ mtx_unlock(&softc->ctl_lock); retval = port->lun_enable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_enable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_disable_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc; struct ctl_port *port; struct ctl_lun *lun; int retval; lun = (struct ctl_lun *)be_lun->ctl_lun; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); return (0); } lun->flags |= CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_disable == NULL) continue; /* * Drop the lock before we call the frontend's disable * routine, to avoid lock order reversals. * * XXX KDM what happens if the frontend list changes while * we're traversing it? It's unlikely, but should be handled. */ mtx_unlock(&softc->ctl_lock); retval = port->lun_disable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_disable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_start_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_stop_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_no_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_NO_MEDIA; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_has_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); if (lun->flags & CTL_LUN_REMOVABLE) ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); mtx_unlock(&lun->lun_lock); if ((lun->flags & CTL_LUN_REMOVABLE) && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } return (0); } int ctl_lun_ejected(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_EJECTED; 
mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_primary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_lun_secondary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_invalidate_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc; struct ctl_lun *lun; lun = (struct ctl_lun *)be_lun->ctl_lun; softc = lun->ctl_softc; mtx_lock(&lun->lun_lock); /* * The LUN needs to be disabled before it can be marked invalid. */ if ((lun->flags & CTL_LUN_DISABLED) == 0) { mtx_unlock(&lun->lun_lock); return (-1); } /* * Mark the LUN invalid. */ lun->flags |= CTL_LUN_INVALID; /* * If there is nothing in the OOA queue, go ahead and free the LUN. * If we have something in the OOA queue, we'll free it when the * last I/O completes. */ if (TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); return (0); } void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); mtx_unlock(&lun->lun_lock); if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. */ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } } /* * Backend "memory move is complete" callback for requests that never * make it down to say RAIDCore's configuration code. */ int ctl_config_move_done(union ctl_io *io) { int retval; CTL_DEBUG_PRINT(("ctl_config_move_done\n")); KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); if ((io->io_hdr.port_status != 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } else if (io->scsiio.kern_data_resid != 0 && (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_invalid_field_ciu(&io->scsiio); } if (ctl_debug & CTL_DEBUG_CDB_DATA) ctl_data_print(io); if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { /* * XXX KDM just assuming a single pointer here, and not a * S/G list. If we start using S/G lists for config data, * we'll need to know how to clean them up here as well. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) free(io->scsiio.kern_data_ptr, M_CTL); ctl_done(io); retval = CTL_RETVAL_COMPLETE; } else { /* * XXX KDM now we need to continue data movement. Some * options: * - call ctl_scsiio() again? 
We don't do this for data * writes, because for those at least we know ahead of * time where the write will go and how long it is. For * config writes, though, that information is largely * contained within the write itself, thus we need to * parse out the data again. * * - Call some other function once the data is in? */ /* * XXX KDM call ctl_scsiio() again for now, and check flag * bits to see whether we're allocated or not. */ retval = ctl_scsiio(&io->scsiio); } return (retval); } /* * This gets called by a backend driver when it is done with a * data_submit method. */ void ctl_data_submit_done(union ctl_io *io) { /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. */ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } ctl_done(io); } /* * This gets called by a backend driver when it is done with a * configuration write. */ void ctl_config_write_done(union ctl_io *io) { uint8_t *buf; /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. */ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } /* * Since a configuration write can be done for commands that actually * have data allocated, like write buffer, and commands that have * no data, like start/stop unit, we need to check here. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); } void ctl_config_read_done(union ctl_io *io) { uint8_t *buf; /* * If there is some error -- we are done, skip data transfer. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); return; } /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. */ if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { io->scsiio.io_cont(io); return; } ctl_datamove(io); } /* * SCSI release command. */ int ctl_scsi_release(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint32_t residx; CTL_DEBUG_PRINT(("ctl_scsi_release\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. 
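 * (RESERVE and RELEASE themselves are legacy SPC-2 facilities;
 * initiators are expected to migrate to persistent reservations,
 * and ctl_scsi_reserve() below honors that via CTL_LUN_PR_RESERVED.)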
 */
    mtx_lock(&lun->lun_lock);

    /*
     * According to SPC, it is not an error for an initiator to attempt
     * to release a reservation on a LUN that isn't reserved, or that
     * is reserved by another initiator. The reservation can only be
     * released, though, by the initiator who made it or by one of
     * several reset type events.
     */
    if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
        lun->flags &= ~CTL_LUN_RESERVED;

    mtx_unlock(&lun->lun_lock);

    ctl_set_success(ctsio);
    ctl_done((union ctl_io *)ctsio);
    return (CTL_RETVAL_COMPLETE);
}

int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun = CTL_LUN(ctsio);
    uint32_t residx;

    CTL_DEBUG_PRINT(("ctl_reserve\n"));

    residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

    /*
     * XXX KDM right now, we only support LUN reservation. We don't
     * support 3rd party reservations, or extent reservations, which
     * might actually need the parameter list. If we've gotten this
     * far, we've got a LUN reservation. Anything else got kicked out
     * above. So, according to SPC, ignore the length.
     */
    mtx_lock(&lun->lun_lock);
    if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
        ctl_set_reservation_conflict(ctsio);
        goto bailout;
    }

    /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */
    if (lun->flags & CTL_LUN_PR_RESERVED) {
        ctl_set_success(ctsio);
        goto bailout;
    }

    lun->flags |= CTL_LUN_RESERVED;
    lun->res_idx = residx;
    ctl_set_success(ctsio);

bailout:
    mtx_unlock(&lun->lun_lock);
    ctl_done((union ctl_io *)ctsio);
    return (CTL_RETVAL_COMPLETE);
}

int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun = CTL_LUN(ctsio);
    struct scsi_start_stop_unit *cdb;
    int retval;

    CTL_DEBUG_PRINT(("ctl_start_stop\n"));

    cdb = (struct scsi_start_stop_unit *)ctsio->cdb;

    if ((cdb->how & SSS_PC_MASK) == 0) {
        if ((lun->flags & CTL_LUN_PR_RESERVED) &&
            (cdb->how & SSS_START) == 0) {
            uint32_t residx;

            residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
            if (ctl_get_prkey(lun, residx) == 0 ||
                (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {
                ctl_set_reservation_conflict(ctsio);
                ctl_done((union ctl_io *)ctsio);
                return (CTL_RETVAL_COMPLETE);
            }
        }

        if ((cdb->how & SSS_LOEJ) &&
            (lun->flags & CTL_LUN_REMOVABLE) == 0) {
            ctl_set_invalid_field(ctsio,
                                  /*sks_valid*/ 1,
                                  /*command*/ 1,
                                  /*field*/ 4,
                                  /*bit_valid*/ 1,
                                  /*bit*/ 1);
            ctl_done((union ctl_io *)ctsio);
            return (CTL_RETVAL_COMPLETE);
        }

        if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
            lun->prevent_count > 0) {
            /* "Medium removal prevented" */
            ctl_set_sense(ctsio, /*current_error*/ 1,
                /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_prevent_allow(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_prevent *cdb; int retval; uint32_t initidx; CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); cdb = (struct scsi_prevent *)ctsio->cdb; if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&lun->lun_lock); if ((cdb->how & PR_PREVENT) && ctl_is_set(lun->prevent, initidx) == 0) { ctl_set_mask(lun->prevent, initidx); lun->prevent_count++; } else if ((cdb->how & PR_PREVENT) == 0 && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } /* * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but * we don't really do anything with the LBA and length fields if the user * passes them in. Instead we'll just flush out the cache for the entire * LUN. */ int ctl_sync_cache(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t starting_lba; uint32_t block_count; int retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_sync_cache\n")); retval = 0; switch (ctsio->cdb[0]) { case SYNCHRONIZE_CACHE: { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)ctsio->cdb; starting_lba = scsi_4btoul(cdb->begin_lba); block_count = scsi_2btoul(cdb->lb_count); byte2 = cdb->byte2; break; } case SYNCHRONIZE_CACHE_16: { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; starting_lba = scsi_8btou64(cdb->begin_lba); block_count = scsi_4btoul(cdb->lb_count); byte2 = cdb->byte2; break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; break; /* NOTREACHED */ } /* * We check the LBA and length, but don't do anything with them. * A SYNCHRONIZE CACHE will cause the entire cache for this lun to * get flushed. This check will just help satisfy anyone who wants * to see an error for an out of range LBA. 
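 * For example, if maxlba is the last valid LBA, a request with
 * starting_lba + block_count == maxlba + 1 ends exactly at the last
 * block and is accepted, while anything reaching past maxlba gets
 * LBA OUT OF RANGE.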
*/ if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { ctl_set_lba_out_of_range(ctsio, MAX(starting_lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); goto bailout; } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = starting_lba; lbalen->len = block_count; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); bailout: return (retval); } int ctl_format(struct ctl_scsiio *ctsio) { struct scsi_format *cdb; int length, defect_list_len; CTL_DEBUG_PRINT(("ctl_format\n")); cdb = (struct scsi_format *)ctsio->cdb; length = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) length = sizeof(struct scsi_format_header_long); else length = sizeof(struct scsi_format_header_short); } if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) && (length > 0)) { ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); ctsio->kern_data_len = length; ctsio->kern_total_len = length; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } defect_list_len = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) { struct scsi_format_header_long *header; header = (struct scsi_format_header_long *) ctsio->kern_data_ptr; defect_list_len = scsi_4btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } else { struct scsi_format_header_short *header; header = (struct scsi_format_header_short *) ctsio->kern_data_ptr; defect_list_len = scsi_2btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } } ctl_set_success(ctsio); bailout: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint64_t buffer_offset; uint32_t len; uint8_t byte2; static uint8_t descr[4]; static uint8_t echo_descr[4] = { 0 }; CTL_DEBUG_PRINT(("ctl_read_buffer\n")); switch (ctsio->cdb[0]) { case READ_BUFFER: { struct scsi_read_buffer *cdb; cdb = (struct scsi_read_buffer *)ctsio->cdb; buffer_offset = scsi_3btoul(cdb->offset); len = scsi_3btoul(cdb->length); byte2 = cdb->byte2; break; } case READ_BUFFER_16: { struct scsi_read_buffer_16 *cdb; cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; buffer_offset = scsi_8btou64(cdb->offset); len = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* This shouldn't happen. 
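 * Only READ BUFFER and READ BUFFER(16) are expected to be routed
 * here by the opcode table, so any other CDB indicates a table bug
 * rather than bad initiator input.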
*/ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (buffer_offset > CTL_WRITE_BUFFER_SIZE || buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { descr[0] = 0; scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); ctsio->kern_data_ptr = descr; len = min(len, sizeof(descr)); } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { ctsio->kern_data_ptr = echo_descr; len = min(len, sizeof(echo_descr)); } else { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; } ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctl_set_success(ctsio); ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_write_buffer *cdb; int buffer_offset, len; CTL_DEBUG_PRINT(("ctl_write_buffer\n")); cdb = (struct scsi_write_buffer *)ctsio->cdb; len = scsi_3btoul(cdb->length); buffer_offset = scsi_3btoul(cdb->offset); if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_same(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_write_same\n")); switch (ctsio->cdb[0]) { case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); byte2 = cdb->byte2; break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. 
*/ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* ANCHOR flag can be used only together with UNMAP */ if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Zero number of blocks means "to the last logical block" */ if (num_blocks == 0) { if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } num_blocks = (lun->be_lun->maxlba + 1) - lba; } len = lun->be_lun->blocksize; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((byte2 & SWS_NDOB) == 0 && (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_unmap(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_unmap *cdb; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_header *hdr; struct scsi_unmap_desc *buf, *end, *endnz, *range; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_unmap\n")); cdb = (struct scsi_unmap *)ctsio->cdb; len = scsi_2btoul(cdb->length); byte2 = cdb->byte2; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. 
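 * This is the usual two-pass pattern for CTL config commands: the
 * first pass allocates the buffer and starts ctl_datamove(), which
 * returns here through ctl_config_move_done() once the initiator's
 * data has arrived; the second pass, with CTL_FLAG_ALLOCATED set,
 * actually parses the descriptors.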
*/ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = ctsio->kern_total_len - ctsio->kern_data_resid; hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; if (len < sizeof (*hdr) || len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); goto done; } len = scsi_2btoul(hdr->desc_length); buf = (struct scsi_unmap_desc *)(hdr + 1); end = buf + len / sizeof(*buf); endnz = buf; for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); num_blocks = scsi_4btoul(range->length); if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (num_blocks != 0) endnz = range + 1; } /* * Block backend can not handle zero last range. * Filter it out and return if there is nothing left. */ len = (uint8_t *)endnz - (uint8_t *)buf; if (len == 0) { ctl_set_success(ctsio); goto done; } mtx_lock(&lun->lun_lock); ptrlen = (struct ctl_ptr_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; ptrlen->ptr = (void *)buf; ptrlen->len = len; ptrlen->flags = byte2; ctl_check_blocked(lun); mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); done: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_default_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); uint8_t *current_cp; int set_ua; uint32_t initidx; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); set_ua = 0; current_cp = (page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT)); mtx_lock(&lun->lun_lock); if (memcmp(current_cp, page_ptr, page_index->page_len)) { memcpy(current_cp, page_ptr, page_index->page_len); set_ua = 1; } if (set_ua != 0) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); if (set_ua) { ctl_isc_announce_mode(lun, ctl_get_initindex(&ctsio->io_hdr.nexus), page_index->page_code, page_index->subpage); } return (CTL_RETVAL_COMPLETE); } static void ctl_ie_timer(void *arg) { struct ctl_lun *lun = arg; uint64_t t; if (lun->ie_asc == 0) return; if (lun->MODE_IE.mrie == SIEP_MRIE_UA) ctl_est_ua_all(lun, -1, CTL_UA_IE); else lun->ie_reported = 0; if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(lun->MODE_IE.interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_schedule(&lun->ie_callout, t * hz / 10); } } int ctl_ie_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_info_exceptions_page *pg; uint64_t t; (void)ctl_default_page_handler(ctsio, page_index, page_ptr); pg = (struct scsi_info_exceptions_page *)page_ptr; mtx_lock(&lun->lun_lock); if 
(pg->info_flags & SIEP_FLAGS_TEST) { lun->ie_asc = 0x5d; lun->ie_ascq = 0xff; if (pg->mrie == SIEP_MRIE_UA) { ctl_est_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = 1; } else { ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = -1; } lun->ie_reportcnt = 1; if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(pg->interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_reset(&lun->ie_callout, t * hz / 10, ctl_ie_timer, lun); } } else { lun->ie_asc = 0; lun->ie_ascq = 0; lun->ie_reported = 1; ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reportcnt = UINT32_MAX; callout_stop(&lun->ie_callout); } mtx_unlock(&lun->lun_lock); return (CTL_RETVAL_COMPLETE); } static int ctl_do_mode_select(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct scsi_mode_page_header *page_header; struct ctl_page_index *page_index; struct ctl_scsiio *ctsio; int page_len, page_len_offset, page_len_size; union ctl_modepage_info *modepage_info; uint16_t *len_left, *len_used; int retval, i; ctsio = &io->scsiio; page_index = NULL; page_len = 0; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; len_left = &modepage_info->header.len_left; len_used = &modepage_info->header.len_used; do_next_page: page_header = (struct scsi_mode_page_header *) (ctsio->kern_data_ptr + *len_used); if (*len_left == 0) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (*len_left < sizeof(struct scsi_mode_page_header)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if ((page_header->page_code & SMPH_SPF) && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * XXX KDM should we do something with the block descriptor? */ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if ((page_index->page_code & SMPH_PC_MASK) != (page_header->page_code & SMPH_PC_MASK)) continue; /* * If neither page has a subpage code, then we've got a * match. */ if (((page_index->page_code & SMPH_SPF) == 0) && ((page_header->page_code & SMPH_SPF) == 0)) { page_len = page_header->page_length; break; } /* * If both pages have subpages, then the subpage numbers * have to match. */ if ((page_index->page_code & SMPH_SPF) && (page_header->page_code & SMPH_SPF)) { struct scsi_mode_page_header_sp *sph; sph = (struct scsi_mode_page_header_sp *)page_header; if (page_index->subpage == sph->subpage) { page_len = scsi_2btoul(sph->page_length); break; } } } /* * If we couldn't find the page, or if we don't have a mode select * handler for it, send back an error to the user. 
 */
    if ((i >= CTL_NUM_MODE_PAGES) ||
        (page_index->select_handler == NULL)) {
        ctl_set_invalid_field(ctsio,
                              /*sks_valid*/ 1,
                              /*command*/ 0,
                              /*field*/ *len_used,
                              /*bit_valid*/ 0,
                              /*bit*/ 0);
        free(ctsio->kern_data_ptr, M_CTL);
        ctl_done((union ctl_io *)ctsio);
        return (CTL_RETVAL_COMPLETE);
    }

    if (page_index->page_code & SMPH_SPF) {
        page_len_offset = 2;
        page_len_size = 2;
    } else {
        page_len_size = 1;
        page_len_offset = 1;
    }

    /*
     * If the length the initiator gives us isn't the one we specify in
     * the mode page header, or if they didn't specify enough data in
     * the CDB to avoid truncating this page, kick out the request.
     */
    if (page_len != page_index->page_len - page_len_offset -
        page_len_size) {
        ctl_set_invalid_field(ctsio,
                              /*sks_valid*/ 1,
                              /*command*/ 0,
                              /*field*/ *len_used + page_len_offset,
                              /*bit_valid*/ 0,
                              /*bit*/ 0);
        free(ctsio->kern_data_ptr, M_CTL);
        ctl_done((union ctl_io *)ctsio);
        return (CTL_RETVAL_COMPLETE);
    }

    if (*len_left < page_index->page_len) {
        free(ctsio->kern_data_ptr, M_CTL);
        ctl_set_param_len_error(ctsio);
        ctl_done((union ctl_io *)ctsio);
        return (CTL_RETVAL_COMPLETE);
    }

    /*
     * Run through the mode page, checking to make sure that the bits
     * the user changed are actually legal for him to change.
     */
    for (i = 0; i < page_index->page_len; i++) {
        uint8_t *user_byte, *change_mask, *current_byte;
        int bad_bit;
        int j;

        user_byte = (uint8_t *)page_header + i;
        change_mask = page_index->page_data +
            (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
        current_byte = page_index->page_data +
            (page_index->page_len * CTL_PAGE_CURRENT) + i;

        /*
         * Check to see whether the user set any bits in this byte
         * that he is not allowed to set.
         */
        if ((*user_byte & ~(*change_mask)) ==
            (*current_byte & ~(*change_mask)))
            continue;

        /*
         * Go through bit by bit to determine which one is illegal.
         */
        bad_bit = 0;
        for (j = 7; j >= 0; j--) {
            if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
                (((1 << j) & ~(*change_mask)) & *current_byte)) {
                bad_bit = j;
                break;
            }
        }
        ctl_set_invalid_field(ctsio,
                              /*sks_valid*/ 1,
                              /*command*/ 0,
                              /*field*/ *len_used + i,
                              /*bit_valid*/ 1,
                              /*bit*/ bad_bit);
        free(ctsio->kern_data_ptr, M_CTL);
        ctl_done((union ctl_io *)ctsio);
        return (CTL_RETVAL_COMPLETE);
    }

    /*
     * Decrement these before we call the page handler, since we may
     * end up getting called back one way or another before the handler
     * returns to this context.
     */
    *len_left -= page_index->page_len;
    *len_used += page_index->page_len;

    retval = page_index->select_handler(ctsio, page_index,
                                        (uint8_t *)page_header);

    /*
     * If the page handler returns CTL_RETVAL_QUEUED, then we need to
     * wait until this queued command completes to finish processing
     * the mode page. If it returns anything other than
     * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
     * already set the sense information, freed the data pointer, and
     * completed the io for us.
     */
    if (retval != CTL_RETVAL_COMPLETE)
        goto bailout_no_done;

    /*
     * If the initiator sent us more than one page, parse the next one.
     */
    if (*len_left > 0)
        goto do_next_page;

    ctl_set_success(ctsio);
    free(ctsio->kern_data_ptr, M_CTL);
    ctl_done((union ctl_io *)ctsio);

bailout_no_done:
    return (CTL_RETVAL_COMPLETE);
}

int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun = CTL_LUN(ctsio);
    union ctl_modepage_info *modepage_info;
    int bd_len, i, header_size, param_len, pf, rtd, sp;
    uint32_t initidx;

    initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
    switch (ctsio->cdb[0]) {
    case MODE_SELECT_6: {
        struct scsi_mode_select_6 *cdb;

        cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

        pf = (cdb->byte2 & SMS_PF) ?
1 : 0; rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; sp = (cdb->byte2 & SMS_SP) ? 1 : 0; param_len = cdb->length; header_size = sizeof(struct scsi_mode_header_6); break; } case MODE_SELECT_10: { struct scsi_mode_select_10 *cdb; cdb = (struct scsi_mode_select_10 *)ctsio->cdb; pf = (cdb->byte2 & SMS_PF) ? 1 : 0; rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; sp = (cdb->byte2 & SMS_SP) ? 1 : 0; param_len = scsi_2btoul(cdb->length); header_size = sizeof(struct scsi_mode_header_10); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (rtd) { if (param_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Revert to defaults. */ ctl_init_page_index(lun); mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * From SPC-3: * "A parameter list length of zero indicates that the Data-Out Buffer * shall be empty. This condition shall not be considered as an error." */ if (param_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Since we'll hit this the first time through, prior to * allocation, we don't need to free a data buffer here. */ if (param_len < header_size) { ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Allocate the data buffer and grab the user's data. In theory, * we shouldn't have to sanity check the parameter list length here * because the maximum size is 64K. We should be able to malloc * that much without too many problems. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_header_6 *mh6; mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; bd_len = mh6->blk_desc_len; break; } case MODE_SELECT_10: { struct scsi_mode_header_10 *mh10; mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; bd_len = scsi_2btoul(mh10->blk_desc_len); break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } if (param_len < (header_size + bd_len)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_config_write_done(), it'll get passed back to * ctl_do_mode_select() for further processing, or completion if * we're all done. 
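* (In effect, each pass of ctl_do_mode_select() consumes one page: when a select handler queues backend I/O, completion re-enters through io_cont until len_left reaches zero and the command is finished.)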
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_do_mode_select; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; memset(modepage_info, 0, sizeof(*modepage_info)); modepage_info->header.len_left = param_len - header_size - bd_len; modepage_info->header.len_used = header_size + bd_len; return (ctl_do_mode_select((union ctl_io *)ctsio)); } int ctl_mode_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int pc, page_code, dbd, llba, subpage; int alloc_len, page_len, header_len, total_len; struct scsi_mode_block_descr *block_desc; struct ctl_page_index *page_index; dbd = 0; llba = 0; block_desc = NULL; CTL_DEBUG_PRINT(("ctl_mode_sense\n")); switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_6); if (cdb->byte2 & SMS_DBD) dbd = 1; else header_len += sizeof(struct scsi_mode_block_descr); pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = cdb->length; break; } case MODE_SENSE_10: { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_10); if (cdb->byte2 & SMS_DBD) dbd = 1; else header_len += sizeof(struct scsi_mode_block_descr); if (cdb->byte2 & SMS10_LLBAA) llba = 1; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * We have to make a first pass through to calculate the size of * the pages that match the user's query. Then we allocate enough * memory to hold it, and actually copy the data into the buffer. */ switch (page_code) { case SMS_ALL_PAGES_PAGE: { u_int i; page_len = 0; /* * At the moment, values other than 0 and 0xff here are * reserved according to SPC-3. */ if ((subpage != SMS_SUBPAGE_PAGE_0) && (subpage != SMS_SUBPAGE_ALL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 3, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. 
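* (For example, a MODE SENSE for page 0x3f, subpage 0x00 returns only the subpage-0 copy of each supported page, while page 0x3f, subpage 0xff returns every supported page and subpage.)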
*/ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; #if 0 printf("found page %#x len %d\n", page_index->page_code & SMPH_PC_MASK, page_index->page_len); #endif page_len += page_index->page_len; } break; } default: { u_int i; page_len = 0; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; #if 0 printf("found page %#x len %d\n", page_index->page_code & SMPH_PC_MASK, page_index->page_len); #endif page_len += page_index->page_len; } if (page_len == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } } total_len = header_len + page_len; #if 0 printf("header_len = %d, page_len = %d, total_len = %d\n", header_len, page_len, total_len); #endif ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_hdr_6 *header; header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; header->datalen = MIN(total_len - 1, 254); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (dbd) header->block_descr_len = 0; else header->block_descr_len = sizeof(struct scsi_mode_block_descr); block_desc = (struct scsi_mode_block_descr *)&header[1]; break; } case MODE_SENSE_10: { struct scsi_mode_hdr_10 *header; int datalen; header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; datalen = MIN(total_len - 2, 65533); scsi_ulto2b(datalen, header->datalen); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (dbd) scsi_ulto2b(0, header->block_descr_len); else scsi_ulto2b(sizeof(struct scsi_mode_block_descr), header->block_descr_len); block_desc = (struct scsi_mode_block_descr *)&header[1]; break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } /* * If we've got a disk, use its blocksize in the block * descriptor. Otherwise, just set it to 0. 
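* (Only the 3-byte BLOCK LENGTH field is filled in below; the density code and number-of-blocks fields stay zero, courtesy of the M_ZERO allocation above.)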
*/ if (dbd == 0) { if (lun->be_lun->lun_type == T_DIRECT) scsi_ulto3b(lun->be_lun->blocksize, block_desc->block_len); else scsi_ulto3b(0, block_desc->block_len); } switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. We already checked (above) * to make sure the user only specified a subpage * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } default: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_param_header *phdr; uint8_t *data; uint64_t val; data = page_index->page_data; if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0001, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0002, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x01; /* per-LUN */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f1, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f2, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } page_index->page_len = data - page_index->page_data; return (0); } int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct stat_page *data; struct bintime *t; data = (struct stat_page *)page_index->page_data; scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); data->sap.hdr.param_control = SLP_LBIN; data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - sizeof(struct scsi_log_param_header); scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], data->sap.read_num); scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], data->sap.write_num); if (lun->be_lun->blocksize > 0) { scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / lun->be_lun->blocksize, data->sap.recvieved_lba); scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / lun->be_lun->blocksize, data->sap.transmitted_lba); } t = &lun->stats.time[CTL_STATS_READ]; scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), data->sap.read_int); t = &lun->stats.time[CTL_STATS_WRITE]; scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), data->sap.write_int); scsi_u64to8b(0, data->sap.weighted_num); scsi_u64to8b(0, data->sap.weighted_int); scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 
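/*
 * The read/write interval values above convert a struct bintime to
 * milliseconds: bt->frac counts 1/2^64 fractions of a second, so
 * dividing it by (UINT64_MAX / 1000), roughly 2^64 / 1000, yields
 * milliseconds.  A minimal standalone sketch of that conversion
 * (hypothetical helper name, illustration only, not compiled):
 */
#if 0
static uint64_t
ctl_bintime_to_ms(const struct bintime *bt)
{

	/* Whole seconds in ms, plus the fractional part scaled to ms. */
	return ((uint64_t)bt->sec * 1000 + bt->frac / (UINT64_MAX / 1000));
}
#endif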
data->it.hdr.param_control = SLP_LBIN; data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - sizeof(struct scsi_log_param_header); #ifdef CTL_TIME_IO scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); #endif scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); data->ti.hdr.param_control = SLP_LBIN; data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - sizeof(struct scsi_log_param_header); scsi_ulto4b(3, data->ti.exponent); scsi_ulto4b(1, data->ti.integer); return (0); } int ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_informational_exceptions *data; data = (struct scsi_log_informational_exceptions *)page_index->page_data; scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - sizeof(struct scsi_log_param_header); data->ie_asc = lun->ie_asc; data->ie_ascq = lun->ie_ascq; data->temperature = 0xff; return (0); } int ctl_log_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int i, pc, page_code, subpage; int alloc_len, total_len; struct ctl_page_index *page_index; struct scsi_log_sense *cdb; struct scsi_log_header *header; CTL_DEBUG_PRINT(("ctl_log_sense\n")); cdb = (struct scsi_log_sense *)ctsio->cdb; pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SLS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); page_index = NULL; for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SL_PAGE_CODE) != page_code) continue; /* Look for the right subpage or the subpage wildcard */ if (page_index->subpage != subpage) continue; break; } if (i >= CTL_NUM_LOG_PAGES) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_log_header) + page_index->page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; header = (struct scsi_log_header *)ctsio->kern_data_ptr; header->page = page_index->page_code; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) header->page |= SL_DS; if (page_index->subpage) { header->page |= SL_SPF; header->subpage = page_index->subpage; } scsi_ulto2b(page_index->page_len, header->datalen); /* * Call the handler, if it exists, to update the * page to the latest values.
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index, pc); memcpy(header + 1, page_index->page_data, page_index->page_len); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_capacity *cdb; struct scsi_read_capacity_data *data; uint32_t lba; CTL_DEBUG_PRINT(("ctl_read_capacity\n")); cdb = (struct scsi_read_capacity *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); if (((cdb->pmi & SRC_PMI) == 0) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * If the maximum LBA is greater than 0xfffffffe, the user must * issue a SERVICE ACTION IN (16) command, with the read capacity * service action set. */ if (lun->be_lun->maxlba > 0xfffffffe) scsi_ulto4b(0xffffffff, data->addr); else scsi_ulto4b(lun->be_lun->maxlba, data->addr); /* * XXX KDM this may not be 512 bytes... */ scsi_ulto4b(lun->be_lun->blocksize, data->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity_16(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_capacity_16 *cdb; struct scsi_read_capacity_data_long *data; uint64_t lba; uint32_t alloc_len; CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; alloc_len = scsi_4btoul(cdb->alloc_len); lba = scsi_8btou64(cdb->addr); if ((cdb->reladr & SRC16_PMI) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*data), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; scsi_u64to8b(lun->be_lun->maxlba, data->addr); /* XXX KDM this may not be 512 bytes...
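* (The BLOCK LENGTH reported below comes straight from the backing store's configured blocksize, whatever that is. The fields after it expose physical-block geometry: pblockexp as the LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT, pblockoff as the LOWEST ALIGNED LBA, and, for UNMAP-capable LUNs, the LBPME/LBPRZ provisioning bits.)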
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_lba_status(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_lba_status *cdb; struct scsi_get_lba_status_data *data; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t alloc_len, total_len; int retval; CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); cdb = (struct scsi_get_lba_status *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); alloc_len = scsi_4btoul(cdb->alloc_len); if (lba > lun->be_lun->maxlba) { ctl_set_lba_out_of_range(ctsio, lba); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(*data) + sizeof(data->descr[0]); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* Fill dummy data in case backend can't tell anything. */ scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); scsi_u64to8b(lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), data->descr[0].length); data->descr[0].status = 0; /* Mapped or unknown. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = total_len; lbalen->flags = 0; retval = lun->backend->config_read((union ctl_io *)ctsio); return (retval); } int ctl_read_defect(struct ctl_scsiio *ctsio) { struct scsi_read_defect_data_10 *ccb10; struct scsi_read_defect_data_12 *ccb12; struct scsi_read_defect_data_hdr_10 *data10; struct scsi_read_defect_data_hdr_12 *data12; uint32_t alloc_len, data_len; uint8_t format; CTL_DEBUG_PRINT(("ctl_read_defect\n")); if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; format = ccb10->format; alloc_len = scsi_2btoul(ccb10->alloc_length); data_len = sizeof(*data10); } else { ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; format = ccb12->format; alloc_len = scsi_4btoul(ccb12->alloc_length); data_len = sizeof(*data12); } if (alloc_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { data10 = (struct scsi_read_defect_data_hdr_10 *) ctsio->kern_data_ptr; data10->format = format; scsi_ulto2b(0, data10->length); } else { data12 = (struct scsi_read_defect_data_hdr_12 *) ctsio->kern_data_ptr; data12->format = format; scsi_ulto2b(0, data12->generation); scsi_ulto4b(0, data12->length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) { 
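/*
 * Build the REPORT TARGET PORT GROUPS response: an optional "shared"
 * group (group id 1) listing HA-shared ports, then one group per HA
 * shelf (group ids 2 and up), each descriptor followed by the
 * relative target port identifiers of its member ports.  The local
 * shelf's group reports the preferred asymmetric access state (ts)
 * and the peer's group the other state (os), both derived below from
 * the HA link state and HA mode.
 */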
struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_maintenance_in *cdb; int retval; int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; int num_ha_groups, num_target_ports, shared_group; struct ctl_port *port; struct scsi_target_group_data *rtg_ptr; struct scsi_target_group_data_extended *rtg_ext_ptr; struct scsi_target_port_group_descriptor *tpg_desc; CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); cdb = (struct scsi_maintenance_in *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; switch (cdb->byte2 & STG_PDF_MASK) { case STG_PDF_LENGTH: ext = 0; break; case STG_PDF_EXTENDED: ext = 1; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return(retval); } num_target_ports = 0; shared_group = (softc->is_single != 0); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->status & CTL_PORT_STATUS_HA_SHARED) shared_group = 1; } mtx_unlock(&softc->ctl_lock); num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; if (ext) total_len = sizeof(struct scsi_target_group_data_extended); else total_len = sizeof(struct scsi_target_group_data); total_len += sizeof(struct scsi_target_port_group_descriptor) * (shared_group + num_ha_groups) + sizeof(struct scsi_target_port_descriptor) * num_target_ports; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ext) { rtg_ext_ptr = (struct scsi_target_group_data_extended *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); rtg_ext_ptr->format_type = 0x10; rtg_ext_ptr->implicit_transition_time = 0; tpg_desc = &rtg_ext_ptr->groups[0]; } else { rtg_ptr = (struct scsi_target_group_data *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ptr->length); tpg_desc = &rtg_ptr->groups[0]; } mtx_lock(&softc->ctl_lock); pg = softc->port_min / softc->port_cnt; if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { /* Some shelf is known to be primary. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) os = TPG_ASYMMETRIC_ACCESS_STANDBY; else os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; if (lun->flags & CTL_LUN_PRIMARY_SC) { ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } } else { /* No known primary shelf. 
*/ if (softc->ha_link == CTL_HA_LINK_OFFLINE) { ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; } } if (shared_group) { tpg_desc->pref_state = ts; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(1, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (!softc->is_single && (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } for (g = 0; g < num_ha_groups; g++) { tpg_desc->pref_state = (g == pg) ? ts : os; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(2 + g, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < g * softc->port_cnt || port->targ_port >= (g + 1) * softc->port_cnt) continue; if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (port->status & CTL_PORT_STATUS_HA_SHARED) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_supported_opcodes *cdb; const struct ctl_cmd_entry *entry, *sentry; struct scsi_report_supported_opcodes_all *all; struct scsi_report_supported_opcodes_descr *descr; struct scsi_report_supported_opcodes_one *one; int retval; int alloc_len, total_len; int opcode, service_action, i, j, num; CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; opcode = cdb->requested_opcode; service_action = scsi_2btoul(cdb->requested_service_action); switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) num++; } } else { if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) num++; } } total_len = sizeof(struct scsi_report_supported_opcodes_all) + num * sizeof(struct scsi_report_supported_opcodes_descr); break; case RSO_OPTIONS_OC: if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; case RSO_OPTIONS_OC_SA: if 
((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || service_action >= 32) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* FALLTHROUGH */ case RSO_OPTIONS_OC_ASA: total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: all = (struct scsi_report_supported_opcodes_all *) ctsio->kern_data_ptr; num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (!ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(j, descr->service_action); descr->flags = RSO_SERVACTV; scsi_ulto2b(sentry->length, descr->cdb_length); } } else { if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(0, descr->service_action); descr->flags = 0; scsi_ulto2b(entry->length, descr->cdb_length); } } scsi_ulto4b( num * sizeof(struct scsi_report_supported_opcodes_descr), all->length); break; case RSO_OPTIONS_OC: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; goto fill_one; case RSO_OPTIONS_OC_SA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; fill_one: if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { one->support = 3; scsi_ulto2b(entry->length, one->cdb_length); one->cdb_usage[0] = opcode; memcpy(&one->cdb_usage[1], entry->usage, entry->length - 1); } else one->support = 1; break; case RSO_OPTIONS_OC_ASA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; if (entry->flags & CTL_CMD_FLAG_SA5) { entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } else if (service_action != 0) { one->support = 1; break; } goto fill_one; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_tmf(struct ctl_scsiio *ctsio) { struct scsi_report_supported_tmf *cdb; struct scsi_report_supported_tmf_ext_data *data; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; if (cdb->options & RST_REPD) total_len = sizeof(struct scsi_report_supported_tmf_ext_data); else total_len = sizeof(struct scsi_report_supported_tmf_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct 
scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | RST_TRS; data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; data->length = total_len - 4; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_report_timestamp(struct ctl_scsiio *ctsio) { struct scsi_report_timestamp *cdb; struct scsi_report_timestamp_data *data; struct timeval tv; int64_t timestamp; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); cdb = (struct scsi_report_timestamp *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_timestamp_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*data) - 2, data->length); data->origin = RTS_ORIG_OUTSIDE; getmicrotime(&tv); timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; scsi_ulto4b(timestamp >> 16, data->timestamp); scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_per_res_in *cdb; int alloc_len, total_len = 0; /* struct scsi_per_res_in_rsrv in_data; */ uint64_t key; CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); cdb = (struct scsi_per_res_in *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); retry: mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: /* read keys */ total_len = sizeof(struct scsi_per_res_in_keys) + lun->pr_key_count * sizeof(struct scsi_per_res_key); break; case SPRI_RR: /* read reservation */ if (lun->flags & CTL_LUN_PR_RESERVED) total_len = sizeof(struct scsi_per_res_in_rsrv); else total_len = sizeof(struct scsi_per_res_in_header); break; case SPRI_RC: /* report capabilities */ total_len = sizeof(struct scsi_per_res_cap); break; case SPRI_RS: /* read full status */ total_len = sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count; break; default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: { // read keys struct scsi_per_res_in_keys *res_keys; int i, key_count; res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) 
*/ if (total_len != (sizeof(struct scsi_per_res_in_keys) + (lun->pr_key_count * sizeof(struct scsi_per_res_key)))){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_keys->header.generation); scsi_ulto4b(sizeof(struct scsi_per_res_key) * lun->pr_key_count, res_keys->header.length); for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; /* * We used lun->pr_key_count to calculate the * size to allocate. If it turns out the number of * initiators with the registered flag set is * larger than that (i.e. they haven't been kept in * sync), we've got a problem. */ if (key_count >= lun->pr_key_count) { key_count++; continue; } scsi_u64to8b(key, res_keys->keys[key_count].key); key_count++; } break; } case SPRI_RR: { // read reservation struct scsi_per_res_in_rsrv *res; int tmp_len, header_only; res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; scsi_ulto4b(lun->pr_generation, res->header.generation); if (lun->flags & CTL_LUN_PR_RESERVED) { tmp_len = sizeof(struct scsi_per_res_in_rsrv); scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), res->header.length); header_only = 0; } else { tmp_len = sizeof(struct scsi_per_res_in_header); scsi_ulto4b(0, res->header.length); header_only = 1; } /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (tmp_len != total_len) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation status changed, retrying\n", __func__); goto retry; } /* * No reservation held, so we're done. */ if (header_only != 0) break; /* * If the registration is an All Registrants type, the key * is 0, since it doesn't really matter. */ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), res->data.reservation); } res->data.scopetype = lun->pr_res_type; break; } case SPRI_RC: //report capabilities { struct scsi_per_res_cap *res_cap; uint16_t type_mask; res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*res_cap), res_cap->length); res_cap->flags1 = SPRI_CRH; res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; type_mask = SPRI_TM_WR_EX_AR | SPRI_TM_EX_AC_RO | SPRI_TM_WR_EX_RO | SPRI_TM_EX_AC | SPRI_TM_WR_EX | SPRI_TM_EX_AC_AR; scsi_ulto2b(type_mask, res_cap->type_mask); break; } case SPRI_RS: { // read full status struct scsi_per_res_in_full *res_status; struct scsi_per_res_in_full_desc *res_desc; struct ctl_port *port; int i, len; res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) 
*/ if (total_len < (sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count)){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_status->header.generation); res_desc = &res_status->desc[0]; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; scsi_u64to8b(key, res_desc->res_key.key); if ((lun->flags & CTL_LUN_PR_RESERVED) && (lun->pr_res_idx == i || lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { res_desc->flags = SPRI_FULL_R_HOLDER; res_desc->scopetype = lun->pr_res_type; } scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, res_desc->rel_trgt_port_id); len = 0; port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; if (port != NULL) len = ctl_create_iid(port, i % CTL_MAX_INIT_PER_PORT, res_desc->transport_id); scsi_ulto4b(len, res_desc->additional_length); res_desc = (struct scsi_per_res_in_full_desc *) &res_desc->transport_id[len]; } scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], res_status->header.length); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if * it should return. */ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param) { union ctl_ha_msg persis_io; int i; mtx_lock(&lun->lun_lock); if (sa_res_key == 0) { if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* not all registrants */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } } else if (lun->pr_res_idx == 
CTL_PR_ALL_REGISTRANTS || !(lun->flags & CTL_LUN_PR_RESERVED)) { int found = 0; if (res_key == sa_res_key) { /* special case */ /* * The spec implies this is not good but doesn't * say what to do. There are two choices: either * generate a res conflict or a check condition * with illegal field in parameter data. Since * that is what is done when the sa_res_key is * zero I'll take that approach since this has * to do with the sa_res_key. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) != sa_res_key) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* Reserved but not all registrants */ /* sa_res_key is res holder */ if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Do the following: * if sa_res_key != res_key, remove all * registrants w/sa_res_key and generate UA * for these registrants (Registrations * Preempted); if it wasn't an exclusive * reservation, generate UA (Reservations * Preempted) for all other registered nexuses * if the type has changed. Establish the new * reservation and holder. If res_key and * sa_res_key are the same, do the above * except don't unregister the res holder.
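* (In short: every other registrant holding sa_res_key is dropped with a REGISTRATIONS PREEMPTED unit attention; if the reservation type changes away from a registrants-only type, the surviving registrants are notified via CTL_UA_RES_RELEASE; then the new type and holder are installed.)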
*/ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* * sa_res_key is not the res holder just * remove registrants */ int found=0; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key != ctl_get_prkey(lun, i)) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } } return (0); } static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) { uint64_t sa_res_key; int i; sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || lun->pr_res_idx == CTL_PR_NO_RESERVATION || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { if (sa_res_key == 0) { /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key == ctl_get_prkey(lun, i)) continue; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } } } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (msg->pr.pr_info.res_type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; } 
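/*
 * Per SPC-3, PRGENERATION advances on successful REGISTER, CLEAR,
 * PREEMPT and PREEMPT AND ABORT service actions, but not on RESERVE
 * or RELEASE, which is why the preempt paths here and in
 * ctl_pro_preempt() all bump it.
 */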
lun->pr_generation++; } int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); int retval; u_int32_t param_len; struct scsi_per_res_out *cdb; struct scsi_per_res_out_parms* param; uint32_t residx; uint64_t res_key, sa_res_key, key; uint8_t type; union ctl_ha_msg persis_io; int i; CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); cdb = (struct scsi_per_res_out *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; /* * We only support whole-LUN scope. The scope & type are ignored for * register, register and ignore existing key and clear. * We sometimes ignore scope and type on preempts too!! * Verify reservation type here as well. */ type = cdb->scope_type & SPR_TYPE_MASK; if ((cdb->action == SPRO_RESERVE) || (cdb->action == SPRO_RELEASE)) { if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (type>8 || type==2 || type==4 || type==0) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } param_len = scsi_4btoul(cdb->length); if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); res_key = scsi_8btou64(param->res_key.key); sa_res_key = scsi_8btou64(param->serv_act_res_key); /* * Validate the reservation key here except for SPRO_REG_IGNO * This must be done for all other service actions */ if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { mtx_lock(&lun->lun_lock); if ((key = ctl_get_prkey(lun, residx)) != 0) { if (res_key != key) { /* * The current key passed in doesn't match * the one the initiator previously * registered. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { /* * We are not registered */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (res_key != 0) { /* * We are not registered and trying to register but * the register key isn't zero. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } switch (cdb->action & SPRO_ACTION_MASK) { case SPRO_REGISTER: case SPRO_REG_IGNO: { #if 0 printf("Registration received\n"); #endif /* * We don't support any of these options, as we report in * the read capabilities request (see * ctl_persistent_reserve_in(), above). 
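* (Concretely, these are the SPEC_I_PT, ALL_TG_PT and APTPL bits rejected just below; the SPRI_RC response above never advertises them, so a conforming initiator should not be setting them in the first place.)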
*/ if ((param->flags & SPR_SPEC_I_PT) || (param->flags & SPR_ALL_TG_PT) || (param->flags & SPR_APTPL)) { int bit_ptr; if (param->flags & SPR_APTPL) bit_ptr = 0; else if (param->flags & SPR_ALL_TG_PT) bit_ptr = 2; else /* SPR_SPEC_I_PT */ bit_ptr = 3; free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 20, /*bit_valid*/ 1, /*bit*/ bit_ptr); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_lock(&lun->lun_lock); /* * The initiator wants to clear the * key/unregister. */ if (sa_res_key == 0) { if ((res_key == 0 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO && ctl_get_prkey(lun, residx) == 0)) { mtx_unlock(&lun->lun_lock); goto done; } ctl_clr_prkey(lun, residx); lun->pr_key_count--; if (residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++){ if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; persis_io.pr.pr_info.residx = residx; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else /* sa_res_key != 0 */ { /* * If we aren't registered currently then increment * the key count and set the registered flag. 
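* (Re-registering an already-registered nexus simply overwrites its key: ctl_get_prkey() is then non-zero, so pr_key_count is left alone and only ctl_set_prkey() takes effect.)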
*/ ctl_alloc_prkey(lun, residx); if (ctl_get_prkey(lun, residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, residx, sa_res_key); lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_REG_KEY; persis_io.pr.pr_info.residx = residx; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; } case SPRO_RESERVE: #if 0 printf("Reserve executed type %d\n", type); #endif mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PR_RESERVED) { /* * if this isn't the reservation holder and it's * not a "all registrants" type or if the type is * different then we have a conflict */ if ((lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) || lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } else /* create a reservation */ { /* * If it's not an "all registrants" type record * reservation holder */ if (type != SPR_TYPE_WR_EX_AR && type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; /* Res holder */ else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = type; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RESERVE; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; case SPRO_RELEASE: mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { /* No reservation exists return good status */ mtx_unlock(&lun->lun_lock); goto done; } /* * Is this nexus a reservation holder? */ if (lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { /* * not a res holder return good status but * do nothing */ mtx_unlock(&lun->lun_lock); goto done; } if (lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_illegal_pr_release(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* okay to release */ lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. 
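* (Releasing a registrants-only or all-registrants reservation changes what the other registrants may do, hence the RESERVATIONS RELEASED unit attention on their nexuses; plain EX_AC and WR_EX reservations affect only the holder, and NUAR in the Control mode page suppresses the UA entirely.)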
*/ if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { for (i = softc->init_min; i < softc->init_max; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } mtx_unlock(&lun->lun_lock); /* Send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RELEASE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); break; case SPRO_CLEAR: /* send msg to other side */ mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; ctl_clr_prkey(lun, residx); for (i = 0; i < CTL_MAX_INITIATORS; i++) if (ctl_get_prkey(lun, i) != 0) { ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_CLEAR; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); break; case SPRO_PREEMPT: case SPRO_PRE_ABO: { int nretval; nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, residx, ctsio, cdb, param); if (nretval != 0) return (CTL_RETVAL_COMPLETE); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } done: free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } /* * This routine is for handling a message from the other SC pertaining to * persistent reserve out. All the error checking will have been done * so only perorming the action need be done here to keep the two * in sync. */ static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; struct ctl_lun *lun; int i; uint32_t residx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } residx = ctl_get_initindex(&msg->hdr.nexus); switch(msg->pr.pr_info.action) { case CTL_PR_REG_KEY: ctl_alloc_prkey(lun, msg->pr.pr_info.residx); if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, msg->pr.pr_info.residx, scsi_8btou64(msg->pr.pr_info.sa_res_key)); lun->pr_generation++; break; case CTL_PR_UNREG_KEY: ctl_clr_prkey(lun, msg->pr.pr_info.residx); lun->pr_key_count--; /* XXX Need to see if the reservation has been released */ /* if so do we need to generate UA? */ if (msg->pr.pr_info.residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. 
The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; break; case CTL_PR_RESERVE: lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = msg->pr.pr_info.res_type; lun->pr_res_idx = msg->pr.pr_info.residx; break; case CTL_PR_RELEASE: /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. */ if (lun->pr_res_type != SPR_TYPE_EX_AC && lun->pr_res_type != SPR_TYPE_WR_EX && (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { for (i = softc->init_min; i < softc->init_max; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; break; case CTL_PR_PREEMPT: ctl_pro_preempt_other(lun, msg); break; case CTL_PR_CLEAR: lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; for (i=0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; break; } mtx_unlock(&lun->lun_lock); } int ctl_read_write(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; int isread; CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); flags = 0; isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; switch (ctsio->cdb[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)ctsio->cdb; lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ lba &= 0x1fffff; num_blocks = cdb->length; /* * This is correct according to SBC-2.
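* A transfer length of zero in the 6-byte CDB means 256 blocks; the LENGTH field is only one byte wide, so 256 cannot be encoded directly.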
*/ if (num_blocks == 0) num_blocks = 256; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; if (lun->be_lun->atomicblock == 0) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_2btoul(cdb->length); if (num_blocks > lun->be_lun->atomicblock) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. * Note that this cannot happen with WRITE(6) or READ(6), since 0 * translates to 256 blocks for those commands. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA and/or DPO if caches are disabled. 
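* With the read cache disabled (RCD set in the caching mode page) reads are marked FUA and DPO; with the write cache disabled (WCE clear) writes are marked FUA, so the backend pushes the data to stable media.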
*/ if (isread) { if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) flags |= CTL_LLF_FUA | CTL_LLF_DPO; } else { if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } static int ctl_cnw_cont(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_scsiio *ctsio; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->flags &= ~CTL_LLF_COMPARE; lbalen->flags |= CTL_LLF_WRITE; CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_cnw(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); flags = 0; switch (ctsio->cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = cdb->length; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA if write cache is disabled. */ if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_data_submit_done(), it'll get passed back to * ctl_cnw_cont() for further processing.
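* (COMPARE AND WRITE moves two buffers' worth of data, which is why kern_total_len above is 2 * num_blocks * blocksize: the first pass runs with CTL_LLF_COMPARE, and after a successful compare ctl_cnw_cont() flips the flags to CTL_LLF_WRITE and resubmits the write half.)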
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_cnw_cont; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = CTL_LLF_COMPARE | flags; CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_verify(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int bytchk, flags; int retval; CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); bytchk = 0; flags = CTL_LLF_FUA; switch (ctsio->cdb[0]) { case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; if (bytchk) { lbalen->flags = CTL_LLF_COMPARE | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; } else { lbalen->flags = CTL_LLF_VERIFY | flags; ctsio->kern_total_len = 0; } ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_report_luns(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); struct scsi_report_luns *cdb; struct scsi_report_luns_data *lun_data; int num_filled, num_luns, num_port_luns, retval; uint32_t alloc_len, lun_datalen; uint32_t initidx, targ_lun_id, lun_id; retval = CTL_RETVAL_COMPLETE; cdb = (struct scsi_report_luns *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_report_luns\n")); num_luns = 0; num_port_luns = port->lun_map ? 
port->lun_map_size : ctl_max_luns; mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) num_luns++; } mtx_unlock(&softc->ctl_lock); switch (cdb->select_report) { case RPL_REPORT_DEFAULT: case RPL_REPORT_ALL: case RPL_REPORT_NONSUBSID: break; case RPL_REPORT_WELLKNOWN: case RPL_REPORT_ADMIN: case RPL_REPORT_CONGLOM: num_luns = 0; break; default: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); break; /* NOTREACHED */ } alloc_len = scsi_4btoul(cdb->length); /* * The initiator has to allocate at least 16 bytes for this request, * so he can at least get the header and the first LUN. Otherwise * we reject the request (per SPC-3 rev 14, section 6.21). */ if (alloc_len < (sizeof(struct scsi_report_luns_data) + sizeof(struct scsi_report_luns_lundata))) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); } lun_datalen = sizeof(*lun_data) + (num_luns * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0, num_filled = 0; targ_lun_id < num_port_luns && num_filled < num_luns; targ_lun_id++) { lun_id = ctl_lun_map_from_port(port, targ_lun_id); if (lun_id == UINT32_MAX) continue; lun = softc->ctl_luns[lun_id]; if (lun == NULL) continue; be64enc(lun_data->luns[num_filled++].lundata, ctl_encode_lun(targ_lun_id)); /* * According to SPC-3, rev 14 section 6.21: * * "The execution of a REPORT LUNS command to any valid and * installed logical unit shall clear the REPORTED LUNS DATA * HAS CHANGED unit attention condition for all logical * units of that target with respect to the requesting * initiator. A valid and installed logical unit is one * having a PERIPHERAL QUALIFIER of 000b in the standard * INQUIRY data (see 6.4.2)." * * If request_lun is NULL, the LUN this report luns command * was issued to is either disabled or doesn't exist. In that * case, we shouldn't clear any pending lun change unit * attention. */ if (request_lun != NULL) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); /* * It's quite possible that we've returned fewer LUNs than we allocated * space for. Trim it. */ lun_datalen = sizeof(*lun_data) + (num_filled * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(lun_datalen, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * We set this to the actual data length, regardless of how much * space we actually have to return results. If the user looks at * this value, he'll know whether or not he allocated enough space * and can reissue the command if necessary. We don't support well * known logical units, so if the user asks for that, return none. */ scsi_ulto4b(lun_datalen - 8, lun_data->length); /* * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy * this request.
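* (A short allocation length is not an error: per SPC we simply truncate the transfer, while the LUN LIST LENGTH field still carries the full size so the initiator can tell it needs a bigger buffer.)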
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_request_sense(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_request_sense *cdb; struct scsi_sense_data *sense_ptr, *ps; uint32_t initidx; int have_error; u_int sense_len = SSD_FULL_SIZE; scsi_sense_data_type sense_format; ctl_ua_type ua_type; uint8_t asc = 0, ascq = 0; cdb = (struct scsi_request_sense *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_request_sense\n")); /* * Determine which sense format the user wants. */ if (cdb->byte2 & SRS_DESC) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; /* * struct scsi_sense_data, which is currently set to 256 bytes, is * larger than the largest allowed value for the length field in the * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. */ ctsio->kern_data_len = cdb->length; ctsio->kern_total_len = cdb->length; /* * If we don't have a LUN, we don't have any pending sense. */ if (lun == NULL || ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_link < CTL_HA_LINK_UNKNOWN)) { /* "Logical unit not supported" */ ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x25, /*ascq*/ 0x00, SSD_ELEM_NONE); goto send; } have_error = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * Check for pending sense, and then for pending unit attentions. * Pending sense gets returned first, then pending unit attentions. */ mtx_lock(&lun->lun_lock); ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps += initidx % CTL_MAX_INIT_PER_PORT; if (ps != NULL && ps->error_code != 0) { scsi_sense_data_type stored_format; /* * Check to see which sense format was used for the stored * sense data. */ stored_format = scsi_sense_type(ps); /* * If the user requested a different sense format than the * one we stored, then we need to convert it to the other * format. If we're going from descriptor to fixed format * sense data, we may lose things in translation, depending * on what options were used. * * If the stored format is SSD_TYPE_NONE (i.e. invalid) for * some reason, we'll just copy it out as-is. */ if ((stored_format == SSD_TYPE_FIXED) && (sense_format == SSD_TYPE_DESC)) ctl_sense_to_desc((struct scsi_sense_data_fixed *) ps, (struct scsi_sense_data_desc *)sense_ptr); else if ((stored_format == SSD_TYPE_DESC) && (sense_format == SSD_TYPE_FIXED)) ctl_sense_to_fixed((struct scsi_sense_data_desc *) ps, (struct scsi_sense_data_fixed *)sense_ptr); else memcpy(sense_ptr, ps, sizeof(*sense_ptr)); ps->error_code = 0; have_error = 1; } else { ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, sense_format); if (ua_type != CTL_UA_NONE) have_error = 1; } if (have_error == 0) { /* * Report an informational exception if we have one and it * is allowed. */ if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { asc = lun->ie_asc; ascq = lun->ie_ascq; } ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NO_SENSE, /*asc*/ asc, /*ascq*/ ascq, SSD_ELEM_NONE); } mtx_unlock(&lun->lun_lock); send: /* * We report the SCSI status as OK, since the status of the command * itself is OK. We're reporting sense as parameter data.
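* (That is what SPC calls for: REQUEST SENSE itself completes with GOOD status and the sense bytes travel as an ordinary data-in transfer.)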
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_tur(struct ctl_scsiio *ctsio) { CTL_DEBUG_PRINT(("ctl_tur\n")); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x00, the Supported VPD Pages page. */ static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_supported_pages *pages; int sup_page_size; int p; sup_page_size = sizeof(struct scsi_vpd_supported_pages) * SCSI_EVPD_NUM_SUPPORTED_PAGES; ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sup_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) pages->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; p = 0; /* Supported VPD pages */ pages->page_list[p++] = SVPD_SUPPORTED_PAGES; /* Serial Number */ pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; /* Device Identification */ pages->page_list[p++] = SVPD_DEVICE_ID; /* Extended INQUIRY Data */ pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; /* Mode Page Policy */ pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; /* SCSI Ports */ pages->page_list[p++] = SVPD_SCSI_PORTS; /* Third-party Copy */ pages->page_list[p++] = SVPD_SCSI_TPC; if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* Block limits */ pages->page_list[p++] = SVPD_BLOCK_LIMITS; /* Block Device Characteristics */ pages->page_list[p++] = SVPD_BDC; /* Logical Block Provisioning */ pages->page_list[p++] = SVPD_LBP; } pages->length = p; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x80, the Unit Serial Number page. */ static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_unit_serial_number *sn_ptr; int data_len; data_len = 4 + CTL_SN_LEN; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; sn_ptr->length = CTL_SN_LEN; /* * If we don't have a LUN, we just leave the serial number as * all spaces. 
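* (0x20 is the ASCII space character; SPC uses spaces to pad unused bytes in ASCII fields.)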
*/ if (lun != NULL) { strncpy((char *)sn_ptr->serial_num, (char *)lun->be_lun->serial_num, CTL_SN_LEN); } else memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x86, the Extended INQUIRY Data page. */ static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_extended_inquiry_data *eid_ptr; int data_len; data_len = sizeof(struct scsi_vpd_extended_inquiry_data); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; scsi_ulto2b(data_len - 4, eid_ptr->page_length); /* * We support head of queue, ordered and simple tags. */ eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; /* * Volatile cache supported. */ eid_ptr->flags3 = SVPD_EID_V_SUP; /* * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit * attention for a particular IT nexus on all LUNs once we have * reported it to that nexus. This bit is required as of SPC-4. */ eid_ptr->flags4 = SVPD_EID_LUICLR; /* * We support the revert to defaults (RTD) bit in MODE SELECT. */ eid_ptr->flags5 = SVPD_EID_RTD_SUP; /* * XXX KDM in order to correctly answer this, we would need * information from the SIM to determine how much sense data it * can send. So this would really be a path inquiry field, most * likely. This can be set to a maximum of 252 according to SPC-4, * but the hardware may or may not be able to support that much. * 0 just means that the maximum sense data length is not reported. */ eid_ptr->max_sense_length = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_mode_page_policy *mpp_ptr; int data_len; data_len = sizeof(struct scsi_vpd_mode_page_policy) + sizeof(struct scsi_vpd_mode_page_policy_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time.
*/ if (lun != NULL) mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; scsi_ulto2b(data_len - 4, mpp_ptr->page_length); mpp_ptr->descr[0].page_code = 0x3f; mpp_ptr->descr[0].subpage_code = 0xff; mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x83, the Device Identification page. */ static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_device_id *devid_ptr; struct scsi_vpd_id_descriptor *desc; int data_len, g; uint8_t proto; data_len = sizeof(struct scsi_vpd_device_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_rel_trgt_port_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_trgt_port_grp_id); if (lun && lun->lun_devid) data_len += lun->lun_devid->len; if (port && port->port_devid) data_len += port->port_devid->len; if (port && port->target_devid) data_len += port->target_devid->len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; devid_ptr->page_code = SVPD_DEVICE_ID; scsi_ulto2b(data_len - 4, devid_ptr->length); if (port && port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port && port->port_type == CTL_PORT_SAS) proto = SCSI_PROTO_SAS << 4; else if (port && port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; /* * We're using a LUN association here. i.e., this device ID is a * per-LUN identifier. */ if (lun && lun->lun_devid) { memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + lun->lun_devid->len); } /* * This is for the WWPN which is a port association. 
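* (A port association means the designator identifies the target port the command arrived on, not the logical unit itself.)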
*/ if (port && port->port_devid) { memcpy(desc, port->port_devid->data, port->port_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + port->port_devid->len); } /* * This is for the Relative Target Port(type 4h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_rel_trgt_port_id)); /* * This is for the Target Port Group(type 5h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_TPORTGRP; desc->length = 4; if (softc->is_single || (port && port->status & CTL_PORT_STATUS_HA_SHARED)) g = 1; else g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; scsi_ulto2b(g, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_trgt_port_grp_id)); /* * This is for the Target identifier */ if (port && port->target_devid) { memcpy(desc, port->target_devid->data, port->target_devid->len); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_scsi_ports *sp; struct scsi_vpd_port_designation *pd; struct scsi_vpd_port_designation_cont *pdc; struct ctl_port *port; int data_len, num_target_ports, iid_len, id_len; num_target_ports = 0; iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->init_devid) iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } mtx_unlock(&softc->ctl_lock); data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_ports * (sizeof(struct scsi_vpd_port_designation) + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) sp->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sp->page_code = SVPD_SCSI_PORTS; scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), sp->page_length); pd = &sp->design[0]; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, pd->relative_port_id); if (port->init_devid) { iid_len = port->init_devid->len; memcpy(pd->initiator_transportid, port->init_devid->data, port->init_devid->len); } else iid_len = 0; scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) (&pd->initiator_transportid[iid_len]); if (port->port_devid) { id_len = port->port_devid->len; memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); } else id_len = 0; scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_limits *bl_ptr; uint64_t ival; ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bl_ptr->page_code = SVPD_BLOCK_LIMITS; scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); bl_ptr->max_cmp_write_len = 0xff; scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); if (lun != NULL) { scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { ival = 0xffffffff; ctl_get_opt_number(&lun->be_lun->options, "unmap_max_lba", &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); ival = 0xffffffff; ctl_get_opt_number(&lun->be_lun->options, "unmap_max_descr", &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); if (lun->be_lun->ublockexp != 0) { scsi_ulto4b((1 << lun->be_lun->ublockexp), bl_ptr->opt_unmap_grain); scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, bl_ptr->unmap_grain_align); } } scsi_ulto4b(lun->be_lun->atomicblock, bl_ptr->max_atomic_transfer_length); scsi_ulto4b(0, bl_ptr->atomic_alignment); scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); ival = UINT64_MAX; ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival); scsi_u64to8b(ival, bl_ptr->max_write_same_length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_device_characteristics *bdc_ptr; const char *value; u_int i; ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bdc_ptr->page_code = SVPD_BDC; scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); if (lun != NULL && (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) i = strtol(value, NULL, 0); else i = CTL_DEFAULT_ROTATION_RATE; scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); if (lun != NULL && (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) i = strtol(value, NULL, 0); else i = 0; bdc_ptr->wab_wac_ff = (i & 0x0f); bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_logical_block_prov *lbp_ptr; const char *value; ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; lbp_ptr->page_code = SVPD_LBP; scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; value = ctl_get_opt(&lun->be_lun->options, "provisioning_type"); if (value != NULL) { if (strcmp(value, "resource") == 0) lbp_ptr->prov_type = SVPD_LBP_RESOURCE; else if (strcmp(value, "thin") == 0) lbp_ptr->prov_type = SVPD_LBP_THIN; } else lbp_ptr->prov_type = SVPD_LBP_THIN; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * INQUIRY with the EVPD bit set. 
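* Dispatch on the PAGE CODE byte. The block-oriented pages (Block Limits, * Block Device Characteristics, Logical Block Provisioning) are only * returned for direct access LUNs; anything else is rejected with INVALID * FIELD IN CDB.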
*/ static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry *cdb; int alloc_len, retval; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); switch (cdb->page_code) { case SVPD_SUPPORTED_PAGES: retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); break; case SVPD_UNIT_SERIAL_NUMBER: retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); break; case SVPD_DEVICE_ID: retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); break; case SVPD_EXTENDED_INQUIRY_DATA: retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); break; case SVPD_MODE_PAGE_POLICY: retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); break; case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; case SVPD_SCSI_TPC: retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); break; case SVPD_BLOCK_LIMITS: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; case SVPD_BDC: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); break; case SVPD_LBP: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); break; default: err: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } /* * Standard INQUIRY data. */ static int ctl_inquiry_std(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry_data *inq_ptr; struct scsi_inquiry *cdb; char *val; uint32_t alloc_len, data_len; ctl_port_type port_type; port_type = port->port_type; if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) port_type = CTL_PORT_SCSI; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); /* * We malloc the full inquiry data size here and fill it * in. If the user only asks for less, we'll give him * that much. */ data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (lun != NULL) { if ((lun->flags & CTL_LUN_PRIMARY_SC) || softc->ha_link >= CTL_HA_LINK_UNKNOWN) { inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; } else { inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | lun->be_lun->lun_type; } if (lun->flags & CTL_LUN_REMOVABLE) inq_ptr->dev_qual2 |= SID_RMB; } else inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; /* RMB in byte 2 is 0 */ inq_ptr->version = SCSI_REV_SPC5; /* * According to SAM-3, even if a device only supports a single * level of LUN addressing, it should still set the HISUP bit: * * 4.9.1 Logical unit numbers overview * * All logical unit number formats described in this standard are * hierarchical in structure even when only a single level in that * hierarchy is used. The HISUP bit shall be set to one in the * standard INQUIRY data (see SPC-2) when any logical unit number * format described in this standard is used. Non-hierarchical * formats are outside the scope of this standard. * * Therefore we set the HiSup bit here. * * The response format is 2, per SPC-3. 
*/ inq_ptr->response_format = SID_HiSup | 2; inq_ptr->additional_length = data_len - (offsetof(struct scsi_inquiry_data, additional_length) + 1); CTL_DEBUG_PRINT(("additional_length = %d\n", inq_ptr->additional_length)); inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; if (port_type == CTL_PORT_SCSI) inq_ptr->spc2_flags = SPC2_SID_ADDR16; inq_ptr->spc2_flags |= SPC2_SID_MultiP; inq_ptr->flags = SID_CmdQue; if (port_type == CTL_PORT_SCSI) inq_ptr->flags |= SID_WBus16 | SID_Sync; /* * Per SPC-3, unused bytes in ASCII strings are filled with spaces. * We have 8 bytes for the vendor name, and 16 bytes for the device * name and 4 bytes for the revision. */ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, "vendor")) == NULL) { strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); } else { memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); strncpy(inq_ptr->vendor, val, min(sizeof(inq_ptr->vendor), strlen(val))); } if (lun == NULL) { strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { switch (lun->be_lun->lun_type) { case T_DIRECT: strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); break; case T_PROCESSOR: strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, sizeof(inq_ptr->product)); break; case T_CDROM: strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, sizeof(inq_ptr->product)); break; default: strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, sizeof(inq_ptr->product)); break; } } else { memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); strncpy(inq_ptr->product, val, min(sizeof(inq_ptr->product), strlen(val))); } /* * XXX make this a macro somewhere so it automatically gets * incremented when we make changes. */ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, "revision")) == NULL) { strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); } else { memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); strncpy(inq_ptr->revision, val, min(sizeof(inq_ptr->revision), strlen(val))); } /* * For parallel SCSI, we support double transition and single * transition clocking. We also support QAS (Quick Arbitration * and Selection) and Information Unit transfers on both the * control and array devices. 
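* (These SPI-specific capability bits are only meaningful on a parallel * SCSI port, which is why they are gated on port_type == CTL_PORT_SCSI * below.)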
*/ if (port_type == CTL_PORT_SCSI) inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | SID_SPI_IUS; /* SAM-6 (no version claimed) */ scsi_ulto2b(0x00C0, inq_ptr->version1); /* SPC-5 (no version claimed) */ scsi_ulto2b(0x05C0, inq_ptr->version2); if (port_type == CTL_PORT_FC) { /* FCP-2 ANSI INCITS.350:2003 */ scsi_ulto2b(0x0917, inq_ptr->version3); } else if (port_type == CTL_PORT_SCSI) { /* SPI-4 ANSI INCITS.362:200x */ scsi_ulto2b(0x0B56, inq_ptr->version3); } else if (port_type == CTL_PORT_ISCSI) { /* iSCSI (no version claimed) */ scsi_ulto2b(0x0960, inq_ptr->version3); } else if (port_type == CTL_PORT_SAS) { /* SAS (no version claimed) */ scsi_ulto2b(0x0BE0, inq_ptr->version3); } else if (port_type == CTL_PORT_UMASS) { /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ scsi_ulto2b(0x1730, inq_ptr->version3); } if (lun == NULL) { /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); } else { switch (lun->be_lun->lun_type) { case T_DIRECT: /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); break; case T_PROCESSOR: break; case T_CDROM: /* MMC-6 (no version claimed) */ scsi_ulto2b(0x04E0, inq_ptr->version4); break; default: break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_inquiry(struct ctl_scsiio *ctsio) { struct scsi_inquiry *cdb; int retval; CTL_DEBUG_PRINT(("ctl_inquiry\n")); cdb = (struct scsi_inquiry *)ctsio->cdb; if (cdb->byte2 & SI_EVPD) retval = ctl_inquiry_evpd(ctsio); else if (cdb->page_code == 0) retval = ctl_inquiry_std(ctsio); else { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } return (retval); } int ctl_get_config(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_config_header *hdr; struct scsi_get_config_feature *feature; struct scsi_get_config *cdb; uint32_t alloc_len, data_len; int rt, starting; cdb = (struct scsi_get_config *)ctsio->cdb; rt = (cdb->rt & SGC_RT_MASK); starting = scsi_2btoul(cdb->starting_feature); alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_config_header) + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; if (lun->flags & CTL_LUN_NO_MEDIA) scsi_ulto2b(0x0000, hdr->current_profile); else scsi_ulto2b(0x0010, hdr->current_profile); feature = (struct scsi_get_config_feature *)(hdr + 1); if (starting > 0x003b) goto done; if (starting > 0x003a) goto f3b; if (starting > 0x002b) goto f3a; if (starting > 0x002a) goto f2b; if (starting > 0x001f) goto f2a; if (starting > 0x001e) goto f1f; if (starting > 0x001d) goto f1e; if (starting > 0x0010) goto f1d; if (starting > 0x0003) goto f10; if (starting > 0x0002) goto f3; if 
(starting > 0x0001) goto f2; if (starting > 0x0000) goto f1; /* Profile List */ scsi_ulto2b(0x0000, feature->feature_code); feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ feature->feature_data[2] = 0x00; scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ feature->feature_data[6] = 0x01; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1: /* Core */ scsi_ulto2b(0x0001, feature->feature_code); feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(0x00000000, &feature->feature_data[0]); feature->feature_data[4] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2: /* Morphing */ scsi_ulto2b(0x0002, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x02; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3: /* Removable Medium */ scsi_ulto2b(0x0003, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x39; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) goto done; f10: /* Random Read */ scsi_ulto2b(0x0010, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); scsi_ulto2b(1, &feature->feature_data[4]); feature->feature_data[6] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1d: /* Multi-Read */ scsi_ulto2b(0x001D, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 0; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1e: /* CD Read */ scsi_ulto2b(0x001E, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1f: /* DVD Read */ scsi_ulto2b(0x001F, feature->feature_code); feature->flags = 0x08; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x01; feature->feature_data[2] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2a: /* DVD+RW */ scsi_ulto2b(0x002A, feature->feature_code); feature->flags = 0x04; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2b: /* DVD+R */ scsi_ulto2b(0x002B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3a: /* DVD+RW Dual Layer */ scsi_ulto2b(0x003A, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 
feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3b: /* DVD+R Dual Layer */ scsi_ulto2b(0x003B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; done: data_len = (uint8_t *)feature - (uint8_t *)hdr; if (rt == SGC_RT_SPECIFIC && data_len > 4) { feature = (struct scsi_get_config_feature *)(hdr + 1); if (scsi_2btoul(feature->feature_code) == starting) feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; data_len = (uint8_t *)feature - (uint8_t *)hdr; } scsi_ulto4b(data_len - 4, hdr->data_length); ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_event_status(struct ctl_scsiio *ctsio) { struct scsi_get_event_status_header *hdr; struct scsi_get_event_status *cdb; uint32_t alloc_len, data_len; int notif_class; cdb = (struct scsi_get_event_status *)ctsio->cdb; if ((cdb->byte2 & SGESN_POLLED) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } notif_class = cdb->notif_class; alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_event_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; scsi_ulto2b(0, hdr->descr_length); hdr->nea_class = SGESN_NEA; hdr->supported_class = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_mechanism_status(struct ctl_scsiio *ctsio) { struct scsi_mechanism_status_header *hdr; struct scsi_mechanism_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_mechanism_status *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_mechanism_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; hdr->state1 = 0x00; hdr->state2 = 0xe0; scsi_ulto3b(0, hdr->lba); hdr->slots_num = 0; scsi_ulto2b(0, hdr->slots_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static void ctl_ultomsf(uint32_t lba, uint8_t *buf) { lba += 150; buf[0] = 0; buf[1] = bin2bcd((lba / 75) / 60); buf[2] = bin2bcd((lba / 75) % 60); buf[3] = bin2bcd(lba % 75); } int ctl_read_toc(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_toc_hdr *hdr; struct scsi_read_toc_type01_descr *descr; struct 
scsi_read_toc *cdb; uint32_t alloc_len, data_len; int format, msf; cdb = (struct scsi_read_toc *)ctsio->cdb; msf = (cdb->byte2 & CD_MSF) != 0; format = cdb->format; alloc_len = scsi_2btoul(cdb->data_len); data_len = sizeof(struct scsi_read_toc_hdr); if (format == 0) data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); else data_len += sizeof(struct scsi_read_toc_type01_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; if (format == 0) { scsi_ulto2b(0x12, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); descr++; descr->addr_ctl = 0x14; descr->track_number = 0xaa; if (msf) ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); else scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); } else { scsi_ulto2b(0x0a, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * For known CDB types, parse the LBA and length. */ static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) { if (io->io_hdr.io_type != CTL_IO_SCSI) return (1); switch (io->scsiio.cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = cdb->length; break; } case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)io->scsiio.cdb; *lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ *lba &= 0x1fffff; *len = cdb->length; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 
*)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_verify_16 *cdb; cdb = (struct scsi_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case UNMAP: { *lba = 0; *len = UINT64_MAX; break; } case SERVICE_ACTION_IN: { /* GET LBA STATUS */ struct scsi_get_lba_status *cdb; cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = UINT32_MAX; break; } default: return (1); break; /* NOTREACHED */ } return (0); } static ctl_action ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, bool seq) { uint64_t endlba1, endlba2; endlba1 = lba1 + len1 - (seq ? 0 : 1); endlba2 = lba2 + len2 - 1; if ((endlba1 < lba2) || (endlba2 < lba1)) return (CTL_ACTION_PASS); else return (CTL_ACTION_BLOCK); } static int ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) { struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end, *range; uint64_t lba; uint32_t len; /* If not UNMAP -- go other way. */ if (io->io_hdr.io_type != CTL_IO_SCSI || io->scsiio.cdb[0] != UNMAP) return (CTL_ACTION_ERROR); /* If UNMAP without data -- block and wait for data. */ ptrlen = (struct ctl_ptr_len_flags *) &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || ptrlen->ptr == NULL) return (CTL_ACTION_BLOCK); /* UNMAP with data -- check for collision. */ buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); len = scsi_4btoul(range->length); if ((lba < lba2 + len2) && (lba + len > lba2)) return (CTL_ACTION_BLOCK); } return (CTL_ACTION_PASS); } static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) { uint64_t lba1, lba2; uint64_t len1, len2; int retval; if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); retval = ctl_extent_check_unmap(io1, lba2, len2); if (retval != CTL_ACTION_ERROR) return (retval); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) seq = FALSE; return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); } static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) { uint64_t lba1, lba2; uint64_t len1, len2; if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) return (CTL_ACTION_PASS); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); if (lba1 + len1 == lba2) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); } static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io) { const struct ctl_cmd_entry *pending_entry, *ooa_entry; const ctl_serialize_action *serialize_row; /* * The initiator attempted multiple untagged commands at the same * time. Can't do that. 
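 *
 * Illustration only (hypothetical nexuses): two untagged commands from
 * the same I_T nexus collide, while the same pair from different
 * initiators merely falls through to the later checks:
 *
 *	nexus A UNTAGGED + nexus A UNTAGGED -> CTL_ACTION_OVERLAP
 *	nexus A UNTAGGED + nexus B UNTAGGED -> not an overlap here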
*/ if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP); /* * The initiator attempted to send multiple tagged commands with * the same ID. (It's fine if different initiators have the same * tag ID.) * * Even if all of those conditions are true, we don't kill the I/O * if the command ahead of us has been aborted. We won't end up * sending it to the FETD, and it's perfectly legal to resend a * command with the same tag number as long as the previous * instance of this tag number has been aborted somehow. */ if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP_TAG); /* * If we get a head of queue tag, SAM-3 says that we should * immediately execute it. * * What happens if this command would normally block for some other * reason? e.g. a request sense with a head of queue tag * immediately after a write. Normally that would block, but this * will result in its getting executed immediately... * * We currently return "pass" instead of "skip", so we'll end up * going through the rest of the queue to check for overlapped tags. * * XXX KDM check for other types of blockage first?? */ if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) return (CTL_ACTION_PASS); /* * Ordered tags have to block until all items ahead of them * have completed. If we get called with an ordered tag, we always * block, if something else is ahead of us in the queue. */ if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) return (CTL_ACTION_BLOCK); /* * Simple tags get blocked until all head of queue and ordered tags * ahead of them have completed. I'm lumping untagged commands in * with simple tags here. XXX KDM is that the right thing to do? 
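 *
 * As a summary of the tag-type gating (illustration only, mirroring
 * the checks above and below):
 *
 *	pending HEAD_OF_QUEUE                     -> CTL_ACTION_PASS
 *	pending ORDERED                           -> CTL_ACTION_BLOCK
 *	pending SIMPLE/UNTAGGED behind
 *	    HEAD_OF_QUEUE or ORDERED              -> CTL_ACTION_BLOCK
 *	anything else                             -> per-opcode
 *	    serialization table lookup below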
*/ if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) return (CTL_ACTION_BLOCK); pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], pending_io->scsiio.cdb[1], pending_io)); ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); if (ooa_entry->seridx == CTL_SERIDX_INVLD) return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], ooa_io->scsiio.cdb[1], ooa_io)); serialize_row = ctl_serialize_table[ooa_entry->seridx]; switch (serialize_row[pending_entry->seridx]) { case CTL_SER_BLOCK: return (CTL_ACTION_BLOCK); case CTL_SER_EXTENT: return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); case CTL_SER_EXTENTOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); return (CTL_ACTION_PASS); case CTL_SER_EXTENTSEQ: if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) return (ctl_extent_check_seq(ooa_io, pending_io)); return (CTL_ACTION_PASS); case CTL_SER_PASS: return (CTL_ACTION_PASS); case CTL_SER_BLOCKOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); case CTL_SER_SKIP: return (CTL_ACTION_SKIP); default: panic("%s: Invalid serialization value %d for %d => %d", __func__, serialize_row[pending_entry->seridx], pending_entry->seridx, ooa_entry->seridx); } return (CTL_ACTION_ERROR); } /* * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. * Assumptions: * - pending_io is generally either incoming, or on the blocked queue * - starting I/O is the I/O we want to start the check with. */ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *starting_io) { union ctl_io *ooa_io; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run back along the OOA queue, starting with the current * blocked I/O and going through every I/O before it on the * queue. If starting_io is NULL, we'll just end up returning * CTL_ACTION_PASS. */ for (ooa_io = starting_io; ooa_io != NULL; ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, ooa_links)){ /* * This routine just checks to see whether * cur_blocked is blocked by ooa_io, which is ahead * of it in the queue. It doesn't queue/dequeue * cur_blocked. */ action = ctl_check_for_blockage(lun, pending_io, ooa_io); switch (action) { case CTL_ACTION_BLOCK: case CTL_ACTION_OVERLAP: case CTL_ACTION_OVERLAP_TAG: case CTL_ACTION_SKIP: case CTL_ACTION_ERROR: return (action); break; /* NOTREACHED */ case CTL_ACTION_PASS: break; default: panic("%s: Invalid action %d\n", __func__, action); } } return (CTL_ACTION_PASS); } /* * Assumptions: * - An I/O has just completed, and has been removed from the per-LUN OOA * queue, so some items on the blocked queue may now be unblocked. 
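 *
 * The loop below may unlink the entry it is examining, so it latches
 * the next pointer first; a minimal sketch of the same pattern (cur,
 * next, q, links and the done() predicate are placeholders, and this
 * is equivalent to what TAILQ_FOREACH_SAFE() provides):
 *
 *	for (cur = TAILQ_FIRST(&q); cur != NULL; cur = next) {
 *		next = TAILQ_NEXT(cur, links);
 *		if (done(cur))
 *			TAILQ_REMOVE(&q, cur, links);
 *	}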
*/ static int ctl_check_blocked(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; union ctl_io *cur_blocked, *next_blocked; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run forward from the head of the blocked queue, checking each * entry against the I/Os prior to it on the OOA queue to see if * there is still any blockage. * * We cannot use the TAILQ_FOREACH() macro, because it can't deal * with our removing a variable on it while it is traversing the * list. */ for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); cur_blocked != NULL; cur_blocked = next_blocked) { union ctl_io *prev_ooa; ctl_action action; next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, blocked_links); prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, ctl_ooaq, ooa_links); /* * If cur_blocked happens to be the first item in the OOA * queue now, prev_ooa will be NULL, and the action * returned will just be CTL_ACTION_PASS. */ action = ctl_check_ooa(lun, cur_blocked, prev_ooa); switch (action) { case CTL_ACTION_BLOCK: /* Nothing to do here, still blocked */ break; case CTL_ACTION_OVERLAP: case CTL_ACTION_OVERLAP_TAG: /* * This shouldn't happen! In theory we've already * checked this command for overlap... */ break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: { const struct ctl_cmd_entry *entry; /* * The skip case shouldn't happen, this transaction * should have never made it onto the blocked queue. */ /* * This I/O is no longer blocked, we can remove it * from the blocked queue. Since this is a TAILQ * (doubly linked list), we can do O(1) removals * from any place on the list. */ TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, blocked_links); cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; if ((softc->ha_mode != CTL_HA_MODE_XFER) && (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ /* * Need to send IO back to original side to * run */ union ctl_ha_msg msg_info; cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; msg_info.hdr.original_sc = cur_blocked->io_hdr.original_sc; msg_info.hdr.serializing_sc = cur_blocked; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_NOWAIT); break; } entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); /* * Check this I/O for LUN state changes that may * have happened while this command was blocked. * The LUN state may have been changed by a command * ahead of us in the queue, so we need to re-check * for any states that can be caused by SCSI * commands. */ if (ctl_scsiio_lun_check(lun, entry, &cur_blocked->scsiio) == 0) { cur_blocked->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(cur_blocked); } else ctl_done(cur_blocked); break; } default: /* * This probably shouldn't happen -- we shouldn't * get CTL_ACTION_ERROR, or anything else. */ break; } } return (CTL_RETVAL_COMPLETE); } /* * This routine (with one exception) checks LUN flags that can be set by * commands ahead of us in the OOA queue. These flags have to be checked * when a command initially comes in, and when we pull a command off the * blocked queue and are preparing to execute it. The reason we have to * check these flags for commands on the blocked queue is that the LUN * state may have been changed by a command ahead of us while we're on the * blocked queue. * * Ordering is somewhat important with these checks, so please pay * careful attention to the placement of any new checks. 
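 *
 * For reference, the order of the checks below is roughly: HA
 * availability on a secondary controller, write protection (read-only
 * backend or software write protect), SPC-2 and persistent
 * reservations, and finally media state (ejected / no media /
 * stopped).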
*/ static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) { struct ctl_softc *softc = lun->ctl_softc; int retval; uint32_t residx; retval = 0; mtx_assert(&lun->lun_lock, MA_OWNED); /* * If this shelf is a secondary shelf controller, we may have to * reject some commands disallowed by HA mode and link state. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { if (softc->ha_link == CTL_HA_LINK_OFFLINE && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_unavail(ctsio); retval = 1; goto bailout; } if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_transit(ctsio); retval = 1; goto bailout; } if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { ctl_set_lun_standby(ctsio); retval = 1; goto bailout; } /* The rest of checks are only done on executing side */ if (softc->ha_mode == CTL_HA_MODE_XFER) goto bailout; } if (entry->pattern & CTL_LUN_PAT_WRITE) { if (lun->be_lun && lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { ctl_set_hw_write_protected(ctsio); retval = 1; goto bailout; } if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); retval = 1; goto bailout; } } /* * Check for a reservation conflict. If this command isn't allowed * even on reserved LUNs, and if this initiator isn't the one who * reserved us, reject the command with a reservation conflict. */ residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if ((lun->flags & CTL_LUN_RESERVED) && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { if (lun->res_idx != residx) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { /* No reservation or command is allowed. */; } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && (lun->pr_res_type == SPR_TYPE_WR_EX || lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { /* The command is allowed for Write Exclusive resv. */; } else { /* * if we aren't registered or it's a res holder type * reservation and this isn't the res holder then set a * conflict. */ if (ctl_get_prkey(lun, residx) == 0 || (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { if (lun->flags & CTL_LUN_EJECTED) ctl_set_lun_ejected(ctsio); else if (lun->flags & CTL_LUN_NO_MEDIA) { if (lun->flags & CTL_LUN_REMOVABLE) ctl_set_lun_no_media(ctsio); else ctl_set_lun_int_reqd(ctsio); } else if (lun->flags & CTL_LUN_STOPPED) ctl_set_lun_stopped(ctsio); else goto bailout; retval = 1; goto bailout; } bailout: return (retval); } static void ctl_failover_io(union ctl_io *io, int have_lock) { ctl_set_busy(&io->scsiio); ctl_done(io); } static void ctl_failover_lun(union ctl_io *rio) { struct ctl_softc *softc = CTL_SOFTC(rio); struct ctl_lun *lun; struct ctl_io_hdr *io, *next_io; uint32_t targ_lun; targ_lun = rio->io_hdr.nexus.targ_mapped_lun; CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun)); /* Find and lock the LUN. 
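 * The usual CTL lock handoff applies below: ctl_lock makes the
 * ctl_luns[] lookup safe, lun_lock is taken while ctl_lock is still
 * held, and ctl_lock is then dropped so the rest of the work runs
 * under the per-LUN lock only.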
*/ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } if (softc->ha_mode == CTL_HA_MODE_XFER) { TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_ABORT; io->flags |= CTL_FLAG_FAILOVER; } else { /* This can only be due to DATAMOVE */ io->msg_type = CTL_MSG_DATAMOVE_DONE; io->flags &= ~CTL_FLAG_DMA_INPROG; io->flags |= CTL_FLAG_IO_ACTIVE; io->port_status = 31340; ctl_enqueue_isc((union ctl_io *)io); } } /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_FAILOVER; } else { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } else { /* SERIALIZE modes */ TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { TAILQ_REMOVE(&lun->blocked_queue, io, blocked_links); io->flags &= ~CTL_FLAG_BLOCKED; TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); ctl_free_io((union ctl_io *)io); } } TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); ctl_free_io((union ctl_io *)io); } /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } ctl_check_blocked(lun); } mtx_unlock(&lun->lun_lock); } static int ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) { struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t initidx, targ_lun; int retval = 0; lun = NULL; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; if (targ_lun < ctl_max_luns) lun = softc->ctl_luns[targ_lun]; if (lun) { /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/O has been * completed. */ mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); lun = NULL; } } CTL_LUN(ctsio) = lun; if (lun) { CTL_BACKEND_LUN(ctsio) = lun->be_lun; /* * Every I/O goes into the OOA queue for a particular LUN, * and stays there until completion. */ #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) lun->idle_time += getsbinuptime() - lun->last_busy; #endif TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); } /* Get command entry and return error if it is unsupported. */ entry = ctl_validate_command(ctsio); if (entry == NULL) { if (lun) mtx_unlock(&lun->lun_lock); return (retval); } ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; /* * Check to see whether we can send this command to LUNs that don't * exist. This should pretty much only be the case for inquiry * and request sense. Further checks, below, really require having * a LUN, so we can't really check the command anymore. Just put * it on the rtr queue.
*/ if (lun == NULL) { if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); return (retval); } ctl_set_unsupported_lun(ctsio); ctl_done((union ctl_io *)ctsio); CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); return (retval); } else { /* * Make sure we support this particular command on this LUN. * e.g., we don't support writes to the control LUN. */ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * If we've got a request sense, it'll clear the contingent * allegiance condition. Otherwise, if we have a CA condition for * this initiator, clear it, because it sent down a command other * than request sense. */ if (ctsio->cdb[0] != REQUEST_SENSE) { struct scsi_sense_data *ps; ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; } /* * If the command has this flag set, it handles its own unit * attention reporting, we shouldn't do anything. Otherwise we * check for any pending unit attentions, and send them back to the * initiator. We only do this when a command initially comes in, * not when we pull it off the blocked queue. * * According to SAM-3, section 5.3.2, the order that things get * presented back to the host is basically unit attentions caused * by some sort of reset event, busy status, reservation conflicts * or task set full, and finally any other status. * * One issue here is that some of the unit attentions we report * don't fall into the "reset" category (e.g. "reported luns data * has changed"). So reporting it here, before the reservation * check, may be technically wrong. I guess the only thing to do * would be to check for and report the reset events here, and then * check for the other unit attention types after we check for a * reservation conflict. * * XXX KDM need to fix this */ if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_ua_type ua_type; u_int sense_len = 0; ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, &sense_len, SSD_TYPE_NONE); if (ua_type != CTL_UA_NONE) { mtx_unlock(&lun->lun_lock); ctsio->scsi_status = SCSI_STATUS_CHECK_COND; ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; ctsio->sense_len = sense_len; ctl_done((union ctl_io *)ctsio); return (retval); } } if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (retval); } /* * XXX CHD this is where we want to send IO to other side if * this LUN is secondary on this SC. We will need to make a copy * of the IO and flag the IO on this side as SENT_2OTHER and the flag * the copy we send as FROM_OTHER. * We also need to stuff the address of the original IO so we can * find it easily. Something similar will need be done on the other * side so when we are done we can find the copy. 
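 *
 * The forwarding below packs the nexus, tag and raw CDB into a
 * CTL_MSG_SERIALIZE message and records the original I/O pointer in
 * original_sc, so the peer's reply can be matched back to this
 * command.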
*/ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { union ctl_ha_msg msg_info; int isc_retval; ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; msg_info.hdr.original_sc = (union ctl_io *)ctsio; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.nexus = ctsio->io_hdr.nexus; msg_info.scsi.tag_num = ctsio->tag_num; msg_info.scsi.tag_type = ctsio->tag_type; msg_info.scsi.cdb_len = ctsio->cdb_len; memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { ctl_set_busy(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } return (retval); } switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links))) { case CTL_ACTION_BLOCK: ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); return (retval); case CTL_ACTION_PASS: case CTL_ACTION_SKIP: ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP_TAG: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_ERROR: default: mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); ctl_done((union ctl_io *)ctsio); break; } return (retval); } const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) { const struct ctl_cmd_entry *entry; int service_action; entry = &ctl_cmd_table[ctsio->cdb[0]]; if (sa) *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); if (entry->flags & CTL_CMD_FLAG_SA5) { service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } return (entry); } const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio) { const struct ctl_cmd_entry *entry; int i, sa; uint8_t diff; entry = ctl_get_cmd_entry(ctsio, &sa); if (entry->execute == NULL) { if (sa) ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); else ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (NULL); } KASSERT(entry->length > 0, ("Not defined length for command 0x%02x/0x%02x", ctsio->cdb[0], ctsio->cdb[1])); for (i = 1; i < entry->length; i++) { diff = ctsio->cdb[i] & ~entry->usage[i - 1]; if (diff == 0) continue; ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ i, /*bit_valid*/ 1, /*bit*/ fls(diff) - 1); ctl_done((union ctl_io *)ctsio); return (NULL); } return (entry); } static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) { switch (lun_type) { case T_DIRECT: if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) return (0); break; case T_PROCESSOR: if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) return (0); break; case T_CDROM: if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) return (0); break; default: return (0); } return (1); } static int ctl_scsiio(struct ctl_scsiio *ctsio) { int retval; const struct ctl_cmd_entry 
*entry; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); entry = ctl_get_cmd_entry(ctsio, NULL); /* * If this I/O has been aborted, just send it straight to * ctl_done() without executing it. */ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { ctl_done((union ctl_io *)ctsio); goto bailout; } /* * All the checks should have been handled by ctl_scsiio_precheck(). * We should be clear now to just execute the I/O. */ retval = entry->execute(ctsio); bailout: return (retval); } static int ctl_target_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; uint32_t initidx; ctl_ua_type ua_type; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = io->taskio.task_action; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); if (io->taskio.task_action == CTL_TASK_TARGET_RESET) ua_type = CTL_UA_TARG_RESET; else ua_type = CTL_UA_BUS_RESET; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; ctl_do_lun_reset(lun, initidx, ua_type); } mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } /* * The LUN should always be set. The I/O is optional, and is used to * distinguish between I/Os sent by this initiator, and by other * initiators. We set unit attention for initiators other than this one. * SAM-3 is vague on this point. It does say that a unit attention should * be established for other initiators when a LUN is reset (see section * 5.7.3), but it doesn't specifically say that the unit attention should * be established for this particular initiator when a LUN is reset. Here * is the relevant text, from SAM-3 rev 8: * * 5.7.2 When a SCSI initiator port aborts its own tasks * * When a SCSI initiator port causes its own task(s) to be aborted, no * notification that the task(s) have been aborted shall be returned to * the SCSI initiator port other than the completion response for the * command or task management function action that caused the task(s) to * be aborted and notification(s) associated with related effects of the * action (e.g., a reset unit attention condition). * * XXX KDM for now, we're setting unit attention for all initiators. */ static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) { union ctl_io *xio; int i; mtx_lock(&lun->lun_lock); /* Abort tasks. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; } /* Clear CA. */ for (i = 0; i < ctl_max_ports; i++) { free(lun->pending_sense[i], M_CTL); lun->pending_sense[i] = NULL; } /* Clear reservation. */ lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent) { for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->prevent, i); lun->prevent_count = 0; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, -1); /* Establish UA. 
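 *
 * Per the SAM-3 discussion above, the UA is established for all
 * initiators (-1) rather than excluding the resetting one; the
 * disabled branch below preserves the stricter alternative.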
*/ #if 0 ctl_est_ua_all(lun, initidx, ua_type); #else ctl_est_ua_all(lun, -1, ua_type); #endif mtx_unlock(&lun->lun_lock); } static int ctl_lun_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { union ctl_ha_msg msg_info; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_LUN_RESET; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } return (0); } static void ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, int other_sc) { union ctl_io *xio; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((targ_port == UINT32_MAX || targ_port == xio->io_hdr.nexus.targ_port) && (init_id == UINT32_MAX || init_id == xio->io_hdr.nexus.initid)) { if (targ_port != xio->io_hdr.nexus.targ_port || init_id != xio->io_hdr.nexus.initid) xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; xio->io_hdr.flags |= CTL_FLAG_ABORT; if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = xio->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = xio->scsiio.tag_num; msg_info.task.tag_type = xio->scsiio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } } } } static int ctl_abort_task_set(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. 
*/ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } else { /* CTL_TASK_CLEAR_TASK_SET */ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t p, i; p = initidx / CTL_MAX_INIT_PER_PORT; i = initidx % CTL_MAX_INIT_PER_PORT; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); /* Abort tasks. */ ctl_abort_tasks_lun(lun, p, i, 1); /* Clear CA. */ ps = lun->pending_sense[p]; if (ps != NULL) ps[i].error_code = 0; /* Clear reservation. */ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, initidx); /* Establish UA. */ ctl_est_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static int ctl_i_t_nexus_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); uint32_t initidx; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_abort_task(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); union ctl_io *xio; struct ctl_lun *lun; #if 0 struct sbuf sb; char printbuf[128]; #endif int found; uint32_t targ_lun; found = 0; /* * Look up the LUN. */ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } #if 0 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", lun->lun, io->taskio.tag_num, io->taskio.tag_type); #endif mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. 
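 *
 * Sketch of the match applied below (illustration only):
 *
 *	same targ_port && same initid && !CTL_FLAG_ABORT
 *	    && matching tag_num -> flag the I/O with CTL_FLAG_ABORT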
*/ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { #if 0 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", lun->lun, xio->scsiio.tag_num, xio->scsiio.tag_type, (xio->io_hdr.blocked_links.tqe_prev == NULL) ? "" : " BLOCKED", (xio->io_hdr.flags & CTL_FLAG_DMA_INPROG) ? " DMA" : "", (xio->io_hdr.flags & CTL_FLAG_ABORT) ? " ABORT" : "", (xio->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); ctl_scsi_command_string(&xio->scsiio, NULL, &sb); sbuf_finish(&sb); printf("%s\n", sbuf_data(&sb)); #endif if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; /* * If the abort says that the task is untagged, the * task in the queue must be untagged. Otherwise, * we just check to see whether the tag numbers * match. This is because the QLogic firmware * doesn't pass back the tag type in an abort * request. */ #if 0 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) || (xio->scsiio.tag_num == io->taskio.tag_num)) #endif /* * XXX KDM we've got problems with FC, because it * doesn't send down a tag type with aborts. So we * can only really go by the tag number... * This may cause problems with parallel SCSI. * Need to figure that out!! */ if (xio->scsiio.tag_num == io->taskio.tag_num) { xio->io_hdr.flags |= CTL_FLAG_ABORT; found = 1; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = io->taskio.tag_num; msg_info.task.tag_type = io->taskio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; #if 0 printf("Sent Abort to other side\n"); #endif ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } #if 0 printf("ctl_abort_task: found I/O to abort\n"); #endif } } mtx_unlock(&lun->lun_lock); if (found == 0) { /* * This isn't really an error. It's entirely possible for * the abort and command completion to cross on the wire. * This is more of an informative/diagnostic error. 
*/ #if 0 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " "%u:%u:%u tag %d type %d\n", io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->taskio.tag_num, io->taskio.tag_type); #endif } io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_task(union ctl_io *io, int task_set) { struct ctl_softc *softc = CTL_SOFTC(io); union ctl_io *xio; struct ctl_lun *lun; int found = 0; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { found = 1; break; } } mtx_unlock(&lun->lun_lock); if (found) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_async_event(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; ctl_ua_type ua; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); mtx_unlock(&lun->lun_lock); if (ua != CTL_UA_NONE) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_run_task(union ctl_io *io) { int retval = 1; CTL_DEBUG_PRINT(("ctl_run_task\n")); KASSERT(io->io_hdr.io_type == CTL_IO_TASK, ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type)); io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: retval = ctl_abort_task(io); break; case CTL_TASK_ABORT_TASK_SET: case CTL_TASK_CLEAR_TASK_SET: retval = ctl_abort_task_set(io); break; case CTL_TASK_CLEAR_ACA: break; case CTL_TASK_I_T_NEXUS_RESET: retval = ctl_i_t_nexus_reset(io); break; case CTL_TASK_LUN_RESET: retval = ctl_lun_reset(io); break; case CTL_TASK_TARGET_RESET: case CTL_TASK_BUS_RESET: retval = ctl_target_reset(io); break; case CTL_TASK_PORT_LOGIN: break; case CTL_TASK_PORT_LOGOUT: break; case CTL_TASK_QUERY_TASK: retval = ctl_query_task(io, 0); break; case CTL_TASK_QUERY_TASK_SET: retval = ctl_query_task(io, 1); break; case CTL_TASK_QUERY_ASYNC_EVENT: retval = ctl_query_async_event(io); break; default: printf("%s: got unknown task management event %d\n", __func__, io->taskio.task_action); break; } if (retval == 0) io->io_hdr.status = CTL_SUCCESS; else io->io_hdr.status = CTL_ERROR; ctl_done(io); } /* * For HA operation. Handle commands that come in from the other * controller.
*/ static void ctl_handle_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; switch (io->io_hdr.msg_type) { case CTL_MSG_SERIALIZE: ctl_serialize_other_sc_cmd(&io->scsiio); break; case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_done(io); break; } mtx_lock(&lun->lun_lock); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr(io); break; case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctl_done(io); break; } if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_free_io(io); break; } mtx_lock(&lun->lun_lock); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); ctl_check_blocked(lun); mtx_unlock(&lun->lun_lock); ctl_free_io(io); break; case CTL_MSG_PERS_ACTION: ctl_hndl_per_res_out_on_other_sc(io); ctl_free_io(io); break; case CTL_MSG_BAD_JUJU: ctl_done(io); break; case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ ctl_datamove_remote(io); break; case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ io->scsiio.be_move_done(io); break; case CTL_MSG_FAILOVER: ctl_failover_lun(io); ctl_free_io(io); break; default: printf("%s: Invalid message type %d\n", __func__, io->io_hdr.msg_type); ctl_free_io(io); break; } } /* * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if * there is no match. */ static ctl_lun_error_pattern ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) { const struct ctl_cmd_entry *entry; ctl_lun_error_pattern filtered_pattern, pattern; pattern = desc->error_pattern; /* * XXX KDM we need more data passed into this function to match a * custom pattern, and we actually need to implement custom pattern * matching. */ if (pattern & CTL_LUN_PAT_CMD) return (CTL_LUN_PAT_CMD); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) return (CTL_LUN_PAT_ANY); entry = ctl_get_cmd_entry(ctsio, NULL); filtered_pattern = entry->pattern & pattern; /* * If the user requested specific flags in the pattern (e.g. * CTL_LUN_PAT_RANGE), make sure the command supports all of those * flags. * * If the user did not specify any flags, it doesn't matter whether * or not the command supports the flags. */ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != (pattern & ~CTL_LUN_PAT_MASK)) return (CTL_LUN_PAT_NONE); /* * If the user asked for a range check, see if the requested LBA * range overlaps with this command's LBA range. */ if (filtered_pattern & CTL_LUN_PAT_RANGE) { uint64_t lba1; uint64_t len1; ctl_action action; int retval; retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); if (retval != 0) return (CTL_LUN_PAT_NONE); action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, desc->lba_range.len, FALSE); /* * A "pass" means that the LBA ranges don't overlap, so * this doesn't match the user's range criteria. 
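 *
 * Worked example (hypothetical values): a command covering LBAs 0-7
 * (lba1 = 0, len1 = 8) checked against an injection range starting at
 * LBA 8 gives endlba1 = 7 < lba2 = 8, so ctl_extent_check_lba()
 * returns CTL_ACTION_PASS and no error is injected; a range starting
 * at LBA 7 would overlap and therefore match.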
*/ if (action == CTL_ACTION_PASS) return (CTL_LUN_PAT_NONE); } return (filtered_pattern); } static void ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) { struct ctl_error_desc *desc, *desc2; mtx_assert(&lun->lun_lock, MA_OWNED); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { ctl_lun_error_pattern pattern; /* * Check to see whether this particular command matches * the pattern in the descriptor. */ pattern = ctl_cmd_pattern_match(&io->scsiio, desc); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) continue; switch (desc->lun_error & CTL_LUN_INJ_TYPE) { case CTL_LUN_INJ_ABORTED: ctl_set_aborted(&io->scsiio); break; case CTL_LUN_INJ_MEDIUM_ERR: ctl_set_medium_error(&io->scsiio, (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_OUT); break; case CTL_LUN_INJ_UA: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET * OCCURRED */ ctl_set_ua(&io->scsiio, 0x29, 0x00); break; case CTL_LUN_INJ_CUSTOM: /* * We're assuming the user knows what he is doing. * Just copy the sense information without doing * checks. */ bcopy(&desc->custom_sense, &io->scsiio.sense_data, MIN(sizeof(desc->custom_sense), sizeof(io->scsiio.sense_data))); io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; io->scsiio.sense_len = SSD_FULL_SIZE; io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; break; case CTL_LUN_INJ_NONE: default: /* * If this is an error injection type we don't know * about, clear the continuous flag (if it is set) * so it will get deleted below. */ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; break; } /* * By default, each error injection action is a one-shot */ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); } } #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_datamove(io); } #endif /* CTL_IO_DELAY */ void ctl_datamove(union ctl_io *io) { void (*fe_datamove)(union ctl_io *io); mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); CTL_DEBUG_PRINT(("ctl_datamove\n")); /* No data transferred yet. Frontend must update this when done. 
*/ io->scsiio.kern_data_resid = io->scsiio.kern_data_len; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " "Tag Type: %d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.datamove_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.datamove_delay * hz, ctl_datamove_timer_wakeup, io); if (lun->delay_info.datamove_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.datamove_delay = 0; return; } } #endif /* * This command has been aborted. Set the port status, so we fail * the data move. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31337; /* * Note that the backend, in this case, will get the * callback in its context. In other cases it may get * called in the frontend's interrupt thread context. */ io->scsiio.be_move_done(io); return; } /* Don't confuse frontend with zero length data move. */ if (io->scsiio.kern_data_len == 0) { io->scsiio.be_move_done(io); return; } fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static void ctl_send_datamove_done(union ctl_io *io, int have_lock) { union ctl_ha_msg msg; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; msg.hdr.original_sc = io; msg.hdr.serializing_sc = io->io_hdr.serializing_sc; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.scsi_status = io->scsiio.scsi_status; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.port_status = io->io_hdr.port_status; io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ have_lock); return; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; } /* * The DMA to the remote side is done, now we need to tell the other side * we're done so it can continue with its data movement. 
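 *
 * On both success and failure the callback below frees the per-segment
 * bounce buffers and the S/G list storage before reporting status, so
 * those resources are reclaimed exactly once per request.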
*/ static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; uint32_t i; io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA write failed with error %d\n", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); free(io->io_hdr.remote_sglist, M_CTL); io->io_hdr.remote_sglist = NULL; io->io_hdr.local_sglist = NULL; /* * The data is in local and remote memory, so now we need to send * status (good or bad) back to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); } /* * We've moved the data from the host/controller into local memory. Now we * need to push it over to the remote controller's memory. */ static int ctl_datamove_remote_dm_write_cb(union ctl_io *io) { int retval; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, ctl_datamove_remote_write_cb); return (retval); } static void ctl_datamove_remote_write(union ctl_io *io) { int retval; void (*fe_datamove)(union ctl_io *io); /* * - Get the data from the host/HBA into local memory. * - DMA memory from the local controller to the remote controller. * - Send status back to the remote controller. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_dm_read_cb(union ctl_io *io) { #if 0 char str[256]; char path_str[64]; struct sbuf sb; #endif uint32_t i; for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); free(io->io_hdr.remote_sglist, M_CTL); io->io_hdr.remote_sglist = NULL; io->io_hdr.local_sglist = NULL; #if 0 ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, io->io_hdr.flags, io->io_hdr.status); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); #endif /* * The read is done, now we need to send status (good or bad) back * to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); return (0); } static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; void (*fe_datamove)(union ctl_io *io); io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA read failed with error %d\n", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; /* XXX KDM add checks like the ones in ctl_datamove?
*/ fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_sgl_setup(union ctl_io *io) { struct ctl_sg_entry *local_sglist; uint32_t len_to_go; int retval; int i; retval = 0; local_sglist = io->io_hdr.local_sglist; len_to_go = io->scsiio.kern_data_len; /* * The difficult thing here is that the size of the various * S/G segments may be different than the size from the * remote controller. That'll make it harder when DMAing * the data back to the other side. */ for (i = 0; len_to_go > 0; i++) { local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); local_sglist[i].addr = malloc(local_sglist[i].len, M_CTL, M_WAITOK); len_to_go -= local_sglist[i].len; } /* * Reset the number of S/G entries accordingly. The original * number of S/G entries is available in rem_sg_entries. */ io->scsiio.kern_sg_entries = i; #if 0 printf("%s: kern_sg_entries = %d\n", __func__, io->scsiio.kern_sg_entries); for (i = 0; i < io->scsiio.kern_sg_entries; i++) printf("%s: sg[%d] = %p, %lu\n", __func__, i, local_sglist[i].addr, local_sglist[i].len); #endif return (retval); } static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback) { struct ctl_ha_dt_req *rq; struct ctl_sg_entry *remote_sglist, *local_sglist; uint32_t local_used, remote_used, total_used; int i, j, isc_ret; rq = ctl_dt_req_alloc(); /* * If we failed to allocate the request, and if the DMA didn't fail * anyway, set busy status. This is just a resource allocation * failure. */ if ((rq == NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) ctl_set_busy(&io->scsiio); if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { if (rq != NULL) ctl_dt_req_free(rq); /* * The data move failed. We need to return status back * to the other controller. No point in trying to DMA * data to the remote controller. */ ctl_send_datamove_done(io, /*have_lock*/ 0); return (1); } local_sglist = io->io_hdr.local_sglist; remote_sglist = io->io_hdr.remote_sglist; local_used = 0; remote_used = 0; total_used = 0; /* * Pull/push the data over the wire from/to the other controller. * This takes into account the possibility that the local and * remote sglists may not be identical in terms of the size of * the elements and the number of elements. * * One fundamental assumption here is that the length allocated for * both the local and remote sglists is identical. Otherwise, we've * essentially got a coding error of some sort. */ isc_ret = CTL_HA_STATUS_SUCCESS; for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { uint32_t cur_len; uint8_t *tmp_ptr; rq->command = command; rq->context = io; /* * Both pointers should be aligned. But it is possible * that the allocation length is not. They should both * also have enough slack left over at the end, though, * to round up to the next 8 byte boundary. 
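 *
 * Worked example of the segment pairing (hypothetical sizes): local
 * segments of 4K+4K against remote segments of 2K+6K produce three
 * transfers of MIN(local remaining, remote remaining):
 *
 *	2K (local[0]/remote[0]), 2K (local[0]/remote[1]),
 *	4K (local[1]/remote[1])
 *
 * with whichever cursor was exhausted advancing after each transfer.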
*/ cur_len = MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used); rq->size = cur_len; tmp_ptr = (uint8_t *)local_sglist[i].addr; tmp_ptr += local_used; #if 0 /* Use physical addresses when talking to ISC hardware */ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* XXX KDM use busdma */ rq->local = vtophys(tmp_ptr); } else rq->local = tmp_ptr; #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); rq->local = tmp_ptr; #endif tmp_ptr = (uint8_t *)remote_sglist[j].addr; tmp_ptr += remote_used; rq->remote = tmp_ptr; rq->callback = NULL; local_used += cur_len; if (local_used >= local_sglist[i].len) { i++; local_used = 0; } remote_used += cur_len; if (remote_used >= remote_sglist[j].len) { j++; remote_used = 0; } total_used += cur_len; if (total_used >= io->scsiio.kern_data_len) rq->callback = callback; #if 0 printf("%s: %s: local %p remote %p size %d\n", __func__, (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", rq->local, rq->remote, rq->size); #endif isc_ret = ctl_dt_single(rq); if (isc_ret > CTL_HA_STATUS_SUCCESS) break; } if (isc_ret != CTL_HA_STATUS_WAIT) { rq->ret = isc_ret; callback(rq); } return (0); } static void ctl_datamove_remote_read(union ctl_io *io) { int retval; uint32_t i; /* * This will send an error to the other controller in the case of a * failure. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, ctl_datamove_remote_read_cb); if (retval != 0) { /* * Make sure we free memory if there was an error.. The * ctl_datamove_remote_xfer() function will send the * datamove done message, or call the callback with an * error if there is a problem. */ for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); free(io->io_hdr.remote_sglist, M_CTL); io->io_hdr.remote_sglist = NULL; io->io_hdr.local_sglist = NULL; } } /* * Process a datamove request from the other controller. This is used for * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory * first. Once that is complete, the data gets DMAed into the remote * controller's memory. For reads, we DMA from the remote controller's * memory into our memory first, and then move it out to the FETD. */ static void ctl_datamove_remote(union ctl_io *io) { mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ 0); return; } /* * Note that we look for an aborted I/O here, but don't do some of * the other checks that ctl_datamove() normally does. * We don't need to run the datamove delay code, since that should * have been done if need be on the other controller. 
*/ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31338; ctl_send_datamove_done(io, /*have_lock*/ 0); return; } if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) ctl_datamove_remote_write(io); else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ctl_datamove_remote_read(io); else { io->io_hdr.port_status = 31339; ctl_send_datamove_done(io, /*have_lock*/ 0); } } static void ctl_process_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun = CTL_LUN(io); void (*fe_done)(union ctl_io *io); union ctl_ha_msg msg; CTL_DEBUG_PRINT(("ctl_process_done\n")); fe_done = port->fe_done; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " "Tag Type: %d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ switch (io->io_hdr.io_type) { case CTL_IO_SCSI: break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_INFO) ctl_io_error_print(io, NULL); fe_done(io); return; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } if (lun == NULL) { CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", io->io_hdr.nexus.targ_mapped_lun)); goto bailout; } mtx_lock(&lun->lun_lock); /* * Check to see if we have any informational exception and status * of this command can be modified to report it in form of either * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. */ if (lun->ie_reported == 0 && lun->ie_asc != 0 && io->io_hdr.status == CTL_SUCCESS && (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { uint8_t mrie = lun->MODE_IE.mrie; uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || (lun->MODE_VER.byte3 & SMS_VER_PER)); if (((mrie == SIEP_MRIE_REC_COND && per) || mrie == SIEP_MRIE_REC_UNCOND || mrie == SIEP_MRIE_NO_SENSE) && (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, /*asc*/ lun->ie_asc, /*ascq*/ lun->ie_ascq, SSD_ELEM_NONE); lun->ie_reported = 1; } } else if (lun->ie_reported < 0) lun->ie_reported = 0; /* * Check to see if we have any errors to inject here. We only * inject errors for commands that don't already have errors set. */ if (!STAILQ_EMPTY(&lun->error_list) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) ctl_inject_error(lun, io); /* * XXX KDM how do we treat commands that aren't completed * successfully? * * XXX KDM should we also track I/O latency? 
*/ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && io->io_hdr.io_type == CTL_IO_SCSI) { int type; #ifdef CTL_TIME_IO struct bintime bt; getbinuptime(&bt); bintime_sub(&bt, &io->io_hdr.start_bt); #endif if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) type = CTL_STATS_READ; else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) type = CTL_STATS_WRITE; else type = CTL_STATS_NO_IO; #ifdef CTL_LEGACY_STATS uint32_t targ_port = port->targ_port; lun->legacy_stats.ports[targ_port].bytes[type] += io->scsiio.kern_total_len; lun->legacy_stats.ports[targ_port].operations[type] ++; lun->legacy_stats.ports[targ_port].num_dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type], &io->io_hdr.dma_bt); bintime_add(&lun->legacy_stats.ports[targ_port].time[type], &bt); #endif #endif /* CTL_LEGACY_STATS */ lun->stats.bytes[type] += io->scsiio.kern_total_len; lun->stats.operations[type] ++; lun->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&lun->stats.time[type], &bt); #endif mtx_lock(&port->port_lock); port->stats.bytes[type] += io->scsiio.kern_total_len; port->stats.operations[type] ++; port->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&port->stats.time[type], &bt); #endif mtx_unlock(&port->port_lock); } /* * Remove this from the OOA queue. */ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) lun->last_busy = getsbinuptime(); #endif /* * Run through the blocked queue on this LUN and see if anything * has become unblocked, now that this transaction is done. */ ctl_check_blocked(lun); /* * If the LUN has been invalidated, free it if there is nothing * left on its OOA queue. */ if ((lun->flags & CTL_LUN_INVALID) && TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); bailout: /* * If this command has been aborted, make sure we set the status * properly. The FETD is responsible for freeing the I/O and doing * whatever it needs to do to clean up its state. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) ctl_set_task_aborted(&io->scsiio); /* * If enabled, print command error status. */ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && (ctl_debug & CTL_DEBUG_INFO) != 0) ctl_io_error_print(io, NULL); /* * Tell the FETD or the other shelf controller we're done with this * command. Note that only SCSI commands get to this point. Task * management commands are completed above. */ if ((softc->ha_mode != CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.serializing_sc = io->io_hdr.serializing_sc; msg.hdr.nexus = io->io_hdr.nexus; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), M_WAITOK); } fe_done(io); } /* * Front end should call this if it doesn't do autosense. When the request * sense comes back in from the initiator, we'll dequeue this and send it. 
*/ int ctl_queue_sense(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t initidx, p, targ_lun; CTL_DEBUG_PRINT(("ctl_queue_sense\n")); targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); /* * LUN lookup will likely move to the ctl_work_thread() once we * have our new queueing infrastructure (that doesn't put things on * a per-LUN queue initially). That is so that we can handle * things like an INQUIRY to a LUN that we don't have enabled. We * can't deal with that right now. * If we don't have a LUN for this, just toss the sense information. */ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); goto bailout; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); p = initidx / CTL_MAX_INIT_PER_PORT; if (lun->pending_sense[p] == NULL) { lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, M_CTL, M_NOWAIT | M_ZERO); } if ((ps = lun->pending_sense[p]) != NULL) { ps += initidx % CTL_MAX_INIT_PER_PORT; memset(ps, 0, sizeof(*ps)); memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); } mtx_unlock(&lun->lun_lock); bailout: ctl_free_io(io); return (CTL_RETVAL_COMPLETE); } /* * Primary command inlet from frontend ports. All SCSI and task I/O * requests must go through this function. */ int ctl_queue(union ctl_io *io) { struct ctl_port *port = CTL_PORT(io); CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ io->io_hdr.nexus.targ_mapped_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_enqueue_incoming(io); break; default: printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } #ifdef CTL_IO_DELAY static void ctl_done_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_done(io); } #endif /* CTL_IO_DELAY */ void ctl_serseq_done(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); if (lun->be_lun == NULL || lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) return; mtx_lock(&lun->lun_lock); io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; ctl_check_blocked(lun); mtx_unlock(&lun->lun_lock); } void ctl_done(union ctl_io *io) { /* * Enable this to catch duplicate completion issues. */ #if 0 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { printf("%s: type %d msg %d cdb %x iptl: " "%u:%u:%u tag 0x%04x " "flag %#x status %x\n", __func__, io->io_hdr.io_type, io->io_hdr.msg_type, io->scsiio.cdb[0], io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, (io->io_hdr.io_type == CTL_IO_TASK) ? io->taskio.tag_num : io->scsiio.tag_num, io->io_hdr.flags, io->io_hdr.status); } else io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; #endif /* * This is an internal copy of an I/O, and should not go through * the normal done processing logic.
*/ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) return; #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.done_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.done_delay * hz, ctl_done_timer_wakeup, io); if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.done_delay = 0; return; } } #endif /* CTL_IO_DELAY */ ctl_enqueue_done(io); } static void ctl_work_thread(void *arg) { struct ctl_thread *thr = (struct ctl_thread *)arg; struct ctl_softc *softc = thr->ctl_softc; union ctl_io *io; int retval; CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); while (!softc->shutdown) { /* * We handle the queues in this order: * - ISC * - done queue (to free up resources, unblock other commands) * - RtR queue * - incoming queue * * If those queues are empty, we break out of the loop and * go to sleep. */ mtx_lock(&thr->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->isc_queue, links); mtx_unlock(&thr->queue_lock); ctl_handle_isc(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->done_queue, links); /* clear any blocked commands, call fe_done */ mtx_unlock(&thr->queue_lock); ctl_process_done(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); mtx_unlock(&thr->queue_lock); if (io->io_hdr.io_type == CTL_IO_TASK) ctl_run_task(io); else ctl_scsiio_precheck(softc, &io->scsiio); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); mtx_unlock(&thr->queue_lock); retval = ctl_scsiio(&io->scsiio); if (retval != CTL_RETVAL_COMPLETE) CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); continue; } /* Sleep until we have something to do. */ mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); } thr->thread = NULL; kthread_exit(); } static void ctl_lun_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_be_lun *be_lun; CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); while (!softc->shutdown) { mtx_lock(&softc->ctl_lock); be_lun = STAILQ_FIRST(&softc->pending_lun_queue); if (be_lun != NULL) { STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); mtx_unlock(&softc->ctl_lock); ctl_create_lun(be_lun); continue; } /* Sleep until we have something to do. 
*/ mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, PDROP | PRIBIO, "-", 0); } softc->lun_thread = NULL; kthread_exit(); } static void ctl_thresh_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_lun *lun; struct ctl_logical_block_provisioning_page *page; const char *attr; union ctl_ha_msg msg; uint64_t thres, val; int i, e, set; CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); while (!softc->shutdown) { mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if ((lun->flags & CTL_LUN_DISABLED) || (lun->flags & CTL_LUN_NO_MEDIA) || lun->backend->lun_attr == NULL) continue; if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_mode == CTL_HA_MODE_XFER) continue; if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0) continue; e = 0; page = &lun->MODE_LBP; for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) continue; thres = scsi_4btoul(page->descr[i].count); thres <<= CTL_LBP_EXPONENT; switch (page->descr[i].resource) { case 0x01: attr = "blocksavail"; break; case 0x02: attr = "blocksused"; break; case 0xf1: attr = "poolblocksavail"; break; case 0xf2: attr = "poolblocksused"; break; default: continue; } mtx_unlock(&softc->ctl_lock); // XXX val = lun->backend->lun_attr( lun->be_lun->be_lun, attr); mtx_lock(&softc->ctl_lock); if (val == UINT64_MAX) continue; if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC) e = (val >= thres); else e = (val <= thres); if (e) break; } mtx_lock(&lun->lun_lock); if (e) { scsi_u64to8b((uint8_t *)&page->descr[i] - (uint8_t *)page, lun->ua_tpt_info); if (lun->lasttpt == 0 || time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { lun->lasttpt = time_uptime; ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = 1; } else set = 0; } else { lun->lasttpt = 0; ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = -1; } mtx_unlock(&lun->lun_lock); if (set != 0 && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. 
*/ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = (set > 0); msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); mtx_unlock(&softc->ctl_lock); // XXX ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); mtx_lock(&softc->ctl_lock); } } mtx_sleep(&softc->thresh_thread, &softc->ctl_lock, PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz); } softc->thresh_thread = NULL; kthread_exit(); } static void ctl_enqueue_incoming(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; u_int idx; idx = (io->io_hdr.nexus.targ_port * 127 + io->io_hdr.nexus.initid) % worker_threads; thr = &softc->threads[idx]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_rtr(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl.h =================================================================== --- head/sys/cam/ctl/ctl.h (revision 326264) +++ head/sys/cam/ctl/ctl.h (revision 326265) @@ -1,223 +1,225 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.h#5 $ * $FreeBSD$ */ /* * Function definitions used both within CTL and potentially in various CTL * clients. * * Author: Ken Merry */ #ifndef _CTL_H_ #define _CTL_H_ #define CTL_RETVAL_COMPLETE 0 #define CTL_RETVAL_QUEUED 1 #define CTL_RETVAL_ALLOCATED 2 #define CTL_RETVAL_ERROR 3 typedef enum { CTL_PORT_NONE = 0x00, CTL_PORT_FC = 0x01, CTL_PORT_SCSI = 0x02, CTL_PORT_IOCTL = 0x04, CTL_PORT_INTERNAL = 0x08, CTL_PORT_ISCSI = 0x10, CTL_PORT_SAS = 0x20, CTL_PORT_UMASS = 0x40, CTL_PORT_ALL = 0xff, CTL_PORT_ISC = 0x100 // FC port for inter-shelf communication } ctl_port_type; struct ctl_port_entry { ctl_port_type port_type; char port_name[64]; int32_t targ_port; int physical_port; int virtual_port; u_int flags; #define CTL_PORT_WWNN_VALID 0x01 #define CTL_PORT_WWPN_VALID 0x02 uint64_t wwnn; uint64_t wwpn; int online; }; struct ctl_modepage_header { uint8_t page_code; uint8_t subpage; uint16_t len_used; uint16_t len_left; }; union ctl_modepage_info { struct ctl_modepage_header header; }; /* * Serial number length, for VPD page 0x80. */ #define CTL_SN_LEN 16 /* * Device ID length, for VPD page 0x83. */ #define CTL_DEVID_LEN 64 #define CTL_DEVID_MIN_LEN 16 /* * WWPN length, for VPD page 0x83. */ #define CTL_WWPN_LEN 8 #define CTL_DRIVER_NAME_LEN 32 /* * Unit attention types. ASC/ASCQ values for these should be placed in * ctl_build_ua. These are also listed in order of reporting priority. * i.e. a poweron UA is reported first, bus reset second, etc. */ typedef enum { CTL_UA_NONE = 0x0000, CTL_UA_POWERON = 0x0001, CTL_UA_BUS_RESET = 0x0002, CTL_UA_TARG_RESET = 0x0004, CTL_UA_I_T_NEXUS_LOSS = 0x0008, CTL_UA_LUN_RESET = 0x0010, CTL_UA_LUN_CHANGE = 0x0020, CTL_UA_MODE_CHANGE = 0x0040, CTL_UA_LOG_CHANGE = 0x0080, CTL_UA_INQ_CHANGE = 0x0100, CTL_UA_RES_PREEMPT = 0x0400, CTL_UA_RES_RELEASE = 0x0800, CTL_UA_REG_PREEMPT = 0x1000, CTL_UA_ASYM_ACC_CHANGE = 0x2000, CTL_UA_CAPACITY_CHANGE = 0x4000, CTL_UA_THIN_PROV_THRES = 0x8000, CTL_UA_MEDIUM_CHANGE = 0x10000, CTL_UA_IE = 0x20000 } ctl_ua_type; #ifdef _KERNEL MALLOC_DECLARE(M_CTL); struct ctl_page_index; #ifdef SYSCTL_DECL /* from sysctl.h */ SYSCTL_DECL(_kern_cam_ctl); #endif struct ctl_lun; struct ctl_port; struct ctl_softc; /* * Put a string into an sbuf, escaping characters that are illegal or not * recommended in XML. Note this doesn't escape everything, just > < and &. 
*/ int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size); int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last); int ctl_set_mask(uint32_t *mask, uint32_t bit); int ctl_clear_mask(uint32_t *mask, uint32_t bit); int ctl_is_set(uint32_t *mask, uint32_t bit); int ctl_default_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr); int ctl_ie_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr); int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); int ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); int ctl_config_move_done(union ctl_io *io); void ctl_datamove(union ctl_io *io); void ctl_serseq_done(union ctl_io *io); void ctl_done(union ctl_io *io); void ctl_data_submit_done(union ctl_io *io); void ctl_config_read_done(union ctl_io *io); void ctl_config_write_done(union ctl_io *io); void ctl_portDB_changed(int portnum); int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua); void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type); uint32_t ctl_decode_lun(uint64_t encoded); uint64_t ctl_encode_lun(uint32_t decoded); void ctl_isc_announce_lun(struct ctl_lun *lun); void ctl_isc_announce_port(struct ctl_port *port); void ctl_isc_announce_iid(struct ctl_port *port, int iid); void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage); /* * KPI to manipulate LUN/port options */ struct ctl_option { STAILQ_ENTRY(ctl_option) links; char *name; char *value; }; typedef STAILQ_HEAD(ctl_options, ctl_option) ctl_options_t; struct ctl_be_arg; void ctl_init_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args); void ctl_update_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args); void ctl_free_opts(ctl_options_t *opts); char * ctl_get_opt(ctl_options_t *opts, const char *name); int ctl_get_opt_number(ctl_options_t *opts, const char *name, uint64_t *num); int ctl_expand_number(const char *buf, uint64_t *num); #endif /* _KERNEL */ #endif /* _CTL_H_ */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_backend.c =================================================================== --- head/sys/cam/ctl/ctl_backend.c (revision 326264) +++ head/sys/cam/ctl/ctl_backend.c (revision 326265) @@ -1,231 +1,233 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.c#3 $ */ /* * CTL backend driver registration routines * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern struct ctl_softc *control_softc; int ctl_backend_register(struct ctl_backend_driver *be) { struct ctl_softc *softc = control_softc; struct ctl_backend_driver *be_tmp; int error; /* Sanity check, make sure this isn't a duplicate registration. */ mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(be_tmp, &softc->be_list, links) { if (strcmp(be_tmp->name, be->name) == 0) { mtx_unlock(&softc->ctl_lock); return (-1); } } mtx_unlock(&softc->ctl_lock); #ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED be->config_move_done = ctl_config_move_done; #endif be->num_luns = 0; /* Call the backend's initialization routine. */ if (be->init != NULL) { if ((error = be->init()) != 0) { printf("%s backend init error: %d\n", be->name, error); return (error); } } mtx_lock(&softc->ctl_lock); STAILQ_INSERT_TAIL(&softc->be_list, be, links); softc->num_backends++; mtx_unlock(&softc->ctl_lock); return (0); } int ctl_backend_deregister(struct ctl_backend_driver *be) { struct ctl_softc *softc = control_softc; int error; /* Call the backend's shutdown routine. 
*/ if (be->shutdown != NULL) { if ((error = be->shutdown()) != 0) { printf("%s backend shutdown error: %d\n", be->name, error); return (error); } } mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->be_list, be, ctl_backend_driver, links); softc->num_backends--; mtx_unlock(&softc->ctl_lock); return (0); } struct ctl_backend_driver * ctl_backend_find(char *backend_name) { struct ctl_softc *softc = control_softc; struct ctl_backend_driver *be_tmp; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(be_tmp, &softc->be_list, links) { if (strcmp(be_tmp->name, backend_name) == 0) { mtx_unlock(&softc->ctl_lock); return (be_tmp); } } mtx_unlock(&softc->ctl_lock); return (NULL); } void ctl_init_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args) { struct ctl_option *opt; int i; STAILQ_INIT(opts); for (i = 0; i < num_args; i++) { if ((args[i].flags & CTL_BEARG_RD) == 0) continue; if ((args[i].flags & CTL_BEARG_ASCII) == 0) continue; opt = malloc(sizeof(*opt), M_CTL, M_WAITOK); opt->name = strdup(args[i].kname, M_CTL); opt->value = strdup(args[i].kvalue, M_CTL); STAILQ_INSERT_TAIL(opts, opt, links); } } void ctl_update_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args) { struct ctl_option *opt; int i; for (i = 0; i < num_args; i++) { if ((args[i].flags & CTL_BEARG_RD) == 0) continue; if ((args[i].flags & CTL_BEARG_ASCII) == 0) continue; STAILQ_FOREACH(opt, opts, links) { if (strcmp(opt->name, args[i].kname) == 0) break; } if (args[i].kvalue != NULL && ((char *)args[i].kvalue)[0] != 0) { if (opt) { free(opt->value, M_CTL); opt->value = strdup(args[i].kvalue, M_CTL); } else { opt = malloc(sizeof(*opt), M_CTL, M_WAITOK); opt->name = strdup(args[i].kname, M_CTL); opt->value = strdup(args[i].kvalue, M_CTL); STAILQ_INSERT_TAIL(opts, opt, links); } } else if (opt) { STAILQ_REMOVE(opts, opt, ctl_option, links); free(opt->name, M_CTL); free(opt->value, M_CTL); free(opt, M_CTL); } } } void ctl_free_opts(ctl_options_t *opts) { struct ctl_option *opt; while ((opt = STAILQ_FIRST(opts)) != NULL) { STAILQ_REMOVE_HEAD(opts, links); free(opt->name, M_CTL); free(opt->value, M_CTL); free(opt, M_CTL); } } char * ctl_get_opt(ctl_options_t *opts, const char *name) { struct ctl_option *opt; STAILQ_FOREACH(opt, opts, links) { if (strcmp(opt->name, name) == 0) { return (opt->value); } } return (NULL); } int ctl_get_opt_number(ctl_options_t *opts, const char *name, uint64_t *val) { const char *value; value = ctl_get_opt(opts, name); if (value == NULL) return (-2); return (ctl_expand_number(value, val)); } Index: head/sys/cam/ctl/ctl_backend.h =================================================================== --- head/sys/cam/ctl/ctl_backend.h (revision 326264) +++ head/sys/cam/ctl/ctl_backend.h (revision 326265) @@ -1,269 +1,271 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.h#2 $ * $FreeBSD$ */ /* * CTL backend driver definitions * * Author: Ken Merry */ #ifndef _CTL_BACKEND_H_ #define _CTL_BACKEND_H_ #include typedef enum { CTL_LUN_SERSEQ_OFF, CTL_LUN_SERSEQ_READ, CTL_LUN_SERSEQ_ON } ctl_lun_serseq; #ifdef _KERNEL #define CTL_BACKEND_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ return (ctl_backend_register( \ (struct ctl_backend_driver *)data)); \ break; \ case MOD_UNLOAD: \ return (ctl_backend_deregister( \ (struct ctl_backend_driver *)data)); \ break; \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \ MODULE_DEPEND(name, ctl, 1, 1, 1); \ MODULE_DEPEND(name, cam, 1, 1, 1) typedef enum { CTL_LUN_CONFIG_OK, CTL_LUN_CONFIG_FAILURE } ctl_lun_config_status; typedef void (*be_callback_t)(void *be_lun); typedef void (*be_lun_config_t)(void *be_lun, ctl_lun_config_status status); /* * The lun_type field is the SCSI device type of this particular LUN. In * general, this should be T_DIRECT, although backends will want to create * a processor LUN, typically at LUN 0. See scsi_all.h for the defines for * the various SCSI device types. * * The flags are described above. * * The be_lun field is the backend driver's own context that will get * passed back so that it can tell which LUN CTL is referencing. * * maxlba is the maximum accessible LBA on the LUN. Note that this is * different from the capacity of the array. capacity = maxlba + 1 * * blocksize is the size, in bytes, of each LBA on the LUN. In general * this should be 512. In theory CTL should be able to handle other block * sizes. Host application software may not deal with it very well, though. * * pblockexp is the log2() of number of LBAs on the LUN per physical sector. * * pblockoff is the lowest LBA on the LUN aligned to physical sector. * * ublockexp is the log2() of number of LBAs on the LUN per UNMAP block. * * ublockoff is the lowest LBA on the LUN aligned to UNMAP block. * * atomicblock is the number of blocks that can be written atomically. * * opttxferlen is the number of blocks that can be written in one operation. * * req_lun_id is the requested LUN ID. CTL only pays attention to this * field if the CTL_LUN_FLAG_ID_REQ flag is set. If the requested LUN ID is * not available, the LUN addition will fail. If a particular LUN ID isn't * requested, the first available LUN ID will be allocated. * * serial_num is the device serial number returned in the SCSI INQUIRY VPD * page 0x80. This should be a unique, per-shelf value.
The data inside * this field should be ASCII only, left aligned, and any unused space * should be padded out with ASCII spaces. This field should NOT be NULL * terminated. * * device_id is the T10 device identifier returned in the SCSI INQUIRY VPD * page 0x83. This should be a unique, per-LUN value. The data inside * this field should be ASCII only, left aligned, and any unused space * should be padded with ASCII spaces. This field should NOT be NULL * terminated. * * The lun_shutdown() method is the callback for the ctl_invalidate_lun() * call. It is called when all outstanding I/O for that LUN has been * completed and CTL has deleted the resources for that LUN. When the CTL * backend gets this call, it can safely free its per-LUN resources. * * The lun_config_status() method is the callback for the ctl_add_lun() * call. It is called when the LUN is successfully added, or when LUN * addition fails. If the LUN is successfully added, the backend may call * the ctl_enable_lun() method to enable the LUN. * * The be field is a pointer to the ctl_backend_driver structure, which * contains the backend methods to be called by CTL. * * The ctl_lun field is for CTL internal use only, and should not be used * by the backend. * * The links field is for CTL internal use only, and should not be used by * the backend. */ struct ctl_be_lun { uint8_t lun_type; /* passed to CTL */ ctl_backend_lun_flags flags; /* passed to CTL */ ctl_lun_serseq serseq; /* passed to CTL */ void *be_lun; /* passed to CTL */ uint64_t maxlba; /* passed to CTL */ uint32_t blocksize; /* passed to CTL */ uint16_t pblockexp; /* passed to CTL */ uint16_t pblockoff; /* passed to CTL */ uint16_t ublockexp; /* passed to CTL */ uint16_t ublockoff; /* passed to CTL */ uint32_t atomicblock; /* passed to CTL */ uint32_t opttxferlen; /* passed to CTL */ uint32_t req_lun_id; /* passed to CTL */ uint32_t lun_id; /* returned from CTL */ uint8_t serial_num[CTL_SN_LEN]; /* passed to CTL */ uint8_t device_id[CTL_DEVID_LEN];/* passed to CTL */ be_callback_t lun_shutdown; /* passed to CTL */ be_lun_config_t lun_config_status; /* passed to CTL */ struct ctl_backend_driver *be; /* passed to CTL */ void *ctl_lun; /* used by CTL */ ctl_options_t options; /* passed to CTL */ STAILQ_ENTRY(ctl_be_lun) links; /* used by CTL */ }; typedef enum { CTL_BE_FLAG_NONE = 0x00, /* no flags */ CTL_BE_FLAG_HAS_CONFIG = 0x01, /* can do config reads, writes */ } ctl_backend_flags; typedef int (*be_init_t)(void); typedef int (*be_shutdown_t)(void); typedef int (*be_func_t)(union ctl_io *io); typedef void (*be_vfunc_t)(union ctl_io *io); typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); typedef int (*be_luninfo_t)(void *be_lun, struct sbuf *sb); typedef uint64_t (*be_lunattr_t)(void *be_lun, const char *attrname); struct ctl_backend_driver { char name[CTL_BE_NAME_LEN]; /* passed to CTL */ ctl_backend_flags flags; /* passed to CTL */ be_init_t init; /* passed to CTL */ be_shutdown_t shutdown; /* passed to CTL */ be_func_t data_submit; /* passed to CTL */ be_func_t data_move_done; /* passed to CTL */ be_func_t config_read; /* passed to CTL */ be_func_t config_write; /* passed to CTL */ be_ioctl_t ioctl; /* passed to CTL */ be_luninfo_t lun_info; /* passed to CTL */ be_lunattr_t lun_attr; /* passed to CTL */ #ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED be_func_t config_move_done; /* passed to backend */ #endif #if 0 be_vfunc_t config_write_done; /* passed to backend */ #endif u_int num_luns; /* used by CTL */ 
STAILQ_ENTRY(ctl_backend_driver) links; /* used by CTL */ }; int ctl_backend_register(struct ctl_backend_driver *be); int ctl_backend_deregister(struct ctl_backend_driver *be); struct ctl_backend_driver *ctl_backend_find(char *backend_name); /* * To add a LUN, first call ctl_add_lun(). You will get the lun_config_status() * callback when the LUN addition has either succeeded or failed. * * Once you get that callback, you can then call ctl_enable_lun() to enable * the LUN. */ int ctl_add_lun(struct ctl_be_lun *be_lun); int ctl_enable_lun(struct ctl_be_lun *be_lun); /* * To delete a LUN, first call ctl_disable_lun(), then * ctl_invalidate_lun(). You will get the lun_shutdown() callback when all * I/O to the LUN has completed and the LUN has been deleted. */ int ctl_disable_lun(struct ctl_be_lun *be_lun); int ctl_invalidate_lun(struct ctl_be_lun *be_lun); /* * To start a LUN (transition from powered off to powered on state) call * ctl_start_lun(). To stop a LUN (transition from powered on to powered * off state) call ctl_stop_lun(). */ int ctl_start_lun(struct ctl_be_lun *be_lun); int ctl_stop_lun(struct ctl_be_lun *be_lun); /* * Methods to notify about media and tray status changes. */ int ctl_lun_no_media(struct ctl_be_lun *be_lun); int ctl_lun_has_media(struct ctl_be_lun *be_lun); int ctl_lun_ejected(struct ctl_be_lun *be_lun); /* * Called on LUN HA role change. */ int ctl_lun_primary(struct ctl_be_lun *be_lun); int ctl_lun_secondary(struct ctl_be_lun *be_lun); /* * Let the backend notify the initiators about changes. */ void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun); #endif /* _KERNEL */ #endif /* _CTL_BACKEND_H_ */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_backend_block.c =================================================================== --- head/sys/cam/ctl/ctl_backend_block.c (revision 326264) +++ head/sys/cam/ctl/ctl_backend_block.c (revision 326265) @@ -1,2896 +1,2898 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2009-2011 Spectra Logic Corporation * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ */ /* * CAM Target Layer driver backend for block devices. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The idea here is that we'll allocate enough S/G space to hold a 1MB * I/O. If we get an I/O larger than that, we'll split it. */ #define CTLBLK_HALF_IO_SIZE (512 * 1024) #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) #define CTLBLK_MAX_SEG MAXPHYS #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) #ifdef CTLBLK_DEBUG #define DPRINTF(fmt, args...) \ printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while(0) #endif #define PRIV(io) \ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) #define ARGS(io) \ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) SDT_PROVIDER_DEFINE(cbb); typedef enum { CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, CTL_BE_BLOCK_LUN_WAITING = 0x04, } ctl_be_block_lun_flags; typedef enum { CTL_BE_BLOCK_NONE, CTL_BE_BLOCK_DEV, CTL_BE_BLOCK_FILE } ctl_be_block_type; struct ctl_be_block_filedata { struct ucred *cred; }; union ctl_be_block_bedata { struct ctl_be_block_filedata file; }; struct ctl_be_block_io; struct ctl_be_block_lun; typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun, const char *attrname); /* * Backend LUN structure. There is a 1:1 mapping between a block device * and a backend block LUN, and between a backend block LUN and a CTL LUN. */ struct ctl_be_block_lun { struct ctl_lun_create_params params; char lunname[32]; char *dev_path; ctl_be_block_type dev_type; struct vnode *vn; union ctl_be_block_bedata backend; cbb_dispatch_t dispatch; cbb_dispatch_t lun_flush; cbb_dispatch_t unmap; cbb_dispatch_t get_lba_status; cbb_getattr_t getattr; uma_zone_t lun_zone; uint64_t size_blocks; uint64_t size_bytes; struct ctl_be_block_softc *softc; struct devstat *disk_stats; ctl_be_block_lun_flags flags; STAILQ_ENTRY(ctl_be_block_lun) links; struct ctl_be_lun cbe_lun; struct taskqueue *io_taskqueue; struct task io_task; int num_threads; STAILQ_HEAD(, ctl_io_hdr) input_queue; STAILQ_HEAD(, ctl_io_hdr) config_read_queue; STAILQ_HEAD(, ctl_io_hdr) config_write_queue; STAILQ_HEAD(, ctl_io_hdr) datamove_queue; struct mtx_padalign io_lock; struct mtx_padalign queue_lock; }; /* * Overall softc structure for the block backend module. 
*/ struct ctl_be_block_softc { struct mtx lock; uma_zone_t beio_zone; int num_luns; STAILQ_HEAD(, ctl_be_block_lun) lun_list; }; static struct ctl_be_block_softc backend_block_softc; /* * Per-I/O information. */ struct ctl_be_block_io { union ctl_io *io; struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; struct iovec xiovecs[CTLBLK_MAX_SEGS]; int bio_cmd; int num_segs; int num_bios_sent; int num_bios_done; int send_complete; int first_error; uint64_t first_error_offset; struct bintime ds_t0; devstat_tag_type ds_tag_type; devstat_trans_flags ds_trans_type; uint64_t io_len; uint64_t io_offset; int io_arg; struct ctl_be_block_softc *softc; struct ctl_be_block_lun *lun; void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ }; extern struct ctl_softc *control_softc; static int cbb_num_threads = 14; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, "CAM Target Layer Block Backend"); SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, &cbb_num_threads, 0, "Number of threads per backing file"); static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); static void ctl_free_beio(struct ctl_be_block_io *beio); static void ctl_complete_beio(struct ctl_be_block_io *beio); static int ctl_be_block_move_done(union ctl_io *io); static void ctl_be_block_biodone(struct bio *bio); static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname); static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname); static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_worker(void *context, int pending); static int ctl_be_block_submit(union ctl_io *io); static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static int ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static int ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static void ctl_be_block_lun_shutdown(void *be_lun); static void ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status); static int ctl_be_block_config_write(union ctl_io *io); static int ctl_be_block_config_read(union ctl_io *io); static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 
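/*
 * Illustrative sketch, not part of this change: the queue-and-kick pattern
 * this backend uses between its producers and the per-LUN worker. A
 * producer appends the I/O header to one of the be_lun STAILQs under
 * queue_lock, then pokes the LUN's taskqueue; the worker later drains the
 * queue in its own thread context, where blocking memory allocations and
 * blocking file I/O are allowed. The helper name cbb_enqueue_input() is
 * hypothetical; the fields it touches are the ones defined in struct
 * ctl_be_block_lun above.
 */
#if 0
static void
cbb_enqueue_input(struct ctl_be_block_lun *be_lun, union ctl_io *io)
{

	mtx_lock(&be_lun->queue_lock);
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->queue_lock);
	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
}
#endif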
static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname); static int ctl_be_block_init(void); static int ctl_be_block_shutdown(void); static struct ctl_backend_driver ctl_be_block_driver = { .name = "block", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_be_block_init, .shutdown = ctl_be_block_shutdown, .data_submit = ctl_be_block_submit, .data_move_done = ctl_be_block_move_done, .config_read = ctl_be_block_config_read, .config_write = ctl_be_block_config_write, .ioctl = ctl_be_block_ioctl, .lun_info = ctl_be_block_lun_info, .lun_attr = ctl_be_block_lun_attr }; MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); static struct ctl_be_block_io * ctl_alloc_beio(struct ctl_be_block_softc *softc) { struct ctl_be_block_io *beio; beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO); beio->softc = softc; return (beio); } static void ctl_free_beio(struct ctl_be_block_io *beio) { int duplicate_free; int i; duplicate_free = 0; for (i = 0; i < beio->num_segs; i++) { if (beio->sg_segs[i].addr == NULL) duplicate_free++; uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); beio->sg_segs[i].addr = NULL; /* For compare we had two equal S/G lists. */ if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { uma_zfree(beio->lun->lun_zone, beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; } } if (duplicate_free > 0) { printf("%s: %d duplicate frees out of %d segments\n", __func__, duplicate_free, beio->num_segs); } uma_zfree(beio->softc->beio_zone, beio); } static void ctl_complete_beio(struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; if (beio->beio_cont != NULL) { beio->beio_cont(beio); } else { ctl_free_beio(beio); ctl_data_submit_done(io); } } static size_t cmp(uint8_t *a, uint8_t *b, size_t size) { size_t i; for (i = 0; i < size; i++) { if (a[i] != b[i]) break; } return (i); } static void ctl_be_block_compare(union ctl_io *io) { struct ctl_be_block_io *beio; uint64_t off, res; int i; uint8_t info[8]; beio = (struct ctl_be_block_io *)PRIV(io)->ptr; off = 0; for (i = 0; i < beio->num_segs; i++) { res = cmp(beio->sg_segs[i].addr, beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, beio->sg_segs[i].len); off += res; if (res < beio->sg_segs[i].len) break; } if (i < beio->num_segs) { scsi_u64to8b(off, info); ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MISCOMPARE, /*asc*/ 0x1D, /*ascq*/ 0x00, /*type*/ SSD_ELEM_INFO, /*size*/ sizeof(info), /*data*/ &info, /*type*/ SSD_ELEM_NONE); } else ctl_set_success(&io->scsiio); } static int ctl_be_block_move_done(union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_lun *be_lun; struct ctl_lba_len_flags *lbalen; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif beio = (struct ctl_be_block_io *)PRIV(io)->ptr; be_lun = beio->lun; DPRINTF("entered\n"); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; /* * We set status at this point for read commands, and write * commands with errors. 
*/ if (io->io_hdr.flags & CTL_FLAG_ABORT) { ; } else if ((io->io_hdr.port_status != 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } else if (io->scsiio.kern_data_resid != 0 && (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_invalid_field_ciu(&io->scsiio); } else if ((io->io_hdr.port_status == 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { lbalen = ARGS(beio->io); if (lbalen->flags & CTL_LLF_READ) { ctl_set_success(&io->scsiio); } else if (lbalen->flags & CTL_LLF_COMPARE) { /* We have two data blocks ready for comparison. */ ctl_be_block_compare(io); } } /* * If this is a read, or a write with errors, it is done. */ if ((beio->bio_cmd == BIO_READ) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { ctl_complete_beio(beio); return (0); } /* * At this point, we have a write and the DMA completed * successfully. We now have to queue it to the task queue to * execute the backend I/O. That is because we do blocking * memory allocations, and in the file backing case, blocking I/O. * This move done routine is generally called in the SIM's * interrupt context, and therefore we cannot block. */ mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (0); } static void ctl_be_block_biodone(struct bio *bio) { struct ctl_be_block_io *beio; struct ctl_be_block_lun *be_lun; union ctl_io *io; int error; beio = bio->bio_caller1; be_lun = beio->lun; io = beio->io; DPRINTF("entered\n"); error = bio->bio_error; mtx_lock(&be_lun->io_lock); if (error != 0 && (beio->first_error == 0 || bio->bio_offset < beio->first_error_offset)) { beio->first_error = error; beio->first_error_offset = bio->bio_offset; } beio->num_bios_done++; /* * XXX KDM will this cause WITNESS to complain? Holding a lock * during the free might cause it to complain. */ g_destroy_bio(bio); /* * If the send complete bit isn't set, or we aren't the last I/O to * complete, then we're done. */ if ((beio->send_complete == 0) || (beio->num_bios_done < beio->num_bios_sent)) { mtx_unlock(&be_lun->io_lock); return; } /* * At this point, we've verified that we are the last I/O to * complete, so it's safe to drop the lock. */ devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If there are any errors from the backing device, we fail the * entire I/O with a medium error. */ error = beio->first_error; if (error != 0) { if (error == EOPNOTSUPP) { ctl_set_invalid_opcode(&io->scsiio); } else if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else if (beio->bio_cmd == BIO_FLUSH) { /* XXX KDM is there is a better error here? */ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 0xbad2); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write, a flush, a delete or verify, we're all done. 
* If this is a read, we can now send the data to the user. */ if ((beio->bio_cmd == BIO_WRITE) || (beio->bio_cmd == BIO_FLUSH) || (beio->bio_cmd == BIO_DELETE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } } static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct mount *mountpoint; int error, lock_flags; DPRINTF("entered\n"); binuptime(&beio->ds_t0); mtx_lock(&be_lun->io_lock); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); mtx_unlock(&be_lun->io_lock); (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_lock(be_lun->vn, lock_flags | LK_RETRY); error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT, curthread); VOP_UNLOCK(be_lun->vn, 0); vn_finished_write(mountpoint); mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); if (error == 0) ctl_set_success(&io->scsiio); else { /* XXX KDM is there is a better error here? */ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 0xbad1); } ctl_complete_beio(beio); } SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , read, file_done,"uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t"); static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { struct ctl_be_block_filedata *file_data; union ctl_io *io; struct uio xuio; struct iovec *xiovec; size_t s; int error, flags, i; DPRINTF("entered\n"); file_data = &be_lun->backend.file; io = beio->io; flags = 0; if (ARGS(io)->flags & CTL_LLF_DPO) flags |= IO_DIRECT; if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) flags |= IO_SYNC; bzero(&xuio, sizeof(xuio)); if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, file_start); xuio.uio_rw = UIO_READ; } else { SDT_PROBE0(cbb, , write, file_start); xuio.uio_rw = UIO_WRITE; } xuio.uio_offset = beio->io_offset; xuio.uio_resid = beio->io_len; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = beio->xiovecs; xuio.uio_iovcnt = beio->num_segs; xuio.uio_td = curthread; for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { xiovec->iov_base = beio->sg_segs[i].addr; xiovec->iov_len = beio->sg_segs[i].len; } binuptime(&beio->ds_t0); mtx_lock(&be_lun->io_lock); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); mtx_unlock(&be_lun->io_lock); if (beio->bio_cmd == BIO_READ) { vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); /* * UFS pays attention to IO_DIRECT for reads. If the * DIRECTIO option is configured into the kernel, it calls * ffs_rawread(). But that only works for single-segment * uios with user space addresses. In our case, with a * kernel uio, it still reads into the buffer cache, but it * will just try to release the buffer from the cache later * on in ffs_read(). * * ZFS does not pay attention to IO_DIRECT for reads. * * UFS does not pay attention to IO_SYNC for reads. 
* * ZFS pays attention to IO_SYNC (which translates into the * Solaris define FRSYNC for zfs_read()) for reads. It * attempts to sync the file before reading. */ error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); VOP_UNLOCK(be_lun->vn, 0); SDT_PROBE0(cbb, , read, file_done); if (error == 0 && xuio.uio_resid > 0) { /* * If we read less than requested (EOF), then * we should clean the rest of the buffer. */ s = beio->io_len - xuio.uio_resid; for (i = 0; i < beio->num_segs; i++) { if (s >= beio->sg_segs[i].len) { s -= beio->sg_segs[i].len; continue; } bzero((uint8_t *)beio->sg_segs[i].addr + s, beio->sg_segs[i].len - s); s = 0; } } } else { struct mount *mountpoint; int lock_flags; (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_lock(be_lun->vn, lock_flags | LK_RETRY); /* * UFS pays attention to IO_DIRECT for writes. The write * is done asynchronously. (Normally the write would just * get put into the cache.) * * UFS pays attention to IO_SYNC for writes. It will * attempt to write the buffer out synchronously if that * flag is set. * * ZFS does not pay attention to IO_DIRECT for writes. * * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) * for writes. It will flush the transaction from the * cache before returning. */ error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); VOP_UNLOCK(be_lun->vn, 0); vn_finished_write(mountpoint); SDT_PROBE0(cbb, , write, file_done); } mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If we got an error, set the sense data to "MEDIUM ERROR" and * return the I/O to the user. */ if (error != 0) { if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write or a verify, we're all done. * If this is a read, we can now send the data to the user.
*/ if ((beio->bio_cmd == BIO_WRITE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } } static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct ctl_lba_len_flags *lbalen = ARGS(io); struct scsi_get_lba_status_data *data; off_t roff, off; int error, status; DPRINTF("entered\n"); off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off, 0, curthread->td_ucred, curthread); if (error == 0 && off > roff) status = 0; /* mapped up to off */ else { error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off, 0, curthread->td_ucred, curthread); if (error == 0 && off > roff) status = 1; /* deallocated up to off */ else { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; } } VOP_UNLOCK(be_lun->vn, 0); data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(lbalen->lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - lbalen->lba), data->descr[0].length); data->descr[0].status = status; ctl_complete_beio(beio); } static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) { struct vattr vattr; struct statfs statfs; uint64_t val; int error; val = UINT64_MAX; if (be_lun->vn == NULL) return (val); vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); if (strcmp(attrname, "blocksused") == 0) { error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); if (error == 0) val = vattr.va_bytes / be_lun->cbe_lun.blocksize; } if (strcmp(attrname, "blocksavail") == 0 && (be_lun->vn->v_iflag & VI_DOOMED) == 0) { error = VFS_STATFS(be_lun->vn->v_mount, &statfs); if (error == 0) val = statfs.f_bavail * statfs.f_bsize / be_lun->cbe_lun.blocksize; } VOP_UNLOCK(be_lun->vn, 0); return (val); } static void ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io; struct cdevsw *csw; struct cdev *dev; struct uio xuio; struct iovec *xiovec; int error, flags, i, ref; DPRINTF("entered\n"); io = beio->io; flags = 0; if (ARGS(io)->flags & CTL_LLF_DPO) flags |= IO_DIRECT; if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) flags |= IO_SYNC; bzero(&xuio, sizeof(xuio)); if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, file_start); xuio.uio_rw = UIO_READ; } else { SDT_PROBE0(cbb, , write, file_start); xuio.uio_rw = UIO_WRITE; } xuio.uio_offset = beio->io_offset; xuio.uio_resid = beio->io_len; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = beio->xiovecs; xuio.uio_iovcnt = beio->num_segs; xuio.uio_td = curthread; for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { xiovec->iov_base = beio->sg_segs[i].addr; xiovec->iov_len = beio->sg_segs[i].len; } binuptime(&beio->ds_t0); mtx_lock(&be_lun->io_lock); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); mtx_unlock(&be_lun->io_lock); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw) { if (beio->bio_cmd == BIO_READ) error = csw->d_read(dev, &xuio, flags); else error = csw->d_write(dev, &xuio, flags); dev_relthread(dev, ref); } else error = ENXIO; if (beio->bio_cmd == BIO_READ) SDT_PROBE0(cbb, , read, file_done); else SDT_PROBE0(cbb, , write, file_done); 
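	/*
	 * The call above drives the whole transfer through the zvol
	 * cdev's d_read/d_write entry point with a single kernel-space
	 * uio built from the beio scatter/gather list.  A minimal
	 * sketch of that uio pattern, assuming a hypothetical
	 * single-segment kernel buffer "buf" of "buflen" bytes at byte
	 * offset "offset" (names invented here for illustration only):
	 *
	 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
	 *	struct uio uio;
	 *
	 *	bzero(&uio, sizeof(uio));
	 *	uio.uio_iov = &iov;
	 *	uio.uio_iovcnt = 1;
	 *	uio.uio_offset = offset;
	 *	uio.uio_resid = buflen;
	 *	uio.uio_segflg = UIO_SYSSPACE;
	 *	uio.uio_rw = UIO_READ;
	 *	uio.uio_td = curthread;
	 *	error = csw->d_read(dev, &uio, 0);
	 *
	 * On return uio_resid holds the bytes not transferred, which is
	 * how the file-backed path earlier in this file detects a short
	 * read (EOF) and zeroes the tail of the buffer.
	 */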
mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If we got an error, set the sense data to "MEDIUM ERROR" and * return the I/O to the user. */ if (error != 0) { if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write or a verify, we're all done. * If this is a read, we can now send the data to the user. */ if ((beio->bio_cmd == BIO_WRITE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } } static void ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct cdevsw *csw; struct cdev *dev; struct ctl_lba_len_flags *lbalen = ARGS(io); struct scsi_get_lba_status_data *data; off_t roff, off; int error, ref, status; DPRINTF("entered\n"); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; goto done; } off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD, curthread); if (error == 0 && off > roff) status = 0; /* mapped up to off */ else { error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD, curthread); if (error == 0 && off > roff) status = 1; /* deallocated up to off */ else { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; } } dev_relthread(dev, ref); done: data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(lbalen->lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - lbalen->lba), data->descr[0].length); data->descr[0].status = status; ctl_complete_beio(beio); } static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { struct bio *bio; struct cdevsw *csw; struct cdev *dev; int ref; DPRINTF("entered\n"); /* This can't fail, it's a blocking allocation. */ bio = g_alloc_bio(); bio->bio_cmd = BIO_FLUSH; bio->bio_offset = 0; bio->bio_data = 0; bio->bio_done = ctl_be_block_biodone; bio->bio_caller1 = beio; bio->bio_pblkno = 0; /* * We don't need to acquire the LUN lock here, because we are only * sending one bio, and so there is no other context to synchronize * with. 
*/ beio->num_bios_sent = 1; beio->send_complete = 1; binuptime(&beio->ds_t0); mtx_lock(&be_lun->io_lock); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); mtx_unlock(&be_lun->io_lock); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw) { bio->bio_dev = dev; csw->d_strategy(bio); dev_relthread(dev, ref); } else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } static void ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio, uint64_t off, uint64_t len, int last) { struct bio *bio; uint64_t maxlen; struct cdevsw *csw; struct cdev *dev; int ref; csw = devvn_refthread(be_lun->vn, &dev, &ref); maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize); while (len > 0) { bio = g_alloc_bio(); bio->bio_cmd = BIO_DELETE; bio->bio_dev = dev; bio->bio_offset = off; bio->bio_length = MIN(len, maxlen); bio->bio_data = 0; bio->bio_done = ctl_be_block_biodone; bio->bio_caller1 = beio; bio->bio_pblkno = off / be_lun->cbe_lun.blocksize; off += bio->bio_length; len -= bio->bio_length; mtx_lock(&be_lun->io_lock); beio->num_bios_sent++; if (last && len == 0) beio->send_complete = 1; mtx_unlock(&be_lun->io_lock); if (csw) { csw->d_strategy(bio); } else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } if (csw) dev_relthread(dev, ref); } static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end; uint64_t len; io = beio->io; DPRINTF("entered\n"); binuptime(&beio->ds_t0); mtx_lock(&be_lun->io_lock); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); mtx_unlock(&be_lun->io_lock); if (beio->io_offset == -1) { beio->io_len = 0; ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (; buf < end; buf++) { len = (uint64_t)scsi_4btoul(buf->length) * be_lun->cbe_lun.blocksize; beio->io_len += len; ctl_be_block_unmap_dev_range(be_lun, beio, scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize, len, (end - buf < 2) ? TRUE : FALSE); } } else ctl_be_block_unmap_dev_range(be_lun, beio, beio->io_offset, beio->io_len, TRUE); } static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); struct bio *bio; struct cdevsw *csw; struct cdev *dev; off_t cur_offset; int i, max_iosize, ref; DPRINTF("entered\n"); csw = devvn_refthread(be_lun->vn, &dev, &ref); /* * We have to limit our I/O size to the maximum supported by the * backend device. Hopefully it is MAXPHYS. If the driver doesn't * set it properly, use DFLTPHYS. */ if (csw) { max_iosize = dev->si_iosize_max; if (max_iosize < PAGE_SIZE) max_iosize = DFLTPHYS; } else max_iosize = DFLTPHYS; cur_offset = beio->io_offset; for (i = 0; i < beio->num_segs; i++) { size_t cur_size; uint8_t *cur_ptr; cur_size = beio->sg_segs[i].len; cur_ptr = beio->sg_segs[i].addr; while (cur_size > 0) { /* This can't fail, it's a blocking allocation. 
*/ bio = g_alloc_bio(); KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); bio->bio_cmd = beio->bio_cmd; bio->bio_dev = dev; bio->bio_caller1 = beio; bio->bio_length = min(cur_size, max_iosize); bio->bio_offset = cur_offset; bio->bio_data = cur_ptr; bio->bio_done = ctl_be_block_biodone; bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize; cur_offset += bio->bio_length; cur_ptr += bio->bio_length; cur_size -= bio->bio_length; TAILQ_INSERT_TAIL(&queue, bio, bio_queue); beio->num_bios_sent++; } } binuptime(&beio->ds_t0); mtx_lock(&be_lun->io_lock); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); beio->send_complete = 1; mtx_unlock(&be_lun->io_lock); /* * Fire off all allocated requests! */ while ((bio = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bio, bio_queue); if (csw) csw->d_strategy(bio); else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } if (csw) dev_relthread(dev, ref); } static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname) { struct diocgattr_arg arg; struct cdevsw *csw; struct cdev *dev; int error, ref; csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) return (UINT64_MAX); strlcpy(arg.name, attrname, sizeof(arg.name)); arg.len = sizeof(arg.value.off); if (csw->d_ioctl) { error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, curthread); } else error = ENODEV; dev_relthread(dev, ref); if (error != 0) return (UINT64_MAX); return (arg.value.off); } static void ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_lba_len_flags *lbalen; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; beio->io_len = lbalen->len * cbe_lun->blocksize; beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_arg = (lbalen->flags & SSC_IMMED) != 0; beio->bio_cmd = BIO_FLUSH; beio->ds_trans_type = DEVSTAT_NO_DATA; DPRINTF("SYNC\n"); be_lun->lun_flush(be_lun, beio); } static void ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); if ((io->io_hdr.flags & CTL_FLAG_ABORT) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { ctl_config_write_done(io); return; } ctl_be_block_config_write(io); } static void ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_lba_len_flags *lbalen; uint64_t len_left, lba; uint32_t pb, pbo, adj; int i, seglen; uint8_t *buf, *end; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; lbalen = ARGS(beio->io); if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) || (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { ctl_free_beio(beio); ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize; beio->bio_cmd = BIO_DELETE; beio->ds_trans_type = DEVSTAT_FREE; be_lun->unmap(be_lun, beio); return; } beio->bio_cmd = BIO_WRITE; beio->ds_trans_type = DEVSTAT_WRITE; DPRINTF("WRITE SAME at LBA %jx len %u\n", (uintmax_t)lbalen->lba, lbalen->len); pb = 
cbe_lun->blocksize << be_lun->cbe_lun.pblockexp; if (be_lun->cbe_lun.pblockoff > 0) pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff; else pbo = 0; len_left = (uint64_t)lbalen->len * cbe_lun->blocksize; for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { /* * Setup the S/G entry for this chunk. */ seglen = MIN(CTLBLK_MAX_SEG, len_left); if (pb > cbe_lun->blocksize) { adj = ((lbalen->lba + lba) * cbe_lun->blocksize + seglen - pbo) % pb; if (seglen > adj) seglen -= adj; else seglen -= seglen % cbe_lun->blocksize; } else seglen -= seglen % cbe_lun->blocksize; beio->sg_segs[i].len = seglen; beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); beio->num_segs++; len_left -= seglen; buf = beio->sg_segs[i].addr; end = buf + seglen; for (; buf < end; buf += cbe_lun->blocksize) { if (lbalen->flags & SWS_NDOB) { memset(buf, 0, cbe_lun->blocksize); } else { memcpy(buf, io->scsiio.kern_data_ptr, cbe_lun->blocksize); } if (lbalen->flags & SWS_LBDATA) scsi_ulto4b(lbalen->lba + lba, buf); lba++; } } beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_len = lba * cbe_lun->blocksize; /* We can not do all in one run. Correct and schedule rerun. */ if (len_left > 0) { lbalen->lba += lba; lbalen->len -= lba; beio->beio_cont = ctl_be_block_cw_done_ws; } be_lun->dispatch(be_lun, beio); } static void ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_ptr_len_flags *ptrlen; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { ctl_free_beio(beio); ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } beio->io_len = 0; beio->io_offset = -1; beio->bio_cmd = BIO_DELETE; beio->ds_trans_type = DEVSTAT_FREE; DPRINTF("UNMAP\n"); be_lun->unmap(be_lun, beio); } static void ctl_be_block_cr_done(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); ctl_config_read_done(io); } static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; DPRINTF("entered\n"); softc = be_lun->softc; beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; beio->beio_cont = ctl_be_block_cr_done; PRIV(io)->ptr = (void *)beio; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: /* GET LBA STATUS */ beio->bio_cmd = -1; beio->ds_trans_type = DEVSTAT_NO_DATA; beio->ds_tag_type = DEVSTAT_TAG_ORDERED; beio->io_len = 0; if (be_lun->get_lba_status) be_lun->get_lba_status(be_lun, beio); else ctl_be_block_cr_done(beio); break; default: panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); break; } } static void ctl_be_block_cw_done(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); ctl_config_write_done(io); } static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; DPRINTF("entered\n"); softc = be_lun->softc; beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; beio->beio_cont = ctl_be_block_cw_done; switch (io->scsiio.tag_type) { case CTL_TAG_ORDERED: beio->ds_tag_type = DEVSTAT_TAG_ORDERED; break; case CTL_TAG_HEAD_OF_QUEUE: 
beio->ds_tag_type = DEVSTAT_TAG_HEAD; break; case CTL_TAG_UNTAGGED: case CTL_TAG_SIMPLE: case CTL_TAG_ACA: default: beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; break; } PRIV(io)->ptr = (void *)beio; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: ctl_be_block_cw_dispatch_sync(be_lun, io); break; case WRITE_SAME_10: case WRITE_SAME_16: ctl_be_block_cw_dispatch_ws(be_lun, io); break; case UNMAP: ctl_be_block_cw_dispatch_unmap(be_lun, io); break; default: panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); break; } } SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t"); static void ctl_be_block_next(struct ctl_be_block_io *beio) { struct ctl_be_block_lun *be_lun; union ctl_io *io; io = beio->io; be_lun = beio->lun; ctl_free_beio(beio); if ((io->io_hdr.flags & CTL_FLAG_ABORT) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { ctl_data_submit_done(io); return; } io->io_hdr.status &= ~CTL_STATUS_MASK; io->io_hdr.status |= CTL_STATUS_NONE; mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); } static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; struct ctl_lba_len_flags *lbalen; struct ctl_ptr_len_flags *bptrlen; uint64_t len_left, lbas; int i; softc = be_lun->softc; DPRINTF("entered\n"); lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_WRITE) { SDT_PROBE0(cbb, , write, start); } else { SDT_PROBE0(cbb, , read, start); } beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; bptrlen = PRIV(io); bptrlen->ptr = (void *)beio; switch (io->scsiio.tag_type) { case CTL_TAG_ORDERED: beio->ds_tag_type = DEVSTAT_TAG_ORDERED; break; case CTL_TAG_HEAD_OF_QUEUE: beio->ds_tag_type = DEVSTAT_TAG_HEAD; break; case CTL_TAG_UNTAGGED: case CTL_TAG_SIMPLE: case CTL_TAG_ACA: default: beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; break; } if (lbalen->flags & CTL_LLF_WRITE) { beio->bio_cmd = BIO_WRITE; beio->ds_trans_type = DEVSTAT_WRITE; } else { beio->bio_cmd = BIO_READ; beio->ds_trans_type = DEVSTAT_READ; } DPRINTF("%s at LBA %jx len %u @%ju\n", (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); if (lbalen->flags & CTL_LLF_COMPARE) lbas = CTLBLK_HALF_IO_SIZE; else lbas = CTLBLK_MAX_IO_SIZE; lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize); beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize; beio->io_len = lbas * cbe_lun->blocksize; bptrlen->len += lbas; for (i = 0, len_left = beio->io_len; len_left > 0; i++) { KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", i, CTLBLK_MAX_SEGS)); /* * Setup the S/G entry for this chunk. */ beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); /* Set up second segment for compare operation. 
*/ if (lbalen->flags & CTL_LLF_COMPARE) { beio->sg_segs[i + CTLBLK_HALF_SEGS].len = beio->sg_segs[i].len; beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); } beio->num_segs++; len_left -= beio->sg_segs[i].len; } if (bptrlen->len < lbalen->len) beio->beio_cont = ctl_be_block_next; io->scsiio.be_move_done = ctl_be_block_move_done; /* For compare we have separate S/G lists for read and datamove. */ if (lbalen->flags & CTL_LLF_COMPARE) io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; else io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; io->scsiio.kern_data_len = beio->io_len; io->scsiio.kern_sg_entries = beio->num_segs; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; /* * For the read case, we need to read the data into our buffers and * then we can send it back to the user. For the write case, we * need to get the data from the user first. */ if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, alloc_done); be_lun->dispatch(be_lun, beio); } else { SDT_PROBE0(cbb, , write, alloc_done); #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } } static void ctl_be_block_worker(void *context, int pending) { struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context; struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; union ctl_io *io; struct ctl_be_block_io *beio; DPRINTF("entered\n"); /* * Fetch and process I/Os from all queues. If we detect LUN * CTL_LUN_FLAG_NO_MEDIA status here, it is the result of a race, * so make the response maximally opaque so as not to confuse the * initiator. */ for (;;) { mtx_lock(&be_lun->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); if (io != NULL) { DPRINTF("datamove queue\n"); STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, ctl_io_hdr, links); mtx_unlock(&be_lun->queue_lock); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_complete_beio(beio); return; } be_lun->dispatch(be_lun, beio); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); if (io != NULL) { DPRINTF("config write queue\n"); STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, ctl_io_hdr, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_config_write_done(io); return; } ctl_be_block_cw_dispatch(be_lun, io); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue); if (io != NULL) { DPRINTF("config read queue\n"); STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr, ctl_io_hdr, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_config_read_done(io); return; } ctl_be_block_cr_dispatch(be_lun, io); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); if (io != NULL) { DPRINTF("input queue\n"); STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, ctl_io_hdr, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_data_submit_done(io); return; } ctl_be_block_dispatch(be_lun, io); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. */ mtx_unlock(&be_lun->queue_lock); break; } } /* * Entry point from CTL to the backend for I/O. We queue everything to a * work thread, so this just puts the I/O on a queue and wakes up the * thread.
*/ static int ctl_be_block_submit(union ctl_io *io) { struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; DPRINTF("entered\n"); cbe_lun = CTL_BACKEND_LUN(io); be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun; /* * Make sure we only get SCSI I/O. */ KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " "%#x) encountered", io->io_hdr.io_type)); PRIV(io)->len = 0; mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (CTL_RETVAL_COMPLETE); } static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_block_softc *softc; int error; softc = &backend_block_softc; error = 0; switch (cmd) { case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: error = ctl_be_block_create(softc, lun_req); break; case CTL_LUNREQ_RM: error = ctl_be_block_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: error = ctl_be_block_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "invalid LUN request type %d", lun_req->reqtype); break; } break; } default: error = ENOTTY; break; } return (error); } static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun; struct ctl_be_block_filedata *file_data; struct ctl_lun_create_params *params; char *value; struct vattr vattr; off_t ps, pss, po, pos, us, uss, uo, uos; int error; cbe_lun = &be_lun->cbe_lun; file_data = &be_lun->backend.file; params = &be_lun->params; be_lun->dev_type = CTL_BE_BLOCK_FILE; be_lun->dispatch = ctl_be_block_dispatch_file; be_lun->lun_flush = ctl_be_block_flush_file; be_lun->get_lba_status = ctl_be_block_gls_file; be_lun->getattr = ctl_be_block_getattr_file; be_lun->unmap = NULL; cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); if (error != 0) { snprintf(req->error_str, sizeof(req->error_str), "error calling VOP_GETATTR() for file %s", be_lun->dev_path); return (error); } file_data->cred = crhold(curthread->td_ucred); if (params->lun_size_bytes != 0) be_lun->size_bytes = params->lun_size_bytes; else be_lun->size_bytes = vattr.va_size; /* * For files we can use any logical block size. Prefer 512 bytes * for compatibility reasons. If the file's vattr.va_blocksize * (preferred I/O block size) is bigger than, and a multiple of, the * chosen logical block size, report it as the physical block size. */ if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
0 : (be_lun->size_blocks - 1); us = ps = vattr.va_blocksize; uo = po = 0; value = ctl_get_opt(&cbe_lun->options, "pblocksize"); if (value != NULL) ctl_expand_number(value, &ps); value = ctl_get_opt(&cbe_lun->options, "pblockoffset"); if (value != NULL) ctl_expand_number(value, &po); pss = ps / cbe_lun->blocksize; pos = po / cbe_lun->blocksize; if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { cbe_lun->pblockexp = fls(pss) - 1; cbe_lun->pblockoff = (pss - pos) % pss; } value = ctl_get_opt(&cbe_lun->options, "ublocksize"); if (value != NULL) ctl_expand_number(value, &us); value = ctl_get_opt(&cbe_lun->options, "ublockoffset"); if (value != NULL) ctl_expand_number(value, &uo); uss = us / cbe_lun->blocksize; uos = uo / cbe_lun->blocksize; if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { cbe_lun->ublockexp = fls(uss) - 1; cbe_lun->ublockoff = (uss - uos) % uss; } /* * Sanity check. The media size has to be at least one * sector long. */ if (be_lun->size_bytes < cbe_lun->blocksize) { error = EINVAL; snprintf(req->error_str, sizeof(req->error_str), "file %s size %ju < block size %u", be_lun->dev_path, (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize); } cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize; return (error); } static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_lun_create_params *params; struct cdevsw *csw; struct cdev *dev; char *value; int error, atomic, maxio, ref, unmap, tmp; off_t ps, pss, po, pos, us, uss, uo, uos, otmp; params = &be_lun->params; be_lun->dev_type = CTL_BE_BLOCK_DEV; csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) return (ENXIO); if (strcmp(csw->d_name, "zvol") == 0) { be_lun->dispatch = ctl_be_block_dispatch_zvol; be_lun->get_lba_status = ctl_be_block_gls_zvol; atomic = maxio = CTLBLK_MAX_IO_SIZE; } else { be_lun->dispatch = ctl_be_block_dispatch_dev; be_lun->get_lba_status = NULL; atomic = 0; maxio = dev->si_iosize_max; if (maxio <= 0) maxio = DFLTPHYS; if (maxio > CTLBLK_MAX_IO_SIZE) maxio = CTLBLK_MAX_IO_SIZE; } be_lun->lun_flush = ctl_be_block_flush_dev; be_lun->getattr = ctl_be_block_getattr_dev; be_lun->unmap = ctl_be_block_unmap_dev; if (!csw->d_ioctl) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "no d_ioctl for device %s!", be_lun->dev_path); return (ENODEV); } error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD, curthread); if (error) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "error %d returned for DIOCGSECTORSIZE ioctl " "on %s!", error, be_lun->dev_path); return (error); } /* * If the user has asked for a blocksize that is greater than the * backing device's blocksize, we can do it only if the blocksize * the user is asking for is an even multiple of the underlying * device's blocksize. 
*/ if ((params->blocksize_bytes != 0) && (params->blocksize_bytes >= tmp)) { if (params->blocksize_bytes % tmp == 0) { cbe_lun->blocksize = params->blocksize_bytes; } else { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested blocksize %u is not an even " "multiple of backing device blocksize %u", params->blocksize_bytes, tmp); return (EINVAL); } } else if (params->blocksize_bytes != 0) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested blocksize %u < backing device " "blocksize %u", params->blocksize_bytes, tmp); return (EINVAL); } else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = MAX(tmp, 2048); else cbe_lun->blocksize = tmp; error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD, curthread); if (error) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "error %d returned for DIOCGMEDIASIZE " " ioctl on %s!", error, be_lun->dev_path); return (error); } if (params->lun_size_bytes != 0) { if (params->lun_size_bytes > otmp) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested LUN size %ju > backing device " "size %ju", (uintmax_t)params->lun_size_bytes, (uintmax_t)otmp); return (EINVAL); } be_lun->size_bytes = params->lun_size_bytes; } else be_lun->size_bytes = otmp; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 0 : (be_lun->size_blocks - 1); error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD, curthread); if (error) ps = po = 0; else { error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po, FREAD, curthread); if (error) po = 0; } us = ps; uo = po; value = ctl_get_opt(&cbe_lun->options, "pblocksize"); if (value != NULL) ctl_expand_number(value, &ps); value = ctl_get_opt(&cbe_lun->options, "pblockoffset"); if (value != NULL) ctl_expand_number(value, &po); pss = ps / cbe_lun->blocksize; pos = po / cbe_lun->blocksize; if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { cbe_lun->pblockexp = fls(pss) - 1; cbe_lun->pblockoff = (pss - pos) % pss; } value = ctl_get_opt(&cbe_lun->options, "ublocksize"); if (value != NULL) ctl_expand_number(value, &us); value = ctl_get_opt(&cbe_lun->options, "ublockoffset"); if (value != NULL) ctl_expand_number(value, &uo); uss = us / cbe_lun->blocksize; uos = uo / cbe_lun->blocksize; if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { cbe_lun->ublockexp = fls(uss) - 1; cbe_lun->ublockoff = (uss - uos) % uss; } cbe_lun->atomicblock = atomic / cbe_lun->blocksize; cbe_lun->opttxferlen = maxio / cbe_lun->blocksize; if (be_lun->dispatch == ctl_be_block_dispatch_zvol) { unmap = 1; } else { struct diocgattr_arg arg; strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name)); arg.len = sizeof(arg.value.i); error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, curthread); unmap = (error == 0) ? 
arg.value.i : 0; } value = ctl_get_opt(&cbe_lun->options, "unmap"); if (value != NULL) unmap = (strcmp(value, "on") == 0); if (unmap) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; else cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; dev_relthread(dev, ref); return (0); } static int ctl_be_block_close(struct ctl_be_block_lun *be_lun) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; int flags; if (be_lun->vn) { flags = FREAD; if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0) flags |= FWRITE; (void)vn_close(be_lun->vn, flags, NOCRED, curthread); be_lun->vn = NULL; switch (be_lun->dev_type) { case CTL_BE_BLOCK_DEV: break; case CTL_BE_BLOCK_FILE: if (be_lun->backend.file.cred != NULL) { crfree(be_lun->backend.file.cred); be_lun->backend.file.cred = NULL; } break; case CTL_BE_BLOCK_NONE: break; default: panic("Unexpected backend type %d", be_lun->dev_type); break; } be_lun->dev_type = CTL_BE_BLOCK_NONE; } return (0); } static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct nameidata nd; char *value; int error, flags; error = 0; if (rootvnode == NULL) { snprintf(req->error_str, sizeof(req->error_str), "Root filesystem is not mounted"); return (1); } pwd_ensure_dirs(); value = ctl_get_opt(&cbe_lun->options, "file"); if (value == NULL) { snprintf(req->error_str, sizeof(req->error_str), "no file argument specified"); return (1); } free(be_lun->dev_path, M_CTLBLK); be_lun->dev_path = strdup(value, M_CTLBLK); flags = FREAD; value = ctl_get_opt(&cbe_lun->options, "readonly"); if (value != NULL) { if (strcmp(value, "on") != 0) flags |= FWRITE; } else if (cbe_lun->lun_type == T_DIRECT) flags |= FWRITE; again: NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); error = vn_open(&nd, &flags, 0, NULL); if ((error == EROFS || error == EACCES) && (flags & FWRITE)) { flags &= ~FWRITE; goto again; } if (error) { /* * This is the only reasonable guess we can make as far as * path if the user doesn't give us a fully qualified path. * If they want to specify a file, they need to specify the * full path. */ if (be_lun->dev_path[0] != '/') { char *dev_name; asprintf(&dev_name, M_CTLBLK, "/dev/%s", be_lun->dev_path); free(be_lun->dev_path, M_CTLBLK); be_lun->dev_path = dev_name; goto again; } snprintf(req->error_str, sizeof(req->error_str), "error opening %s: %d", be_lun->dev_path, error); return (error); } if (flags & FWRITE) cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY; else cbe_lun->flags |= CTL_LUN_FLAG_READONLY; NDFREE(&nd, NDF_ONLY_PNBUF); be_lun->vn = nd.ni_vp; /* We only support disks and files. 
*/ if (vn_isdisk(be_lun->vn, &error)) { error = ctl_be_block_open_dev(be_lun, req); } else if (be_lun->vn->v_type == VREG) { error = ctl_be_block_open_file(be_lun, req); } else { error = EINVAL; snprintf(req->error_str, sizeof(req->error_str), "%s is not a disk or plain file", be_lun->dev_path); } VOP_UNLOCK(be_lun->vn, 0); if (error != 0) ctl_be_block_close(be_lun); cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (be_lun->dispatch != ctl_be_block_dispatch_dev) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; value = ctl_get_opt(&cbe_lun->options, "serseq"); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; return (0); } static int ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun; struct ctl_be_block_lun *be_lun; struct ctl_lun_create_params *params; char num_thread_str[16]; char tmpstr[32]; char *value; int retval, num_threads; int tmp_num_threads; params = &req->reqdata.create; retval = 0; req->status = CTL_LUN_OK; be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; cbe_lun->be_lun = be_lun; be_lun->params = req->reqdata.create; be_lun->softc = softc; STAILQ_INIT(&be_lun->input_queue); STAILQ_INIT(&be_lun->config_read_queue); STAILQ_INIT(&be_lun->config_write_queue); STAILQ_INIT(&be_lun->datamove_queue); sprintf(be_lun->lunname, "cblk%d", softc->num_luns); mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); if (be_lun->lun_zone == NULL) { snprintf(req->error_str, sizeof(req->error_str), "error allocating UMA zone"); goto bailout_error; } if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; cbe_lun->flags = 0; value = ctl_get_opt(&cbe_lun->options, "ha_role"); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { be_lun->size_bytes = params->lun_size_bytes; if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 0 : (be_lun->size_blocks - 1); if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { retval = ctl_be_block_open(be_lun, req); if (retval != 0) { retval = 0; req->status = CTL_LUN_WARNING; } } num_threads = cbb_num_threads; } else { num_threads = 1; } value = ctl_get_opt(&cbe_lun->options, "num_threads"); if (value != NULL) { tmp_num_threads = strtol(value, NULL, 0); /* * We don't let the user specify less than one * thread, but hope he's clueful enough not to * specify 1000 threads. 
*/ if (tmp_num_threads < 1) { snprintf(req->error_str, sizeof(req->error_str), "invalid number of threads %s", num_thread_str); goto bailout_error; } num_threads = tmp_num_threads; } if (be_lun->vn == NULL) cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; /* Tell the user the blocksize we ended up using */ params->lun_size_bytes = be_lun->size_bytes; params->blocksize_bytes = cbe_lun->blocksize; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown; cbe_lun->lun_config_status = ctl_be_block_lun_config_status; cbe_lun->be = &ctl_be_block_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "unable to create taskqueue"); goto bailout_error; } /* * Note that we start the same number of threads by default for * both the file case and the block device case. For the file * case, we need multiple threads to allow concurrency, because the * vnode interface is designed to be a blocking interface. For the * block device case, ZFS zvols at least will block the caller's * context in many instances, and so we need multiple threads to * overcome that problem. Other block devices don't need as many * threads, but they shouldn't cause too many problems. * * If the user wants to just have a single thread for a block * device, he can specify that when the LUN is created, or change * the tunable/sysctl to alter the default number of threads. */ retval = taskqueue_start_threads(&be_lun->io_taskqueue, /*num threads*/num_threads, /*priority*/PWAIT, /*thread name*/ "%s taskq", be_lun->lunname); if (retval != 0) goto bailout_error; be_lun->num_threads = num_threads; mtx_lock(&softc->lock); softc->num_luns++; STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { mtx_lock(&softc->lock); STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); snprintf(req->error_str, sizeof(req->error_str), "ctl_add_lun() returned error %d, see dmesg for " "details", retval); retval = 0; goto bailout_error; } mtx_lock(&softc->lock); /* * Tell the config_status routine that we're waiting so it won't * clean up the LUN in the event of an error. 
*/ be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { snprintf(req->error_str, sizeof(req->error_str), "LUN configuration error, see dmesg for details"); STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); goto bailout_error; } else { params->req_lun_id = cbe_lun->lun_id; } mtx_unlock(&softc->lock); be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, cbe_lun->blocksize, DEVSTAT_ALL_SUPPORTED, cbe_lun->lun_type | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); return (retval); bailout_error: req->status = CTL_LUN_ERROR; if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); ctl_be_block_close(be_lun); if (be_lun->dev_path != NULL) free(be_lun->dev_path, M_CTLBLK); if (be_lun->lun_zone != NULL) uma_zdestroy(be_lun->lun_zone); ctl_free_opts(&cbe_lun->options); mtx_destroy(&be_lun->queue_lock); mtx_destroy(&be_lun->io_lock); free(be_lun, M_CTLBLK); return (retval); } static int ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_lun_rm_params *params; struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval; params = &req->reqdata.rm; mtx_lock(&softc->lock); STAILQ_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "LUN %u is not managed by the block backend", params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; retval = ctl_disable_lun(cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "error %d returned from ctl_disable_lun() for " "LUN %d", retval, params->lun_id); goto bailout_error; } if (be_lun->vn != NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); taskqueue_drain_all(be_lun->io_taskqueue); ctl_be_block_close(be_lun); } retval = ctl_invalidate_lun(cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "error %d returned from ctl_invalidate_lun() for " "LUN %d", retval, params->lun_id); goto bailout_error; } mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { snprintf(req->error_str, sizeof(req->error_str), "interrupted waiting for LUN to be freed"); mtx_unlock(&softc->lock); goto bailout_error; } STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); if (be_lun->disk_stats != NULL) devstat_remove_entry(be_lun->disk_stats); uma_zdestroy(be_lun->lun_zone); ctl_free_opts(&cbe_lun->options); free(be_lun->dev_path, M_CTLBLK); mtx_destroy(&be_lun->queue_lock); mtx_destroy(&be_lun->io_lock); free(be_lun, M_CTLBLK); req->status = CTL_LUN_OK; return (0); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_lun_modify_params *params; struct ctl_be_block_lun *be_lun; struct ctl_be_lun 
*cbe_lun; char *value; uint64_t oldsize; int error, wasprim; params = &req->reqdata.modify; mtx_lock(&softc->lock); STAILQ_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "LUN %u is not managed by the block backend", params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = ctl_get_opt(&cbe_lun->options, "ha_role"); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } oldsize = be_lun->size_blocks; if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { if (be_lun->vn == NULL) error = ctl_be_block_open(be_lun, req); else if (vn_isdisk(be_lun->vn, &error)) error = ctl_be_block_open_dev(be_lun, req); else if (be_lun->vn->v_type == VREG) { vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); error = ctl_be_block_open_file(be_lun, req); VOP_UNLOCK(be_lun->vn, 0); } else error = EINVAL; if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) && be_lun->vn != NULL) { cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; ctl_lun_has_media(cbe_lun); } else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 && be_lun->vn == NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); } cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; } else { if (be_lun->vn != NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); taskqueue_drain_all(be_lun->io_taskqueue); error = ctl_be_block_close(be_lun); } else error = 0; } if (be_lun->size_blocks != oldsize) ctl_lun_capacity_changed(cbe_lun); /* Tell the user the exact size we ended up using */ params->lun_size_bytes = be_lun->size_bytes; req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK; return (0); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static void ctl_be_block_lun_shutdown(void *be_lun) { struct ctl_be_block_lun *lun = be_lun; struct ctl_be_block_softc *softc = lun->softc; mtx_lock(&softc->lock); lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) wakeup(lun); mtx_unlock(&softc->lock); } static void ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) { struct ctl_be_block_lun *lun; struct ctl_be_block_softc *softc; lun = (struct ctl_be_block_lun *)be_lun; softc = lun->softc; if (status == CTL_LUN_CONFIG_OK) { mtx_lock(&softc->lock); lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) wakeup(lun); mtx_unlock(&softc->lock); /* * We successfully added the LUN, attempt to enable it. 
*/ if (ctl_enable_lun(&lun->cbe_lun) != 0) { printf("%s: ctl_enable_lun() failed!\n", __func__); if (ctl_invalidate_lun(&lun->cbe_lun) != 0) { printf("%s: ctl_invalidate_lun() failed!\n", __func__); } } return; } mtx_lock(&softc->lock); lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; wakeup(lun); mtx_unlock(&softc->lock); } static int ctl_be_block_config_write(union ctl_io *io) { struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval; DPRINTF("entered\n"); cbe_lun = CTL_BACKEND_LUN(io); be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun; retval = 0; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: case WRITE_SAME_10: case WRITE_SAME_16: case UNMAP: /* * The upper level CTL code will filter out any CDBs with * the immediate bit set and return the proper error. * * We don't really need to worry about what LBA range the * user asked to be synced out. When they issue a sync * cache command, we'll sync out the whole thing. */ mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); break; case START_STOP_UNIT: { struct scsi_start_stop_unit *cdb; struct ctl_lun_req req; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; if ((cdb->how & SSS_PC_MASK) != 0) { ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } if (cdb->how & SSS_START) { if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) { retval = ctl_be_block_open(be_lun, &req); cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; if (retval == 0) { cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; ctl_lun_has_media(cbe_lun); } else { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); } } ctl_start_lun(cbe_lun); } else { ctl_stop_lun(cbe_lun); if (cdb->how & SSS_LOEJ) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; cbe_lun->flags |= CTL_LUN_FLAG_EJECTED; ctl_lun_ejected(cbe_lun); if (be_lun->vn != NULL) ctl_be_block_close(be_lun); } } ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } case PREVENT_ALLOW: ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_write_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static int ctl_be_block_config_read(union ctl_io *io) { struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval = 0; DPRINTF("entered\n"); cbe_lun = CTL_BACKEND_LUN(io); be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->config_read_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); retval = CTL_RETVAL_QUEUED; break; } ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) { struct ctl_be_block_lun *lun; int retval; lun = (struct ctl_be_block_lun *)be_lun; retval = sbuf_printf(sb, "\t"); if (retval != 0) goto bailout; retval = sbuf_printf(sb, "%d", lun->num_threads); if (retval != 0) goto bailout; retval = sbuf_printf(sb, "\n"); bailout: return (retval); } static 
uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname) { struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)be_lun; if (lun->getattr == NULL) return (UINT64_MAX); return (lun->getattr(lun, attrname)); } static int ctl_be_block_init(void) { struct ctl_be_block_softc *softc = &backend_block_softc; mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); STAILQ_INIT(&softc->lun_list); return (0); } static int ctl_be_block_shutdown(void) { struct ctl_be_block_softc *softc = &backend_block_softc; struct ctl_be_block_lun *lun, *next_lun; mtx_lock(&softc->lock); STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) { /* * Drop our lock here. Since ctl_invalidate_lun() can call * back into us, this could potentially lead to a recursive * lock of the same mutex, which would cause a hang. */ mtx_unlock(&softc->lock); ctl_disable_lun(&lun->cbe_lun); ctl_invalidate_lun(&lun->cbe_lun); mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); uma_zdestroy(softc->beio_zone); mtx_destroy(&softc->lock); return (0); } Index: head/sys/cam/ctl/ctl_backend_ramdisk.c =================================================================== --- head/sys/cam/ctl/ctl_backend_ramdisk.c (revision 326264) +++ head/sys/cam/ctl/ctl_backend_ramdisk.c (revision 326265) @@ -1,1360 +1,1362 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003, 2008 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $ */ /* * CAM Target Layer black hole and RAM disk backend. 
* * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRIV(io) \ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) #define ARGS(io) \ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) #define PPP (PAGE_SIZE / sizeof(uint8_t **)) #ifdef __LP64__ #define PPPS (PAGE_SHIFT - 3) #else #define PPPS (PAGE_SHIFT - 2) #endif #define SGPP (PAGE_SIZE / sizeof(struct ctl_sg_entry)) #define P_UNMAPPED NULL /* Page is unmapped. */ #define P_ANCHORED ((void *)(uintptr_t)1) /* Page is anchored. */ typedef enum { GP_READ, /* Return data page or zero page. */ GP_WRITE, /* Return data page, try allocate if none. */ GP_ANCHOR, /* Return data page, try anchor if none. */ GP_OTHER, /* Return what present, do not allocate/anchor. */ } getpage_op_t; typedef enum { CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01, CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02, CTL_BE_RAMDISK_LUN_WAITING = 0x04 } ctl_be_ramdisk_lun_flags; struct ctl_be_ramdisk_lun { struct ctl_lun_create_params params; char lunname[32]; int indir; uint8_t **pages; uint8_t *zero_page; struct sx page_lock; u_int pblocksize; u_int pblockmul; uint64_t size_bytes; uint64_t size_blocks; uint64_t cap_bytes; uint64_t cap_used; struct ctl_be_ramdisk_softc *softc; ctl_be_ramdisk_lun_flags flags; STAILQ_ENTRY(ctl_be_ramdisk_lun) links; struct ctl_be_lun cbe_lun; struct taskqueue *io_taskqueue; struct task io_task; STAILQ_HEAD(, ctl_io_hdr) cont_queue; struct mtx_padalign queue_lock; }; struct ctl_be_ramdisk_softc { struct mtx lock; int num_luns; STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list; }; static struct ctl_be_ramdisk_softc rd_softc; extern struct ctl_softc *control_softc; static int ctl_backend_ramdisk_init(void); static int ctl_backend_ramdisk_shutdown(void); static int ctl_backend_ramdisk_move_done(union ctl_io *io); static void ctl_backend_ramdisk_compare(union ctl_io *io); static void ctl_backend_ramdisk_rw(union ctl_io *io); static int ctl_backend_ramdisk_submit(union ctl_io *io); static void ctl_backend_ramdisk_worker(void *context, int pending); static int ctl_backend_ramdisk_config_read(union ctl_io *io); static int ctl_backend_ramdisk_config_write(union ctl_io *io); static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname); static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static void ctl_backend_ramdisk_lun_shutdown(void *be_lun); static void ctl_backend_ramdisk_lun_config_status(void *be_lun, ctl_lun_config_status status); static struct ctl_backend_driver ctl_be_ramdisk_driver = { .name = "ramdisk", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_backend_ramdisk_init, .shutdown = ctl_backend_ramdisk_shutdown, .data_submit = ctl_backend_ramdisk_submit, .data_move_done = ctl_backend_ramdisk_move_done, .config_read = ctl_backend_ramdisk_config_read, .config_write = ctl_backend_ramdisk_config_write, .ioctl = ctl_backend_ramdisk_ioctl, .lun_attr = ctl_backend_ramdisk_lun_attr, }; 
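/*
 * A minimal sketch (not from the original file) of the sparse-page walk
 * that ctl_backend_ramdisk_getpage() and friends perform below.  Interior
 * nodes of the page tree hold PPP child pointers each, PPPS is log2(PPP),
 * and "indir" is the number of indirection levels, so a page number is
 * consumed PPPS bits at a time, most significant bits first.  For example,
 * with indir == 2 and PPPS == 9, pn == 74565 descends through slot 145 at
 * the first level and slot 325 at the second.  The helper name and the
 * __unused qualifier are additions for illustration only.
 */
static __unused uint8_t *
ctl_backend_ramdisk_walk_sketch(uint8_t **root, int indir, off_t pn)
{
	uint8_t **p;
	off_t i;
	int s;

	p = root;
	for (s = (indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (p == NULL)
			break;			/* Hole: nothing mapped below. */
		i = pn >> s;			/* Index into this level's node. */
		p = (uint8_t **)p[i];		/* Descend one level. */
		pn -= i << s;			/* Keep the low-order bits. */
	}
	/* Result: a data page, P_UNMAPPED, P_ANCHORED, or NULL (hole). */
	return ((uint8_t *)p);
}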
MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	return (0);
}

static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
};

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if
(p == NULL) return; if (indir == 0) { free(p, M_RAMDISK); return; } for (i = 0; i < PPP; i++) { if (p[i] == NULL) continue; ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1); } free(p, M_RAMDISK); }; static size_t cmp(uint8_t *a, uint8_t *b, size_t size) { size_t i; for (i = 0; i < size; i++) { if (a[i] != b[i]) break; } return (i); } static int ctl_backend_ramdisk_cmp(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; uint8_t *page; uint8_t info[8]; uint64_t lba; u_int lbaoff, lbas, res, off; lbas = io->scsiio.kern_data_len / cbe_lun->blocksize; lba = ARGS(io)->lba + PRIV(io)->len - lbas; off = 0; for (; lbas > 0; lbas--, lba++) { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, GP_READ); lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); page += lbaoff * cbe_lun->blocksize; res = cmp(io->scsiio.kern_data_ptr + off, page, cbe_lun->blocksize); off += res; if (res < cbe_lun->blocksize) break; } if (lbas > 0) { off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len; scsi_u64to8b(off, info); ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MISCOMPARE, /*asc*/ 0x1D, /*ascq*/ 0x00, /*type*/ SSD_ELEM_INFO, /*size*/ sizeof(info), /*data*/ &info, /*type*/ SSD_ELEM_NONE); return (1); } return (0); } static int ctl_backend_ramdisk_move_done(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n")); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; if (io->scsiio.kern_sg_entries > 0) free(io->scsiio.kern_data_ptr, M_RAMDISK); io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; if (io->io_hdr.flags & CTL_FLAG_ABORT) { ; } else if (io->io_hdr.port_status != 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } else if (io->scsiio.kern_data_resid != 0 && (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_invalid_field_ciu(&io->scsiio); } else if ((io->io_hdr.port_status == 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { if (ARGS(io)->flags & CTL_LLF_COMPARE) { /* We have data block ready for comparison. 
*/ if (ctl_backend_ramdisk_cmp(io)) goto done; } if (ARGS(io)->len > PRIV(io)->len) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->cont_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (0); } ctl_set_success(&io->scsiio); } done: ctl_data_submit_done(io); return(0); } static void ctl_backend_ramdisk_compare(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); u_int lbas, len; lbas = ARGS(io)->len - PRIV(io)->len; lbas = MIN(lbas, 131072 / cbe_lun->blocksize); len = lbas * cbe_lun->blocksize; io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK); io->scsiio.kern_data_len = len; io->scsiio.kern_sg_entries = 0; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; PRIV(io)->len += lbas; #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } static void ctl_backend_ramdisk_rw(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; struct ctl_sg_entry *sg_entries; uint8_t *page; uint64_t lba; u_int i, len, lbaoff, lbas, sgs, off; getpage_op_t op; lba = ARGS(io)->lba + PRIV(io)->len; lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); lbas = ARGS(io)->len - PRIV(io)->len; lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff); sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp; off = lbaoff * cbe_lun->blocksize; op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ; if (sgs > 1) { io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) * sgs, M_RAMDISK, M_WAITOK); sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; len = lbas * cbe_lun->blocksize; for (i = 0; i < sgs; i++) { page = ctl_backend_ramdisk_getpage(be_lun, (lba >> cbe_lun->pblockexp) + i, op); if (page == P_UNMAPPED || page == P_ANCHORED) { free(io->scsiio.kern_data_ptr, M_RAMDISK); nospc: ctl_set_space_alloc_fail(&io->scsiio); ctl_data_submit_done(io); return; } sg_entries[i].addr = page + off; sg_entries[i].len = MIN(len, be_lun->pblocksize - off); len -= sg_entries[i].len; off = 0; } } else { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, op); if (page == P_UNMAPPED || page == P_ANCHORED) goto nospc; sgs = 0; io->scsiio.kern_data_ptr = page + off; } io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; io->scsiio.kern_data_len = lbas * cbe_lun->blocksize; io->scsiio.kern_sg_entries = sgs; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; PRIV(io)->len += lbas; if ((ARGS(io)->flags & CTL_LLF_READ) && ARGS(io)->len <= PRIV(io)->len) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } static int ctl_backend_ramdisk_submit(union ctl_io *io) { struct ctl_lba_len_flags *lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_VERIFY) { ctl_set_success(&io->scsiio); ctl_data_submit_done(io); return (CTL_RETVAL_COMPLETE); } PRIV(io)->len = 0; if (lbalen->flags & CTL_LLF_COMPARE) ctl_backend_ramdisk_compare(io); else ctl_backend_ramdisk_rw(io); return (CTL_RETVAL_COMPLETE); } static void ctl_backend_ramdisk_worker(void *context, int pending) { struct ctl_be_ramdisk_lun *be_lun; union ctl_io *io; be_lun = (struct ctl_be_ramdisk_lun *)context; mtx_lock(&be_lun->queue_lock); for (;;) { io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue); if (io != NULL) { STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr, ctl_io_hdr, links); mtx_unlock(&be_lun->queue_lock); if 
(ARGS(io)->flags & CTL_LLF_COMPARE) ctl_backend_ramdisk_compare(io); else ctl_backend_ramdisk_rw(io); mtx_lock(&be_lun->queue_lock); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. */ break; } mtx_unlock(&be_lun->queue_lock); } static int ctl_backend_ramdisk_gls(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; struct scsi_get_lba_status_data *data; uint8_t *page; u_int lbaoff; data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr); lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp); scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length); page = ctl_backend_ramdisk_getpage(be_lun, ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER); if (page == P_UNMAPPED) data->descr[0].status = 1; else if (page == P_ANCHORED) data->descr[0].status = 2; else data->descr[0].status = 0; ctl_config_read_done(io); return (CTL_RETVAL_COMPLETE); } static int ctl_backend_ramdisk_config_read(union ctl_io *io) { int retval = 0; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { retval = ctl_backend_ramdisk_gls(io); break; } ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static void ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len, int anchor) { struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; uint8_t *page; uint64_t p, lp; u_int lbaoff; getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER; /* Partially zero first partial page. */ p = lba >> cbe_lun->pblockexp; lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); if (lbaoff != 0) { page = ctl_backend_ramdisk_getpage(be_lun, p, op); if (page != P_UNMAPPED && page != P_ANCHORED) { memset(page + lbaoff * cbe_lun->blocksize, 0, min(len, be_lun->pblockmul - lbaoff) * cbe_lun->blocksize); } p++; } /* Partially zero last partial page. */ lp = (lba + len) >> cbe_lun->pblockexp; lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp); if (p <= lp && lbaoff != 0) { page = ctl_backend_ramdisk_getpage(be_lun, lp, op); if (page != P_UNMAPPED && page != P_ANCHORED) memset(page, 0, lbaoff * cbe_lun->blocksize); } /* Delete remaining full pages. 
*/ if (anchor) { for (; p < lp; p++) ctl_backend_ramdisk_anchorpage(be_lun, p); } else { for (; p < lp; p++) ctl_backend_ramdisk_unmappage(be_lun, p); } } static void ctl_backend_ramdisk_ws(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; struct ctl_lba_len_flags *lbalen = ARGS(io); uint8_t *page; uint64_t lba; u_int lbaoff, lbas; if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) { ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } if (lbalen->flags & SWS_UNMAP) { ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len, (lbalen->flags & SWS_ANCHOR) != 0); ctl_set_success(&io->scsiio); ctl_config_write_done(io); return; } for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, GP_WRITE); if (page == P_UNMAPPED || page == P_ANCHORED) { ctl_set_space_alloc_fail(&io->scsiio); ctl_data_submit_done(io); return; } lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); page += lbaoff * cbe_lun->blocksize; if (lbalen->flags & SWS_NDOB) { memset(page, 0, cbe_lun->blocksize); } else { memcpy(page, io->scsiio.kern_data_ptr, cbe_lun->blocksize); } if (lbalen->flags & SWS_LBDATA) scsi_ulto4b(lba, page); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); } static void ctl_backend_ramdisk_unmap(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io); struct scsi_unmap_desc *buf, *end; if ((ptrlen->flags & ~SU_ANCHOR) != 0) { ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (; buf < end; buf++) { ctl_backend_ramdisk_delete(cbe_lun, scsi_8btou64(buf->lba), scsi_4btoul(buf->length), (ptrlen->flags & SU_ANCHOR) != 0); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); } static int ctl_backend_ramdisk_config_write(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); int retval = 0; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: /* We have no cache to flush. 
*/ ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; case START_STOP_UNIT: { struct scsi_start_stop_unit *cdb; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; if ((cdb->how & SSS_PC_MASK) != 0) { ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } if (cdb->how & SSS_START) { if (cdb->how & SSS_LOEJ) ctl_lun_has_media(cbe_lun); ctl_start_lun(cbe_lun); } else { ctl_stop_lun(cbe_lun); if (cdb->how & SSS_LOEJ) ctl_lun_ejected(cbe_lun); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } case PREVENT_ALLOW: ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; case WRITE_SAME_10: case WRITE_SAME_16: ctl_backend_ramdisk_ws(io); break; case UNMAP: ctl_backend_ramdisk_unmap(io); break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_write_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static uint64_t ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname) { struct ctl_be_ramdisk_lun *be_lun = arg; uint64_t val; val = UINT64_MAX; if (be_lun->cap_bytes == 0) return (val); sx_slock(&be_lun->page_lock); if (strcmp(attrname, "blocksused") == 0) { val = be_lun->cap_used / be_lun->cbe_lun.blocksize; } else if (strcmp(attrname, "blocksavail") == 0) { val = (be_lun->cap_bytes - be_lun->cap_used) / be_lun->cbe_lun.blocksize; } sx_sunlock(&be_lun->page_lock); return (val); } static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_lun_req *lun_req; int retval; retval = 0; switch (cmd) { case CTL_LUN_REQ: lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: retval = ctl_backend_ramdisk_create(softc, lun_req); break; case CTL_LUNREQ_RM: retval = ctl_backend_ramdisk_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: retval = ctl_backend_ramdisk_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "%s: invalid LUN request type %d", __func__, lun_req->reqtype); break; } break; default: retval = ENOTTY; break; } return (retval); } static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_lun_rm_params *params; int retval; params = &req->reqdata.rm; mtx_lock(&softc->lock); STAILQ_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } retval = ctl_disable_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: error %d returned from ctl_disable_lun() for " "LUN %d", __func__, retval, params->lun_id); goto bailout_error; } /* * Set the waiting flag before we invalidate the LUN. Our shutdown * routine can be called any time after we invalidate the LUN, * and can be called from our context. * * This tells the shutdown routine that we're waiting, or we're * going to wait for the shutdown to happen. 
*/ mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); retval = ctl_invalidate_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: error %d returned from ctl_invalidate_lun() for " "LUN %d", __func__, retval, params->lun_id); mtx_lock(&softc->lock); be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); goto bailout_error; } mtx_lock(&softc->lock); while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; /* * We only remove this LUN from the list and free it (below) if * retval == 0. If the user interrupted the wait, we just bail out * without actually freeing the LUN. We let the shutdown routine * free the LUN if that happens. */ if (retval == 0) { STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; } mtx_unlock(&softc->lock); if (retval == 0) { taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); ctl_free_opts(&be_lun->cbe_lun.options); free(be_lun->zero_page, M_RAMDISK); ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); free(be_lun, M_RAMDISK); } req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_create_params *params; char *value; char tmpstr[32]; uint64_t t; int retval; retval = 0; params = &req->reqdata.create; be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; cbe_lun->be_lun = be_lun; be_lun->params = req->reqdata.create; be_lun->softc = softc; sprintf(be_lun->lunname, "cram%d", softc->num_luns); ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED; cbe_lun->flags = 0; value = ctl_get_opt(&cbe_lun->options, "ha_role"); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; be_lun->pblocksize = PAGE_SIZE; value = ctl_get_opt(&cbe_lun->options, "pblocksize"); if (value != NULL) { ctl_expand_number(value, &t); be_lun->pblocksize = t; } if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) { snprintf(req->error_str, sizeof(req->error_str), "%s: unsupported pblocksize %u", __func__, be_lun->pblocksize); goto bailout_error; } if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize; if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) { snprintf(req->error_str, sizeof(req->error_str), "%s: pblocksize %u not exp2 of blocksize %u", __func__, be_lun->pblocksize, cbe_lun->blocksize); goto bailout_error; } if (params->lun_size_bytes < cbe_lun->blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, 
params->lun_size_bytes, cbe_lun->blocksize); goto bailout_error; } be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize; be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize; be_lun->indir = 0; t = be_lun->size_bytes / be_lun->pblocksize; while (t > 1) { t /= PPP; be_lun->indir++; } cbe_lun->maxlba = be_lun->size_blocks - 1; cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1; cbe_lun->pblockoff = 0; cbe_lun->ublockexp = cbe_lun->pblockexp; cbe_lun->ublockoff = 0; cbe_lun->atomicblock = be_lun->pblocksize; cbe_lun->opttxferlen = SGPP * be_lun->pblocksize; value = ctl_get_opt(&cbe_lun->options, "capacity"); if (value != NULL) ctl_expand_number(value, &be_lun->cap_bytes); } else { be_lun->pblockmul = 1; cbe_lun->pblockexp = 0; } /* Tell the user the blocksize we ended up using */ params->blocksize_bytes = cbe_lun->blocksize; params->lun_size_bytes = be_lun->size_bytes; value = ctl_get_opt(&cbe_lun->options, "unmap"); if (value == NULL || strcmp(value, "off") != 0) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; value = ctl_get_opt(&cbe_lun->options, "readonly"); if (value != NULL) { if (strcmp(value, "on") == 0) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; } else if (cbe_lun->lun_type != T_DIRECT) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; value = ctl_get_opt(&cbe_lun->options, "serseq"); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown; cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status; cbe_lun->be = &ctl_be_ramdisk_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } STAILQ_INIT(&be_lun->cont_queue); sx_init(&be_lun->page_lock, "cram page lock"); if (be_lun->cap_bytes == 0) { be_lun->indir = 0; be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK); } be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF); TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker, be_lun); be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: Unable to 
create taskqueue", __func__); goto bailout_error; } retval = taskqueue_start_threads(&be_lun->io_taskqueue, /*num threads*/1, /*priority*/PWAIT, /*thread name*/ "%s taskq", be_lun->lunname); if (retval != 0) goto bailout_error; mtx_lock(&softc->lock); softc->num_luns++; STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { mtx_lock(&softc->lock); STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); snprintf(req->error_str, sizeof(req->error_str), "%s: ctl_add_lun() returned error %d, see dmesg for " "details", __func__, retval); retval = 0; goto bailout_error; } mtx_lock(&softc->lock); /* * Tell the config_status routine that we're waiting so it won't * clean up the LUN in the event of an error. */ be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN configuration error, see dmesg for details", __func__); STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); goto bailout_error; } else { params->req_lun_id = cbe_lun->lun_id; } mtx_unlock(&softc->lock); req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; if (be_lun != NULL) { if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); ctl_free_opts(&cbe_lun->options); free(be_lun->zero_page, M_RAMDISK); ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); free(be_lun, M_RAMDISK); } return (retval); } static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_modify_params *params; char *value; uint32_t blocksize; int wasprim; params = &req->reqdata.modify; mtx_lock(&softc->lock); STAILQ_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = ctl_get_opt(&cbe_lun->options, "ha_role"); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } blocksize = be_lun->cbe_lun.blocksize; if (be_lun->params.lun_size_bytes < blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, be_lun->params.lun_size_bytes, blocksize); goto bailout_error; } be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize; 
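	/*
	 * The division above rounds the requested size down to a whole
	 * number of blocks, and the multiplication below reports the byte
	 * count back as an exact multiple of the block size, so an
	 * unaligned resize request is truncated to a block boundary
	 * (e.g. a 1000-byte request with 512-byte blocks yields a single
	 * 512-byte block).  LBAs are zero-based, hence maxlba below is
	 * size_blocks - 1.
	 */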
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun = be_lun;
	struct ctl_be_ramdisk_softc *softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		free(be_lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				    __func__);
			}
		}
		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}
Index: head/sys/cam/ctl/ctl_cmd_table.c
===================================================================
--- head/sys/cam/ctl/ctl_cmd_table.c	(revision 326264)
+++ head/sys/cam/ctl/ctl_cmd_table.c	(revision 326265)
@@ -1,1859 +1,1861 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
 * Copyright (c) 2003, 2004, 2005, 2009 Silicon Graphics International Corp.
 * Copyright (c) 2014-2015 Alexander Motin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_cmd_table.c#4 $ * $FreeBSD$ */ /* * CAM Target Layer command table. * * Author: Ken Merry , Kim Le */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Whenever support for a new command is added, it should be added to these * tables. */ /* 3B WRITE BUFFER */ const struct ctl_cmd_entry ctl_cmd_table_3b[32] = { /* 00 WRITE BUFFER HDR DATA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 01 WRITE BUFFER VENDOR */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 WRITE BUFFER DATA */ {ctl_write_buffer, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 03 WRITE BUFFER DESCR */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 04 WRITE BUFFER DOWNLOAD */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 05 WRITE BUFFER DOWNLOAD SAVE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A WRITE BUFFER ECHO */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0B WRITE BUFFER ECHO DESCRIPTOR */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 11 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 12 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 13 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 14 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 15 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 16 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 17 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 18 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 19 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1C WRITE BUFFER ERROR HISTORY */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1d-1f */ }; /* 3C READ BUFFER(10) */ const struct ctl_cmd_entry ctl_cmd_table_3c[32] = { /* 00 READ BUFFER(10) HDR DATA */ 
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 01 READ BUFFER(10) VENDOR */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 READ BUFFER(10) DATA */ {ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 03 READ BUFFER(10) DESCR */ {ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 10, {0x03, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 04 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 05 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A READ BUFFER(10) ECHO */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0B READ BUFFER(10) ECHO DESCRIPTOR */ {ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 10, {0x0b, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 11 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 12 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 13 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 14 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 15 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 16 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 17 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 18 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 19 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1C READ BUFFER(10) ERROR HISTORY */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1d-1f */ }; /* 5E PERSISTENT RESERVE IN */ const struct ctl_cmd_entry ctl_cmd_table_5e[32] = { /* 00 READ KEYS */ {ctl_persistent_reserve_in, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x00, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 01 READ RESERVATION */ {ctl_persistent_reserve_in, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x01, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 02 REPORT CAPABILITIES */ {ctl_persistent_reserve_in, CTL_SERIDX_INQ, 
CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x02, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 03 READ FULL STATUS */ {ctl_persistent_reserve_in, CTL_SERIDX_INQ, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x03, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 04-1f */ }; /* 5F PERSISTENT RESERVE OUT */ const struct ctl_cmd_entry ctl_cmd_table_5f[32] = { /* 00 REGISTER */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x00, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 01 RESERVE */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x01, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 02 RELEASE */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x02, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 03 CLEAR */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x03, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 04 PREEMPT */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x04, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 05 PREEMPT AND ABORT */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x05, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 06 REGISTER AND IGNORE EXISTING KEY */ {ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, { 0x06, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}}, /* 07 REGISTER AND MOVE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08-1f */ }; /* 83 EXTENDED COPY */ const struct ctl_cmd_entry ctl_cmd_table_83[32] = { /* 00 EXTENDED COPY (LID1) */ {ctl_extended_copy_lid1, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 16, { 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 01 EXTENDED COPY (LID4) */ {ctl_extended_copy_lid4, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 16, { 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 02 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 03 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, 
CTL_LUN_PAT_NONE}, /* 04 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 05 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 POPULATE TOKEN */ {ctl_populate_token, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 16, { 0x10, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 11 WRITE USING TOKEN */ {ctl_write_using_token, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 16, { 0x11, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 12 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 13 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 14 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 15 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 16 SET TAPE STREAM MIRRORING */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 17 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 18 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 19 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1C COPY OPERATION ABORT */ {ctl_copy_operation_abort, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_NONE, CTL_LUN_PAT_NONE, 16, { 0x1c, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x07}}, /* 1D COPY OPERATION CLOSE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1e-1f */ }; /* 84 RECEIVE COPY STATUS */ const struct ctl_cmd_entry ctl_cmd_table_84[32] = { /* 00 RECEIVE COPY STATUS (LID1) */ {ctl_receive_copy_status_lid1, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 16, {0x00, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 01 RECEIVE COPY DATA (LID1) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 03 RECEIVE COPY OPERATING PARAMETERS */ {ctl_receive_copy_operating_parameters, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 16, {0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 04 RECEIVE COPY FAILURE DETAILS (LID1) */ {ctl_receive_copy_failure_details, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 16, {0x04, 0xff, 0, 0, 0, 0, 0, 
0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 05 RECEIVE COPY STATUS (LID4) */ {ctl_receive_copy_status_lid4, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 16, {0x05, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 06 RECEIVE COPY DATA (LID4)*/ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 RECEIVE ROD TOKEN INFORMATION */ {ctl_receive_rod_token_information, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 16, {0x07, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 08 REPORT ALL ROD TOKENS */ {ctl_report_all_rod_tokens, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 16, {0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 11 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 12 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 13 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 14 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 15 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 16 REPORT TAPE STREAM MIRRORING */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 17-1f */ }; /* 9B READ BUFFER(16) */ const struct ctl_cmd_entry ctl_cmd_table_9b[32] = { /* 00 READ BUFFER(16) HDR DATA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 01 READ BUFFER(16) VENDOR */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 READ BUFFER(16) DATA */ {ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 16, {0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 03 READ BUFFER(16) DESCR */ {ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 16, {0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 04 READ BUFFER(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 05 READ BUFFER(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A READ BUFFER(16) ECHO */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0B READ BUFFER(16) ECHO DESCRIPTOR */ {ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | 
CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 16, {0x0b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 11 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 12 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 13 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 14 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 15 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 16 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 17 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 18 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 19 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1C READ BUFFER(16) ERROR HISTORY */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1d-1f */ }; /* 9E SERVICE ACTION IN(16) */ const struct ctl_cmd_entry ctl_cmd_table_9e[32] = { /* 00 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 01 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 03 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 04 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 05 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 READ CAPACITY(16) */ {ctl_read_capacity_16, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_READCAP, 16, {0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 11 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 12 GET LBA STATUS */ {ctl_get_lba_status, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 16, {0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 13-1f */ }; /* A3 MAINTENANCE IN */ const struct ctl_cmd_entry ctl_cmd_table_a3[32] = { /* 00 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 01 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 */ {NULL, CTL_SERIDX_INVLD, 
CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 03 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 04 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 05 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A REPORT TARGET PORT GROUPS */ {ctl_report_tagret_port_groups, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 12, {0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 0B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0C REPORT SUPPORTED_OPCODES */ {ctl_report_supported_opcodes, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 12, {0x0c, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 0D REPORT SUPPORTED_TASK MANAGEMENT FUNCTIONS */ {ctl_report_supported_tmf, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 12, {0x0d, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F REPORT TIMESTAMP */ {ctl_report_timestamp, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 12, {0x0f, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 10-1f */ }; const struct ctl_cmd_entry ctl_cmd_table[256] = { /* 00 TEST UNIT READY */ {ctl_tur, CTL_SERIDX_TUR, CTL_CMD_FLAG_OK_ON_BOTH | CTL_FLAG_DATA_NONE | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_TUR, 6, {0, 0, 0, 0, 0x07}}, /* 01 REWIND */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 02 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 03 REQUEST SENSE */ {ctl_request_sense, CTL_SERIDX_RQ_SNS, CTL_FLAG_DATA_IN | CTL_CMD_FLAG_OK_ON_NO_LUN | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_NO_SENSE | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_CMD_FLAG_ALLOW_ON_PR_RESV | CTL_CMD_FLAG_RUN_HERE, CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0xff, 0x07}}, /* 04 FORMAT UNIT */ {ctl_format, CTL_SERIDX_FORMAT, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 6, {0xff, 0, 0, 0, 0x07}}, /* 05 READ BLOCK LIMITS */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 06 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 07 REASSIGN BLOCKS */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 08 READ(6) */ {ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 6, {0x1f, 0xff, 0xff, 0xff, 0x07}}, /* 09 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0A WRITE(6) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | 
CTL_LUN_PAT_RANGE, 6, {0x1f, 0xff, 0xff, 0xff, 0x07}}, /* 0B SEEK(6) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 0F READ REVERSE(6) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 10 WRITE FILEMARKS(6) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 11 SPACE(6) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 12 INQUIRY */ {ctl_inquiry, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_NO_LUN | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_NO_SENSE | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 6, {0xe1, 0xff, 0xff, 0xff, 0x07}}, /* 13 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 14 RECOVER BUFFERED DATA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 15 MODE SELECT(6) */ {ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 6, {0x13, 0, 0, 0xff, 0x07}}, /* 16 RESERVE(6) */ {ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}}, /* 17 RELEASE(6) */ {ctl_scsi_release, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_NONE, CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}}, /* 18 COPY */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 19 ERASE(6) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1A MODE SENSE(6) */ {ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 6, {0x08, 0xff, 0xff, 0xff, 0x07}}, /* 1B START STOP UNIT */ {ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_CMD_FLAG_OK_ON_CDROM | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_NONE | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 6, {0x01, 0, 0x0f, 0xf7, 0x07}}, /* 1C RECEIVE DIAGNOSTIC RESULTS */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1D SEND DIAGNOSTIC */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 1E PREVENT ALLOW MEDIUM REMOVAL */ {ctl_prevent_allow, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_CMD_FLAG_OK_ON_CDROM | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_FLAG_DATA_NONE, CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0x03, 0x07}}, /* 1F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 20 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 21 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 22 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 23 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 24 SET WINDOW */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 25 READ CAPACITY(10) */ {ctl_read_capacity, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_CMD_FLAG_OK_ON_CDROM | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_READCAP, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}}, /* 26 */ 
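/*
 * Dispatch overview: ctl_cmd_table[] has one entry per possible CDB[0]
 * value.  A NULL execute handler means the opcode is rejected as an
 * invalid operation code, and an entry marked CTL_CMD_FLAG_SA5 carries
 * no handler at all; its first slot instead holds a 32-entry sub-table
 * (ctl_cmd_table_9e and friends above) indexed by the service action
 * in CDB[1] & 0x1f.
 */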
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 27 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 28 READ(10) */ {ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_CMD_FLAG_OK_ON_CDROM | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 10, {0x1a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}}, /* 29 READ GENERATION */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 2A WRITE(10) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 10, {0x1a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}}, /* 2B SEEK(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 2C ERASE(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 2D READ UPDATED BLOCK */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 2E WRITE AND VERIFY(10) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 10, {0x12, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}}, /* 2F VERIFY(10) */ {ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 10, {0x16, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}}, /* 30 SEARCH DATA HIGH(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 31 SEARCH DATA EQUAL(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 32 SEARCH DATA LOW(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 33 SET LIMITS(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 34 PRE-FETCH(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 35 SYNCHRONIZE CACHE(10) */ {ctl_sync_cache, CTL_SERIDX_SYNC, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_NONE, CTL_LUN_PAT_WRITE, 10, {0x06, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}}, /* 36 LOCK UNLOCK CACHE(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 37 READ DEFECT DATA(10) */ {ctl_read_defect, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 10, {0, 0x1f, 0, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 38 MEDIUM SCAN */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 39 COMPARE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 3A COPY AND VERIFY */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 3B WRITE BUFFER */ {__DECONST(ctl_opfunc *, ctl_cmd_table_3b), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 3C READ BUFFER */ {__DECONST(ctl_opfunc *, ctl_cmd_table_3c), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 3D UPDATE BLOCK */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 3E READ LONG */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 3F WRITE LONG */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 40 CHANGE DEFINITION */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 41 WRITE SAME(10) */ {ctl_write_same, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 10, {0x1a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}}, /* 42 READ SUB-CHANNEL / UNMAP */ {ctl_unmap, CTL_SERIDX_UNMAP, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE, 10, {1, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 43 READ TOC/PMA/ATIP */ 
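/*
 * The __DECONST() casts at 3B/3C (and at 5E/5F, 83/84, 9B/9E, A3 below)
 * are what make the service-action indirection type-check: a const
 * sub-table pointer is laundered through the entry's ctl_opfunc * slot.
 * A hypothetical sketch of the lookup on the consuming side, assuming
 * the handler member is named execute as in ctl_private.h:
 *
 *	const struct ctl_cmd_entry *e = &ctl_cmd_table[cdb[0]];
 *	if (e->flags & CTL_CMD_FLAG_SA5)	// service-action opcode
 *		e = &((const struct ctl_cmd_entry *)e->execute)
 *		    [cdb[1] & 0x1f];		// pick the sub-entry
 */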
{ctl_read_toc, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_CDROM | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 10, {0x02, 0x01, 0, 0, 0, 0xff, 0xff, 0xff, 0x07}}, /* 44 REPORT DENSITY SUPPORT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 45 PLAY AUDIO(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 46 GET CONFIGURATION */ {ctl_get_config, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_CDROM | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_ALLOW_ON_PR_RESV | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 10, {0x03, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0x07}}, /* 47 PLAY AUDIO MSF */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 48 PLAY AUDIO TRACK INDEX */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 49 PLAY TRACK RELATIVE(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 4A GET EVENT STATUS NOTIFICATION */ {ctl_get_event_status, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_CDROM | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_ALLOW_ON_PR_RESV | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 10, {0x02, 0x01, 0, 0, 0, 0xff, 0xff, 0xff, 0x07}}, /* 4B PAUSE/RESUME */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 4C LOG SELECT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 4D LOG SENSE */ {ctl_log_sense, CTL_SERIDX_LOG_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_RESV, CTL_LUN_PAT_NONE, 10, {0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0x07} }, /* 4E STOP PLAY/SCAN */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 4F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 50 XDWRITE(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 51 XPWRITE(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 52 XDREAD(10) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 53 RESERVE TRACK */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 54 SEND OPC INFORMATION */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 55 MODE SELECT(10) */ {ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 10, {0x13, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} }, /* 56 RESERVE(10) */ {ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07} }, /* 57 RELEASE(10) */ {ctl_scsi_release, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_NONE, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}}, /* 58 REPAIR TRACK */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 59 READ MASTER CUE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 5A MODE SENSE(10) */ {ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 10, {0x18, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} }, /* 5B CLOSE TRACK/SESSION */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 5C READ BUFFER CAPACITY */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 5D SEND CUE SHEET */ {NULL, CTL_SERIDX_INVLD, 
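/*
 * The CTL_SERIDX_* member is not about execution at all: it indexes
 * CTL's command serialization table, which decides, for each pair of
 * (in-flight, incoming) indices, whether the newcomer may run
 * concurrently, must block, or fails.  That is why reads and writes get
 * distinct indices, most informational commands share CTL_SERIDX_INQ,
 * and unimplemented opcodes sit on CTL_SERIDX_INVLD.
 */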
CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 5E PERSISTENT RESERVE IN */ {__DECONST(ctl_opfunc *, ctl_cmd_table_5e), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 5F PERSISTENT RESERVE OUT */ {__DECONST(ctl_opfunc *, ctl_cmd_table_5f), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 60 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 61 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 62 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 63 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 64 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 65 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 66 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 67 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 68 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 69 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 6A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 6B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 6C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 6D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 6E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 6F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 70 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 71 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 72 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 73 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 74 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 75 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 76 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 77 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 78 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 79 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 7A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 7B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 7C */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 7D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 7E */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 7F */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 80 XDWRITE EXTENDED(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 81 REBUILD(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 82 REGENERATE(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 83 EXTENDED COPY */ {__DECONST(ctl_opfunc *, ctl_cmd_table_83), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 84 RECEIVE COPY RESULTS */ {__DECONST(ctl_opfunc *, ctl_cmd_table_84), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 85 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 86 ACCESS CONTROL IN */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 87 ACCESS CONTROL OUT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 88 READ(16) */ {ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 16, {0x1a, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 89 COMPARE AND WRITE */ {ctl_cnw, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0, 0x07}}, /* 8A WRITE(16) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 16, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 8B */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 8C READ ATTRIBUTE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 8D WRITE ATTRIBUTE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 8E WRITE AND VERIFY(16) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 16, {0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 8F VERIFY(16) */ {ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 16, {0x16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 90 PRE-FETCH(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 91 SYNCHRONIZE CACHE(16) */ {ctl_sync_cache, CTL_SERIDX_SYNC, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_NONE, CTL_LUN_PAT_WRITE, 16, {0x06, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 92 LOCK UNLOCK CACHE(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 93 WRITE SAME(16) */ {ctl_write_same, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 16, {0x1b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* 94 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 95 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 96 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 97 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 98 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 99 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 9A */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 9B READ BUFFER(16) */ {__DECONST(ctl_opfunc *, ctl_cmd_table_9b), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 9C WRITE ATOMIC (16) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0x07}}, /* 9D */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* 9E SERVICE ACTION IN(16) */ {__DECONST(ctl_opfunc *, ctl_cmd_table_9e), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* 9F SERVICE ACTION OUT(16) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A0 REPORT LUNS */ {ctl_report_luns, CTL_SERIDX_INQ, CTL_FLAG_DATA_IN | CTL_CMD_FLAG_OK_ON_NO_LUN | CTL_CMD_FLAG_OK_ON_BOTH | CTL_CMD_FLAG_ALLOW_ON_RESV | CTL_CMD_FLAG_NO_SENSE | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_OK_ON_STANDBY | CTL_CMD_FLAG_OK_ON_UNAVAIL | CTL_CMD_FLAG_ALLOW_ON_PR_RESV | CTL_CMD_FLAG_RUN_HERE, CTL_LUN_PAT_NONE, 12, {0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* A1 BLANK */ {NULL, CTL_SERIDX_INVLD, 
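/*
 * The CTL_CMD_FLAG_OK_ON_* bits gate each opcode on device class and
 * LUN state: OK_ON_DIRECT/OK_ON_CDROM (or OK_ON_BOTH) select the
 * peripheral type, while OK_ON_NO_MEDIA, OK_ON_STANDBY and OK_ON_UNAVAIL
 * let a command through even when the LUN cannot move data.  Hence
 * REPORT LUNS above carries nearly every bit while READ(16) carries
 * none of them.  The ALLOW_ON_PR_RESV/ALLOW_ON_PR_WRESV bits play the
 * analogous role against persistent reservations.
 */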
CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A2 SEND EVENT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A3 MAINTENANCE IN */ {__DECONST(ctl_opfunc *, ctl_cmd_table_a3), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5, CTL_LUN_PAT_NONE}, /* A4 MAINTENANCE OUT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A5 MOVE MEDIUM */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A6 EXCHANGE MEDIUM */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A7 MOVE MEDIUM ATTACHED */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* A8 READ(12) */ {ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_CMD_FLAG_OK_ON_CDROM | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 12, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* A9 PLAY TRACK RELATIVE(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* AA WRITE(12) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 12, {0x1a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* AB SERVICE ACTION IN(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* AC ERASE(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* AD READ DVD STRUCTURE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* AE WRITE AND VERIFY(12) */ {ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_DIRECT| CTL_FLAG_DATA_OUT, CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 12, {0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* AF VERIFY(12) */ {ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_OUT | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 12, {0x16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* B0 SEARCH DATA HIGH(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B1 SEARCH DATA EQUAL(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B2 SEARCH DATA LOW(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B3 SET LIMITS(12) */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B4 READ ELEMENT STATUS ATTACHED */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B5 REQUEST VOLUME ELEMENT ADDRESS */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B6 SEND VOLUME TAG */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B7 READ DEFECT DATA(12) */ {ctl_read_defect, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_DIRECT | CTL_FLAG_DATA_IN | CTL_CMD_FLAG_ALLOW_ON_PR_WRESV, CTL_LUN_PAT_NONE, 12, {0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}}, /* B8 READ ELEMENT STATUS */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* B9 READ CD MSF */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* BA REDUNDANCY GROUP IN */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* BB REDUNDANCY GROUP OUT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* BC SPARE IN */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* BD SPARE OUT / MECHANISM STATUS */ {ctl_mechanism_status, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_CDROM | CTL_CMD_FLAG_OK_ON_NO_MEDIA | CTL_CMD_FLAG_ALLOW_ON_PR_RESV | CTL_FLAG_DATA_IN, CTL_LUN_PAT_NONE, 12, {0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 0x07}}, /* BE VOLUME SET IN */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, 
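/*
 * CTL_LUN_PAT_* feeds the error-injection machinery: an injected error
 * descriptor names a pattern (read, write, readcap, tur, ...) and
 * optionally an LBA range, and only commands whose entry carries
 * matching pattern bits trigger it.  CTL_LUN_PAT_RANGE marks the
 * commands whose CDB supplies an LBA and length worth range-matching.
 */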
CTL_LUN_PAT_NONE}, /* BF VOLUME SET OUT */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C0 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C1 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C2 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C3 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C4 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C5 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C6 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C7 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C8 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* C9 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* CA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* CB */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* CC */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* CD */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* CE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* CF */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D0 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D1 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D2 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D3 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D4 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D5 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D6 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D7 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D8 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* D9 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* DA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* DB */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* DC */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* DD */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* DE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* DF */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E0 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E1 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E2 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E3 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E4 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E5 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E6 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E7 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E8 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* E9 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* EA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* EB */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* EC */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* ED */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* EE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* EF */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, 
CTL_LUN_PAT_NONE}, /* F0 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F1 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F2 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F3 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F4 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F5 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F6 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F7 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F8 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* F9 */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* FA */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* FB */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* FC */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* FD */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* FE */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE}, /* FF */ {NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE} }; Index: head/sys/cam/ctl/ctl_debug.h =================================================================== --- head/sys/cam/ctl/ctl_debug.h (revision 326264) +++ head/sys/cam/ctl/ctl_debug.h (revision 326265) @@ -1,62 +1,64 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_debug.h#2 $ * $FreeBSD$ */ /* * CAM Target Layer debugging interface. * * Author: Ken Merry */ #ifndef _CTL_DEBUG_H_ #define _CTL_DEBUG_H_ /* * Debugging flags. 
*/ typedef enum { CTL_DEBUG_NONE = 0x00, /* no debugging */ CTL_DEBUG_INFO = 0x01, /* SCSI errors */ CTL_DEBUG_CDB = 0x02, /* SCSI CDBs and tasks */ CTL_DEBUG_CDB_DATA = 0x04 /* SCSI CDB DATA */ } ctl_debug_flags; #ifdef CAM_CTL_DEBUG #define CTL_DEBUG_PRINT(X) \ do { \ printf("ctl_debug: "); \ printf X; \ } while (0) #else /* CAM_CTL_DEBUG */ #define CTL_DEBUG_PRINT(X) #endif /* CAM_CTL_DEBUG */ #endif /* _CTL_DEBUG_H_ */ Index: head/sys/cam/ctl/ctl_error.c =================================================================== --- head/sys/cam/ctl/ctl_error.c (revision 326264) +++ head/sys/cam/ctl/ctl_error.c (revision 326265) @@ -1,987 +1,989 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2011 Spectra Logic Corporation * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.c#2 $ */ /* * CAM Target Layer error reporting routines. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void ctl_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len, void *lunptr, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap) { struct ctl_lun *lun; lun = (struct ctl_lun *)lunptr; /* * Determine whether to return fixed or descriptor format sense * data. */ if (sense_format == SSD_TYPE_NONE) { /* * If the format isn't specified, we only return descriptor * sense if the LUN exists and descriptor sense is turned * on for that LUN. */ if ((lun != NULL) && (lun->MODE_CTRL.rlec & SCP_DSENSE)) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; } /* * Determine maximum sense data length to return. 
 */
	if (*sense_len == 0) {
		if ((lun != NULL) && (lun->MODE_CTRLE.max_sense != 0))
			*sense_len = lun->MODE_CTRLE.max_sense;
		else
			*sense_len = SSD_FULL_SIZE;
	}
	scsi_set_sense_data_va(sense_data, sense_len, sense_format,
	    current_error, sense_key, asc, ascq, ap);
}

void
ctl_set_sense_data(struct scsi_sense_data *sense_data, u_int *sense_len,
		   void *lunptr, scsi_sense_data_type sense_format,
		   int current_error, int sense_key, int asc, int ascq, ...)
{
	va_list ap;

	va_start(ap, ascq);
	ctl_set_sense_data_va(sense_data, sense_len, lunptr, sense_format,
	    current_error, sense_key, asc, ascq, ap);
	va_end(ap);
}

void
ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
	      int asc, int ascq, ...)
{
	va_list ap;
	struct ctl_lun *lun;
	u_int sense_len;

	/*
	 * The LUN can't go away until all of the commands have been
	 * completed. Therefore we can safely access the LUN structure and
	 * flags without the lock.
	 */
	lun = CTL_LUN(ctsio);

	va_start(ap, ascq);
	sense_len = 0;
	ctl_set_sense_data_va(&ctsio->sense_data, &sense_len, lun,
	    SSD_TYPE_NONE, current_error, sense_key, asc, ascq, ap);
	va_end(ap);

	ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
	ctsio->sense_len = sense_len;
	ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
}

/*
 * Transform fixed sense data into descriptor sense data.
 *
 * For simplicity's sake, we assume that both sense structures are
 * SSD_FULL_SIZE. Otherwise, the logic gets more complicated.
 */
void
ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
		  struct scsi_sense_data_desc *sense_dest)
{
	struct scsi_sense_stream stream_sense;
	int current_error;
	u_int sense_len;
	uint8_t stream_bits;

	bzero(sense_dest, sizeof(*sense_dest));

	if ((sense_src->error_code & SSD_ERRCODE) == SSD_DEFERRED_ERROR)
		current_error = 0;
	else
		current_error = 1;

	bzero(&stream_sense, sizeof(stream_sense));

	/*
	 * Check to see whether any of the tape-specific bits are set. If
	 * so, we'll need a stream sense descriptor.
	 */
	if (sense_src->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK))
		stream_bits = sense_src->flags & ~SSD_KEY;
	else
		stream_bits = 0;

	/*
	 * Utilize our sense setting routine to do the transform. If a
	 * value is set in the fixed sense data, set it in the descriptor
	 * data. Otherwise, skip it.
	 */
	sense_len = SSD_FULL_SIZE;
	ctl_set_sense_data((struct scsi_sense_data *)sense_dest,
	    &sense_len,
	    /*lun*/ NULL,
	    /*sense_format*/ SSD_TYPE_DESC,
	    current_error,
	    /*sense_key*/ sense_src->flags & SSD_KEY,
	    /*asc*/ sense_src->add_sense_code,
	    /*ascq*/ sense_src->add_sense_code_qual,
	    /* Information Bytes */
	    (sense_src->error_code & SSD_ERRCODE_VALID) ?
	    SSD_ELEM_INFO : SSD_ELEM_SKIP,
	    sizeof(sense_src->info),
	    sense_src->info,
	    /* Command specific bytes */
	    (scsi_4btoul(sense_src->cmd_spec_info) != 0) ?
	    SSD_ELEM_COMMAND : SSD_ELEM_SKIP,
	    sizeof(sense_src->cmd_spec_info),
	    sense_src->cmd_spec_info,
	    /* FRU */
	    (sense_src->fru != 0) ?
	    SSD_ELEM_FRU : SSD_ELEM_SKIP,
	    sizeof(sense_src->fru),
	    &sense_src->fru,
	    /* Sense Key Specific */
	    (sense_src->sense_key_spec[0] & SSD_SCS_VALID) ?
	    SSD_ELEM_SKS : SSD_ELEM_SKIP,
	    sizeof(sense_src->sense_key_spec),
	    sense_src->sense_key_spec,
	    /* Tape bits */
	    (stream_bits != 0) ?
	    SSD_ELEM_STREAM : SSD_ELEM_SKIP,
	    sizeof(stream_bits),
	    &stream_bits,
	    SSD_ELEM_NONE);
}

/*
 * Transform descriptor format sense data into fixed sense data.
 *
 * Some data may be lost in translation, because there are descriptors
 * that can't be represented as fixed sense data.
 *
 * For simplicity's sake, we assume that both sense structures are
 * SSD_FULL_SIZE. Otherwise, the logic gets more complicated.
*/ void ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src, struct scsi_sense_data_fixed *sense_dest) { int current_error; uint8_t *info_ptr = NULL, *cmd_ptr = NULL, *fru_ptr = NULL; uint8_t *sks_ptr = NULL, *stream_ptr = NULL; int info_size = 0, cmd_size = 0, fru_size = 0; int sks_size = 0, stream_size = 0; int pos; u_int sense_len; if ((sense_src->error_code & SSD_ERRCODE) == SSD_DESC_CURRENT_ERROR) current_error = 1; else current_error = 0; for (pos = 0; pos < (int)(sense_src->extra_len - 1);) { struct scsi_sense_desc_header *header; header = (struct scsi_sense_desc_header *) &sense_src->sense_desc[pos]; /* * See if this record goes past the end of the sense data. * It shouldn't, but check just in case. */ if ((pos + header->length + sizeof(*header)) > sense_src->extra_len) break; switch (sense_src->sense_desc[pos]) { case SSD_DESC_INFO: { struct scsi_sense_info *info; info = (struct scsi_sense_info *)header; info_ptr = info->info; info_size = sizeof(info->info); pos += info->length + sizeof(struct scsi_sense_desc_header); break; } case SSD_DESC_COMMAND: { struct scsi_sense_command *cmd; cmd = (struct scsi_sense_command *)header; cmd_ptr = cmd->command_info; cmd_size = sizeof(cmd->command_info); pos += cmd->length + sizeof(struct scsi_sense_desc_header); break; } case SSD_DESC_FRU: { struct scsi_sense_fru *fru; fru = (struct scsi_sense_fru *)header; fru_ptr = &fru->fru; fru_size = sizeof(fru->fru); pos += fru->length + sizeof(struct scsi_sense_desc_header); break; } case SSD_DESC_SKS: { struct scsi_sense_sks *sks; sks = (struct scsi_sense_sks *)header; sks_ptr = sks->sense_key_spec; sks_size = sizeof(sks->sense_key_spec); pos = sks->length + sizeof(struct scsi_sense_desc_header); break; } case SSD_DESC_STREAM: { struct scsi_sense_stream *stream_sense; stream_sense = (struct scsi_sense_stream *)header; stream_ptr = &stream_sense->byte3; stream_size = sizeof(stream_sense->byte3); pos = stream_sense->length + sizeof(struct scsi_sense_desc_header); break; } default: /* * We don't recognize this particular sense * descriptor type, so just skip it. */ pos += sizeof(*header) + header->length; break; } } sense_len = SSD_FULL_SIZE; ctl_set_sense_data((struct scsi_sense_data *)sense_dest, &sense_len, /*lun*/ NULL, /*sense_format*/ SSD_TYPE_FIXED, current_error, /*sense_key*/ sense_src->sense_key & SSD_KEY, /*asc*/ sense_src->add_sense_code, /*ascq*/ sense_src->add_sense_code_qual, /* Information Bytes */ (info_ptr != NULL) ? SSD_ELEM_INFO : SSD_ELEM_SKIP, info_size, info_ptr, /* Command specific bytes */ (cmd_ptr != NULL) ? SSD_ELEM_COMMAND : SSD_ELEM_SKIP, cmd_size, cmd_ptr, /* FRU */ (fru_ptr != NULL) ? SSD_ELEM_FRU : SSD_ELEM_SKIP, fru_size, fru_ptr, /* Sense Key Specific */ (sks_ptr != NULL) ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sks_size, sks_ptr, /* Tape bits */ (stream_ptr != NULL) ? 
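/*
 * The argument tail here follows ctl_set_sense_data()'s vararg
 * convention, visible in both translation routines: repeated
 * (element type, size, data pointer) triples, with SSD_ELEM_SKIP
 * consuming its triple without emitting anything and SSD_ELEM_NONE
 * terminating the list.  That is what lets the code pass every
 * candidate element unconditionally and select the survivors with ?:.
 */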
SSD_ELEM_STREAM : SSD_ELEM_SKIP, stream_size, stream_ptr, SSD_ELEM_NONE); } void ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_UNIT_ATTENTION, asc, ascq, SSD_ELEM_NONE); } static void ctl_ua_to_ascq(struct ctl_lun *lun, ctl_ua_type ua_to_build, int *asc, int *ascq, ctl_ua_type *ua_to_clear, uint8_t **info) { switch (ua_to_build) { case CTL_UA_POWERON: /* 29h/01h POWER ON OCCURRED */ *asc = 0x29; *ascq = 0x01; *ua_to_clear = ~0; break; case CTL_UA_BUS_RESET: /* 29h/02h SCSI BUS RESET OCCURRED */ *asc = 0x29; *ascq = 0x02; *ua_to_clear = ~0; break; case CTL_UA_TARG_RESET: /* 29h/03h BUS DEVICE RESET FUNCTION OCCURRED*/ *asc = 0x29; *ascq = 0x03; *ua_to_clear = ~0; break; case CTL_UA_I_T_NEXUS_LOSS: /* 29h/07h I_T NEXUS LOSS OCCURRED */ *asc = 0x29; *ascq = 0x07; *ua_to_clear = ~0; break; case CTL_UA_LUN_RESET: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ /* * Since we don't have a specific ASC/ASCQ pair for a LUN * reset, just return the generic reset code. */ *asc = 0x29; *ascq = 0x00; break; case CTL_UA_LUN_CHANGE: /* 3Fh/0Eh REPORTED LUNS DATA HAS CHANGED */ *asc = 0x3F; *ascq = 0x0E; break; case CTL_UA_MODE_CHANGE: /* 2Ah/01h MODE PARAMETERS CHANGED */ *asc = 0x2A; *ascq = 0x01; break; case CTL_UA_LOG_CHANGE: /* 2Ah/02h LOG PARAMETERS CHANGED */ *asc = 0x2A; *ascq = 0x02; break; case CTL_UA_INQ_CHANGE: /* 3Fh/03h INQUIRY DATA HAS CHANGED */ *asc = 0x3F; *ascq = 0x03; break; case CTL_UA_RES_PREEMPT: /* 2Ah/03h RESERVATIONS PREEMPTED */ *asc = 0x2A; *ascq = 0x03; break; case CTL_UA_RES_RELEASE: /* 2Ah/04h RESERVATIONS RELEASED */ *asc = 0x2A; *ascq = 0x04; break; case CTL_UA_REG_PREEMPT: /* 2Ah/05h REGISTRATIONS PREEMPTED */ *asc = 0x2A; *ascq = 0x05; break; case CTL_UA_ASYM_ACC_CHANGE: /* 2Ah/06h ASYMMETRIC ACCESS STATE CHANGED */ *asc = 0x2A; *ascq = 0x06; break; case CTL_UA_CAPACITY_CHANGE: /* 2Ah/09h CAPACITY DATA HAS CHANGED */ *asc = 0x2A; *ascq = 0x09; break; case CTL_UA_THIN_PROV_THRES: /* 38h/07h THIN PROVISIONING SOFT THRESHOLD REACHED */ *asc = 0x38; *ascq = 0x07; *info = lun->ua_tpt_info; break; case CTL_UA_MEDIUM_CHANGE: /* 28h/00h NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ *asc = 0x28; *ascq = 0x00; break; case CTL_UA_IE: /* Informational exception */ *asc = lun->ie_asc; *ascq = lun->ie_ascq; break; default: panic("%s: Unknown UA %x", __func__, ua_to_build); } } ctl_ua_type ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp) { ctl_ua_type ua; ctl_ua_type ua_to_build, ua_to_clear; uint8_t *info; int asc, ascq; uint32_t p, i; mtx_assert(&lun->lun_lock, MA_OWNED); p = initidx / CTL_MAX_INIT_PER_PORT; i = initidx % CTL_MAX_INIT_PER_PORT; if (lun->pending_ua[p] == NULL) ua = CTL_UA_POWERON; else ua = lun->pending_ua[p][i]; if (ua == CTL_UA_NONE) return (CTL_UA_NONE); ua_to_build = (1 << (ffs(ua) - 1)); ua_to_clear = ua_to_build; info = NULL; ctl_ua_to_ascq(lun, ua_to_build, &asc, &ascq, &ua_to_clear, &info); resp[0] = SSD_KEY_UNIT_ATTENTION; if (ua_to_build == ua) resp[0] |= 0x10; else resp[0] |= 0x20; resp[1] = asc; resp[2] = ascq; return (ua_to_build); } ctl_ua_type ctl_build_ua(struct ctl_lun *lun, uint32_t initidx, struct scsi_sense_data *sense, u_int *sense_len, scsi_sense_data_type sense_format) { ctl_ua_type *ua; ctl_ua_type ua_to_build, ua_to_clear; uint8_t *info; int asc, ascq; uint32_t p, i; mtx_assert(&lun->lun_lock, MA_OWNED); mtx_assert(&lun->ctl_softc->ctl_lock, MA_NOTOWNED); p = initidx / CTL_MAX_INIT_PER_PORT; if ((ua = lun->pending_ua[p]) == NULL) 
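/*
 * Classic unlock-allocate-relock dance follows: the per-port UA array
 * is allocated lazily with M_WAITOK, which may sleep, so lun_lock is
 * dropped around the malloc and the NULL check is repeated after
 * reacquiring it.  If another thread won the race in that window the
 * fresh array is freed and the winner's is used.  New slots start as
 * CTL_UA_POWERON, so a never-before-seen initiator reports a power-on
 * unit attention first.
 */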
{ mtx_unlock(&lun->lun_lock); ua = malloc(sizeof(ctl_ua_type) * CTL_MAX_INIT_PER_PORT, M_CTL, M_WAITOK); mtx_lock(&lun->lun_lock); if (lun->pending_ua[p] == NULL) { lun->pending_ua[p] = ua; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) ua[i] = CTL_UA_POWERON; } else { free(ua, M_CTL); ua = lun->pending_ua[p]; } } i = initidx % CTL_MAX_INIT_PER_PORT; if (ua[i] == CTL_UA_NONE) return (CTL_UA_NONE); ua_to_build = (1 << (ffs(ua[i]) - 1)); ua_to_clear = ua_to_build; info = NULL; ctl_ua_to_ascq(lun, ua_to_build, &asc, &ascq, &ua_to_clear, &info); ctl_set_sense_data(sense, sense_len, lun, sense_format, 1, /*sense_key*/ SSD_KEY_UNIT_ATTENTION, asc, ascq, ((info != NULL) ? SSD_ELEM_INFO : SSD_ELEM_SKIP), 8, info, SSD_ELEM_NONE); /* We're reporting this UA, so clear it */ ua[i] &= ~ua_to_clear; if (ua_to_build == CTL_UA_LUN_CHANGE) { mtx_unlock(&lun->lun_lock); mtx_lock(&lun->ctl_softc->ctl_lock); ctl_clr_ua_allluns(lun->ctl_softc, initidx, ua_to_build); mtx_unlock(&lun->ctl_softc->ctl_lock); mtx_lock(&lun->lun_lock); } else if (ua_to_build == CTL_UA_THIN_PROV_THRES && (lun->MODE_LBP.main.flags & SLBPP_SITUA) != 0) { ctl_clr_ua_all(lun, -1, ua_to_build); } return (ua_to_build); } void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio) { /* OVERLAPPED COMMANDS ATTEMPTED */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x4E, /*ascq*/ 0x00, SSD_ELEM_NONE); } void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag) { /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x4D, /*ascq*/ tag, SSD_ELEM_NONE); } /* * Tell the user that there was a problem with the command or data he sent. */ void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command, int field, int bit_valid, int bit) { uint8_t sks[3]; int asc; if (command != 0) { /* "Invalid field in CDB" */ asc = 0x24; } else { /* "Invalid field in parameter list" */ asc = 0x26; } if (sks_valid) { sks[0] = SSD_SCS_VALID; if (command) sks[0] |= SSD_FIELDPTR_CMD; scsi_ulto2b(field, &sks[1]); if (bit_valid) sks[0] |= SSD_BITPTR_VALID | bit; } ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, asc, /*ascq*/ 0x00, /*type*/ (sks_valid != 0) ? 
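/*
 * The sks[] bytes built above use the SPC sense-key-specific
 * field-pointer format: byte 0 carries the valid bit, a C/D bit for CDB
 * versus parameter list, and an optional bit pointer; bytes 1-2 hold
 * the big-endian offset of the offending field.  Assuming the usual
 * scsi_all.h values (SSD_SCS_VALID 0x80, SSD_FIELDPTR_CMD 0x40,
 * SSD_BITPTR_VALID 0x08), flagging bit 2 of CDB byte 4 would yield
 * sks = { 0xca, 0x00, 0x04 }.
 */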
	    SSD_ELEM_SKS : SSD_ELEM_SKIP,
	    /*size*/ sizeof(sks),
	    /*data*/ sks,
	    SSD_ELEM_NONE);
}

void
ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
{

	/* "Invalid field in command information unit" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
		      /*asc*/ 0x0E,
		      /*ascq*/ 0x03,
		      SSD_ELEM_NONE);
}

void
ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
{
	uint8_t sks[3];

	sks[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD;
	scsi_ulto2b(0, &sks[1]);

	/* "Invalid command operation code" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		      /*asc*/ 0x20,
		      /*ascq*/ 0x00,
		      /*type*/ SSD_ELEM_SKS,
		      /*size*/ sizeof(sks),
		      /*data*/ sks,
		      SSD_ELEM_NONE);
}

void
ctl_set_param_len_error(struct ctl_scsiio *ctsio)
{

	/* "Parameter list length error" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		      /*asc*/ 0x1a,
		      /*ascq*/ 0x00,
		      SSD_ELEM_NONE);
}

void
ctl_set_already_locked(struct ctl_scsiio *ctsio)
{

	/* Vendor unique "Somebody already is locked" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		      /*asc*/ 0x81,
		      /*ascq*/ 0x00,
		      SSD_ELEM_NONE);
}

void
ctl_set_unsupported_lun(struct ctl_scsiio *ctsio)
{

	/* "Logical unit not supported" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		      /*asc*/ 0x25,
		      /*ascq*/ 0x00,
		      SSD_ELEM_NONE);
}

void
ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
			 uint16_t retry_count)
{
	uint8_t sks[3];

	if (sks_valid) {
		sks[0] = SSD_SCS_VALID;
		sks[1] = (retry_count >> 8) & 0xff;
		sks[2] = retry_count & 0xff;
	}

	/* "Internal target failure" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
		      /*asc*/ 0x44,
		      /*ascq*/ 0x00,
		      /*type*/ (sks_valid != 0) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
		      /*size*/ sizeof(sks),
		      /*data*/ sks,
		      SSD_ELEM_NONE);
}

void
ctl_set_medium_error(struct ctl_scsiio *ctsio, int read)
{
	if (read) {
		/* "Unrecovered read error" */
		ctl_set_sense(ctsio,
			      /*current_error*/ 1,
			      /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
			      /*asc*/ 0x11,
			      /*ascq*/ 0x00,
			      SSD_ELEM_NONE);
	} else {
		/* "Write error - auto reallocation failed" */
		ctl_set_sense(ctsio,
			      /*current_error*/ 1,
			      /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
			      /*asc*/ 0x0C,
			      /*ascq*/ 0x02,
			      SSD_ELEM_NONE);
	}
}

void
ctl_set_aborted(struct ctl_scsiio *ctsio)
{
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
		      /*asc*/ 0x45,
		      /*ascq*/ 0x00,
		      SSD_ELEM_NONE);
}

void
ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio, uint64_t lba)
{
	uint8_t info[8];

	scsi_u64to8b(lba, info);

	/* "Logical block address out of range" */
	ctl_set_sense(ctsio,
		      /*current_error*/ 1,
		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		      /*asc*/ 0x21,
		      /*ascq*/ 0x00,
		      /*type*/ (lba != 0) ? SSD_ELEM_INFO : SSD_ELEM_SKIP,
		      /*size*/ sizeof(info),
		      /*data*/ &info,
		      SSD_ELEM_NONE);
}

void
ctl_set_lun_stopped(struct ctl_scsiio *ctsio)
{

	/* "Logical unit not ready, initializing cmd.
required" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x04, /*ascq*/ 0x02, SSD_ELEM_NONE); } void ctl_set_lun_int_reqd(struct ctl_scsiio *ctsio) { /* "Logical unit not ready, manual intervention required" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x04, /*ascq*/ 0x03, SSD_ELEM_NONE); } void ctl_set_lun_ejected(struct ctl_scsiio *ctsio) { /* "Medium not present - tray open" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x3A, /*ascq*/ 0x02, SSD_ELEM_NONE); } void ctl_set_lun_no_media(struct ctl_scsiio *ctsio) { /* "Medium not present - tray closed" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x3A, /*ascq*/ 0x01, SSD_ELEM_NONE); } void ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio) { /* "Invalid release of persistent reservation" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x26, /*ascq*/ 0x04, SSD_ELEM_NONE); } void ctl_set_lun_transit(struct ctl_scsiio *ctsio) { /* "Logical unit not ready, asymmetric access state transition" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x04, /*ascq*/ 0x0a, SSD_ELEM_NONE); } void ctl_set_lun_standby(struct ctl_scsiio *ctsio) { /* "Logical unit not ready, target port in standby state" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x04, /*ascq*/ 0x0b, SSD_ELEM_NONE); } void ctl_set_lun_unavail(struct ctl_scsiio *ctsio) { /* "Logical unit not ready, target port in unavailable state" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x04, /*ascq*/ 0x0c, SSD_ELEM_NONE); } void ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio) { /* "Medium format corrupted" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MEDIUM_ERROR, /*asc*/ 0x31, /*ascq*/ 0x00, SSD_ELEM_NONE); } void ctl_set_medium_magazine_inaccessible(struct ctl_scsiio *ctsio) { /* "Medium magazine not accessible" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x3b, /*ascq*/ 0x11, SSD_ELEM_NONE); } void ctl_set_data_phase_error(struct ctl_scsiio *ctsio) { /* "Data phase error" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NOT_READY, /*asc*/ 0x4b, /*ascq*/ 0x00, SSD_ELEM_NONE); } void ctl_set_reservation_conflict(struct ctl_scsiio *ctsio) { ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; ctsio->sense_len = 0; ctsio->io_hdr.status = CTL_SCSI_ERROR; } void ctl_set_queue_full(struct ctl_scsiio *ctsio) { ctsio->scsi_status = SCSI_STATUS_QUEUE_FULL; ctsio->sense_len = 0; ctsio->io_hdr.status = CTL_SCSI_ERROR; } void ctl_set_busy(struct ctl_scsiio *ctsio) { ctsio->scsi_status = SCSI_STATUS_BUSY; ctsio->sense_len = 0; ctsio->io_hdr.status = CTL_SCSI_ERROR; } void ctl_set_task_aborted(struct ctl_scsiio *ctsio) { ctsio->scsi_status = SCSI_STATUS_TASK_ABORTED; ctsio->sense_len = 0; ctsio->io_hdr.status = CTL_CMD_ABORTED; } void ctl_set_hw_write_protected(struct ctl_scsiio *ctsio) { /* "Hardware write protected" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); } void ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio) { /* "Space allocation failed write protect" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x07, SSD_ELEM_NONE); } void ctl_set_success(struct ctl_scsiio 
*ctsio) { ctsio->scsi_status = SCSI_STATUS_OK; ctsio->sense_len = 0; ctsio->io_hdr.status = CTL_SUCCESS; } Index: head/sys/cam/ctl/ctl_error.h =================================================================== --- head/sys/cam/ctl/ctl_error.h (revision 326264) +++ head/sys/cam/ctl/ctl_error.h (revision 326265) @@ -1,98 +1,100 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.h#1 $ * $FreeBSD$ */ /* * Function definitions for various error reporting routines used both * within CTL and various CTL clients. 
* * Author: Ken Merry */ #include #ifndef _CTL_ERROR_H_ #define _CTL_ERROR_H_ struct ctl_lun; void ctl_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len, void *lun, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap); void ctl_set_sense_data(struct scsi_sense_data *sense_data, u_int *sense_len, void *lun, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, ...); void ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key, int asc, int ascq, ...); void ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src, struct scsi_sense_data_desc *sense_dest); void ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src, struct scsi_sense_data_fixed *sense_dest); void ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq); ctl_ua_type ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp); ctl_ua_type ctl_build_ua(struct ctl_lun *lun, uint32_t initidx, struct scsi_sense_data *sense, u_int *sense_len, scsi_sense_data_type sense_format); void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio); void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag); void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command, int field, int bit_valid, int bit); void ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio); void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio); void ctl_set_param_len_error(struct ctl_scsiio *ctsio); void ctl_set_already_locked(struct ctl_scsiio *ctsio); void ctl_set_unsupported_lun(struct ctl_scsiio *ctsio); void ctl_set_lun_transit(struct ctl_scsiio *ctsio); void ctl_set_lun_standby(struct ctl_scsiio *ctsio); void ctl_set_lun_unavail(struct ctl_scsiio *ctsio); void ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid, uint16_t retry_count); void ctl_set_medium_error(struct ctl_scsiio *ctsio, int read); void ctl_set_aborted(struct ctl_scsiio *ctsio); void ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio, uint64_t lba); void ctl_set_lun_stopped(struct ctl_scsiio *ctsio); void ctl_set_lun_int_reqd(struct ctl_scsiio *ctsio); void ctl_set_lun_ejected(struct ctl_scsiio *ctsio); void ctl_set_lun_no_media(struct ctl_scsiio *ctsio); void ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio); void ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio); void ctl_set_medium_magazine_inaccessible(struct ctl_scsiio *ctsio); void ctl_set_data_phase_error(struct ctl_scsiio *ctsio); void ctl_set_reservation_conflict(struct ctl_scsiio *ctsio); void ctl_set_queue_full(struct ctl_scsiio *ctsio); void ctl_set_busy(struct ctl_scsiio *ctsio); void ctl_set_task_aborted(struct ctl_scsiio *ctsio); void ctl_set_hw_write_protected(struct ctl_scsiio *ctsio); void ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio); void ctl_set_success(struct ctl_scsiio *ctsio); #endif /* _CTL_ERROR_H_ */ Index: head/sys/cam/ctl/ctl_frontend.c =================================================================== --- head/sys/cam/ctl/ctl_frontend.c (revision 326264) +++ head/sys/cam/ctl/ctl_frontend.c (revision 326265) @@ -1,389 +1,391 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.c#4 $ */ /* * CAM Target Layer front end interface code * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX KDM move defines from ctl_ioctl.h to somewhere else */ #include #include #include #include extern struct ctl_softc *control_softc; int ctl_frontend_register(struct ctl_frontend *fe) { struct ctl_softc *softc = control_softc; struct ctl_frontend *fe_tmp; int error; KASSERT(softc != NULL, ("CTL is not initialized")); /* Sanity check, make sure this isn't a duplicate registration. */ mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(fe_tmp, &softc->fe_list, links) { if (strcmp(fe_tmp->name, fe->name) == 0) { mtx_unlock(&softc->ctl_lock); return (-1); } } mtx_unlock(&softc->ctl_lock); STAILQ_INIT(&fe->port_list); /* Call the frontend's initialization routine. 
*/ if (fe->init != NULL) { if ((error = fe->init()) != 0) { printf("%s frontend init error: %d\n", fe->name, error); return (error); } } mtx_lock(&softc->ctl_lock); softc->num_frontends++; STAILQ_INSERT_TAIL(&softc->fe_list, fe, links); mtx_unlock(&softc->ctl_lock); return (0); } int ctl_frontend_deregister(struct ctl_frontend *fe) { struct ctl_softc *softc = control_softc; int error; /* Call the frontend's shutdown routine.*/ if (fe->shutdown != NULL) { if ((error = fe->shutdown()) != 0) { printf("%s frontend shutdown error: %d\n", fe->name, error); return (error); } } mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->fe_list, fe, ctl_frontend, links); softc->num_frontends--; mtx_unlock(&softc->ctl_lock); return (0); } struct ctl_frontend * ctl_frontend_find(char *frontend_name) { struct ctl_softc *softc = control_softc; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(fe, &softc->fe_list, links) { if (strcmp(fe->name, frontend_name) == 0) { mtx_unlock(&softc->ctl_lock); return (fe); } } mtx_unlock(&softc->ctl_lock); return (NULL); } int ctl_port_register(struct ctl_port *port) { struct ctl_softc *softc = control_softc; struct ctl_port *tport, *nport; void *pool; int port_num; int retval; KASSERT(softc != NULL, ("CTL is not initialized")); port->ctl_softc = softc; mtx_lock(&softc->ctl_lock); if (port->targ_port >= 0) port_num = port->targ_port; else port_num = ctl_ffz(softc->ctl_port_mask, softc->port_min, softc->port_max); if ((port_num < 0) || (ctl_set_mask(softc->ctl_port_mask, port_num) < 0)) { mtx_unlock(&softc->ctl_lock); return (1); } softc->num_ports++; mtx_unlock(&softc->ctl_lock); /* * Initialize the initiator and portname mappings */ port->max_initiators = CTL_MAX_INIT_PER_PORT; port->wwpn_iid = malloc(sizeof(*port->wwpn_iid) * port->max_initiators, M_CTL, M_NOWAIT | M_ZERO); if (port->wwpn_iid == NULL) { retval = ENOMEM; goto error; } /* * We add 20 to whatever the caller requests, so he doesn't get * burned by queueing things back to the pending sense queue. In * theory, there should probably only be one outstanding item, at * most, on the pending sense queue for a LUN. We'll clear the * pending sense queue on the next command, whether or not it is * a REQUEST SENSE. 
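 *
 * As a rough, hedged illustration of that arithmetic (the numbers are
 * hypothetical, not from this source): a FETD that requests
 * num_requested_ctl_io = 1024 ends up with a pool created for
 * 1024 + 20 = 1044 ctl_io structures, the extra 20 serving as
 * pending-sense headroom:
 *
 *	retval = ctl_pool_create(softc, "example", 1024 + 20, &pool);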
*/ retval = ctl_pool_create(softc, port->port_name, port->num_requested_ctl_io + 20, &pool); if (retval != 0) { free(port->wwpn_iid, M_CTL); error: port->targ_port = -1; mtx_lock(&softc->ctl_lock); ctl_clear_mask(softc->ctl_port_mask, port_num); mtx_unlock(&softc->ctl_lock); return (retval); } port->targ_port = port_num; port->ctl_pool_ref = pool; if (port->options.stqh_first == NULL) STAILQ_INIT(&port->options); port->stats.item = port_num; mtx_init(&port->port_lock, "CTL port", NULL, MTX_DEF); mtx_lock(&softc->ctl_lock); STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links); for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list); nport != NULL && nport->targ_port < port_num; tport = nport, nport = STAILQ_NEXT(tport, links)) { } if (tport) STAILQ_INSERT_AFTER(&softc->port_list, tport, port, links); else STAILQ_INSERT_HEAD(&softc->port_list, port, links); softc->ctl_ports[port->targ_port] = port; mtx_unlock(&softc->ctl_lock); return (retval); } int ctl_port_deregister(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_io_pool *pool = (struct ctl_io_pool *)port->ctl_pool_ref; int i; if (port->targ_port == -1) return (1); mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->port_list, port, ctl_port, links); STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links); softc->num_ports--; ctl_clear_mask(softc->ctl_port_mask, port->targ_port); softc->ctl_ports[port->targ_port] = NULL; mtx_unlock(&softc->ctl_lock); ctl_pool_free(pool); ctl_free_opts(&port->options); ctl_lun_map_deinit(port); free(port->port_devid, M_CTL); port->port_devid = NULL; free(port->target_devid, M_CTL); port->target_devid = NULL; free(port->init_devid, M_CTL); port->init_devid = NULL; for (i = 0; i < port->max_initiators; i++) free(port->wwpn_iid[i].name, M_CTL); free(port->wwpn_iid, M_CTL); mtx_destroy(&port->port_lock); return (0); } void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn, int wwpn_valid, uint64_t wwpn) { struct scsi_vpd_id_descriptor *desc; int len, proto; if (port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port->port_type == CTL_PORT_SAS) proto = SCSI_PROTO_SAS << 4; else if (port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; if (wwnn_valid) { port->wwnn = wwnn; free(port->target_devid, M_CTL); len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_NAA; desc->length = CTL_WWPN_LEN; scsi_u64to8b(port->wwnn, desc->identifier); } if (wwpn_valid) { port->wwpn = wwpn; free(port->port_devid, M_CTL); len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA; desc->length = CTL_WWPN_LEN; scsi_u64to8b(port->wwpn, desc->identifier); } } void ctl_port_online(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; const char *value; uint32_t l; if (port->lun_enable != NULL) { if (port->lun_map) { for (l = 0; l < port->lun_map_size; l++) { if 
(ctl_lun_map_from_port(port, l) == UINT32_MAX) continue; port->lun_enable(port->targ_lun_arg, l); } } else { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_enable(port->targ_lun_arg, lun->lun); } } if (port->port_online != NULL) port->port_online(port->onoff_arg); mtx_lock(&softc->ctl_lock); if (softc->is_single == 0) { value = ctl_get_opt(&port->options, "ha_shared"); if (value != NULL && strcmp(value, "on") == 0) port->status |= CTL_PORT_STATUS_HA_SHARED; else port->status &= ~CTL_PORT_STATUS_HA_SHARED; } port->status |= CTL_PORT_STATUS_ONLINE; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_port(port); } void ctl_port_offline(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; uint32_t l; if (port->port_offline != NULL) port->port_offline(port->onoff_arg); if (port->lun_disable != NULL) { if (port->lun_map) { for (l = 0; l < port->lun_map_size; l++) { if (ctl_lun_map_from_port(port, l) == UINT32_MAX) continue; port->lun_disable(port->targ_lun_arg, l); } } else { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_disable(port->targ_lun_arg, lun->lun); } } mtx_lock(&softc->ctl_lock); port->status &= ~CTL_PORT_STATUS_ONLINE; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_port(port); } /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_frontend.h =================================================================== --- head/sys/cam/ctl/ctl_frontend.h (revision 326264) +++ head/sys/cam/ctl/ctl_frontend.h (revision 326265) @@ -1,336 +1,338 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.h#2 $ * $FreeBSD$ */ /* * CAM Target Layer front end registration hooks * * Author: Ken Merry */ #ifndef _CTL_FRONTEND_H_ #define _CTL_FRONTEND_H_ #include typedef enum { CTL_PORT_STATUS_NONE = 0x00, CTL_PORT_STATUS_ONLINE = 0x01, CTL_PORT_STATUS_HA_SHARED = 0x02 } ctl_port_status; typedef int (*fe_init_t)(void); typedef int (*fe_shutdown_t)(void); typedef void (*port_func_t)(void *onoff_arg); typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb); typedef int (*lun_func_t)(void *arg, int lun_id); typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); #define CTL_FRONTEND_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ return (ctl_frontend_register( \ (struct ctl_frontend *)data)); \ break; \ case MOD_UNLOAD: \ return (ctl_frontend_deregister( \ (struct ctl_frontend *)data)); \ break; \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \ MODULE_DEPEND(name, ctl, 1, 1, 1); \ MODULE_DEPEND(name, cam, 1, 1, 1) struct ctl_wwpn_iid { int in_use; time_t last_use; uint64_t wwpn; char *name; }; /* * The ctl_frontend structure is the registration mechanism between a FETD * (Front End Target Driver) and the CTL layer. Here is a description of * the fields: * * port_type: This field tells CTL what kind of front end it is * dealing with. This field serves two purposes. * The first is to let CTL know whether the frontend * in question is inside the main CTL module (i.e. * the ioctl front end), and therefore its module * reference count shouldn't be incremented. The * CTL ioctl front end should continue to use the * CTL_PORT_IOCTL argument as long as it is part of * the main CTL module. The second is to let CTL * know what kind of front end it is dealing with, so * it can return the proper inquiry data for that * particular port. * * num_requested_ctl_io: This is the number of ctl_io structures that the * front end needs for its pool. This should * generally be the maximum number of outstanding * transactions that the FETD can handle. The CTL * layer will add a few to this to account for * ctl_io buffers queued for pending sense data. * (Pending sense only gets queued if the FETD * doesn't support autosense. e.g. non-packetized * parallel SCSI doesn't support autosense.) * * port_name: A string describing the FETD. e.g. "LSI 1030T U320" * or whatever you want to use to describe the driver. * * physical_port: This is the physical port number of this * particular port within the driver/hardware. This * number is hardware/driver specific. * virtual_port: This is the virtual port number of this * particular port. This is for things like NP-IV. * * port_online(): This function is called, with onoff_arg as its * argument, by the CTL layer when it wants the FETD * to start responding to selections on the specified * target ID. * * port_offline(): This function is called, with onoff_arg as its * argument, by the CTL layer when it wants the FETD * to stop responding to selection on the specified * target ID. * * onoff_arg: This is supplied as an argument to port_online() * and port_offline(). This is specified by the * FETD. 
* * lun_enable(): This function is called, with targ_lun_arg, a target * ID and a LUN ID as its arguments, by CTL when it * wants the FETD to enable a particular LUN. If the * FETD doesn't really know about LUNs, it should * just ignore this call and return 0. If the FETD * cannot enable the requested LUN for some reason, the * FETD should return non-zero status. * * lun_disable(): This function is called, with targ_lun_arg, a target * ID and LUN ID as its arguments, by CTL when it * wants the FETD to disable a particular LUN. If the * FETD doesn't really know about LUNs, it should just * ignore this call and return 0. If the FETD cannot * disable the requested LUN for some reason, the * FETD should return non-zero status. * * targ_lun_arg: This is supplied as an argument to the targ/lun * enable/disable() functions. This is specified by * the FETD. * * fe_datamove(): This function is called one or more times per I/O * by the CTL layer to tell the FETD to initiate a * DMA to or from the data buffer(s) specified by * the passed-in ctl_io structure. * * fe_done(): This function is called by the CTL layer when a * particular SCSI I/O or task management command has * completed. For SCSI I/O requests (CTL_IO_SCSI), * sense data is always supplied if the status is * CTL_SCSI_ERROR and the SCSI status byte is * SCSI_STATUS_CHECK_COND. If the FETD doesn't * support autosense, the sense should be queued * back to the CTL layer via ctl_queue_sense(). * * fe_dump(): This function, if it exists, is called by CTL * to request a dump of any debugging information or * state to the console. * * targ_port: The CTL layer assigns a "port number" to every * FETD. This port number should be passed back in * in the header of every ctl_io that is queued to * the CTL layer. This enables us to determine * which bus the command came in on. * * ctl_pool_ref: Memory pool reference used by the FETD in calls to * ctl_alloc_io(). * * max_initiators: Maximum number of initiators that the FETD is * allowed to have. Initiators should be numbered * from 0 to max_initiators - 1. This value will * typically be 16, and thus not a problem for * parallel SCSI. This may present issues for Fibre * Channel. * * wwnn World Wide Node Name to be used by the FETD. * Note that this is set *after* registration. It * will be set prior to the online function getting * called. * * wwpn World Wide Port Name to be used by the FETD. * Note that this is set *after* registration. It * will be set prior to the online function getting * called. * * status: Used by CTL to keep track of per-FETD state. * * links: Linked list pointers, used by CTL. The FETD * shouldn't touch this field. 
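 *
 * To make the registration contract above concrete, here is a minimal,
 * hypothetical FETD sketch (the "myfe" names are illustrative only;
 * the flow mirrors what the camsim frontend's cfcs_init() does):
 *
 *	static struct ctl_frontend myfe_frontend = {
 *		.name = "myfe",
 *		.init = myfe_init,	/- may block; returns 0 on success
 *		.shutdown = myfe_shutdown,
 *	};
 *	CTL_FRONTEND_DECLARE(myfe, myfe_frontend);
 *
 *	static int
 *	myfe_init(void)
 *	{
 *		struct ctl_port *port = &myfe_softc.port;
 *
 *		port->frontend = &myfe_frontend;
 *		port->port_type = CTL_PORT_INTERNAL;
 *		port->num_requested_ctl_io = 256;
 *		port->port_name = "myfe";
 *		port->port_online = myfe_online;
 *		port->port_offline = myfe_offline;
 *		port->onoff_arg = &myfe_softc;
 *		port->fe_datamove = myfe_datamove;
 *		port->fe_done = myfe_done;
 *		port->targ_port = -1;	/- let CTL assign the port number
 *		return (ctl_port_register(port));
 *	}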
*/ struct ctl_port { struct ctl_softc *ctl_softc; struct ctl_frontend *frontend; ctl_port_type port_type; /* passed to CTL */ int num_requested_ctl_io; /* passed to CTL */ char *port_name; /* passed to CTL */ int physical_port; /* passed to CTL */ int virtual_port; /* passed to CTL */ port_func_t port_online; /* passed to CTL */ port_func_t port_offline; /* passed to CTL */ port_info_func_t port_info; /* passed to CTL */ void *onoff_arg; /* passed to CTL */ lun_func_t lun_enable; /* passed to CTL */ lun_func_t lun_disable; /* passed to CTL */ int lun_map_size; /* passed to CTL */ uint32_t *lun_map; /* passed to CTL */ void *targ_lun_arg; /* passed to CTL */ void (*fe_datamove)(union ctl_io *io); /* passed to CTL */ void (*fe_done)(union ctl_io *io); /* passed to CTL */ int32_t targ_port; /* passed back to FETD */ void *ctl_pool_ref; /* passed back to FETD */ uint32_t max_initiators; /* passed back to FETD */ struct ctl_wwpn_iid *wwpn_iid; /* used by CTL */ uint64_t wwnn; /* set by CTL before online */ uint64_t wwpn; /* set by CTL before online */ ctl_port_status status; /* used by CTL */ ctl_options_t options; /* passed to CTL */ struct ctl_devid *port_devid; /* passed to CTL */ struct ctl_devid *target_devid; /* passed to CTL */ struct ctl_devid *init_devid; /* passed to CTL */ struct ctl_io_stats stats; /* used by CTL */ struct mtx port_lock; /* used by CTL */ STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */ STAILQ_ENTRY(ctl_port) links; /* used by CTL */ }; struct ctl_frontend { char name[CTL_DRIVER_NAME_LEN]; /* passed to CTL */ fe_init_t init; /* passed to CTL */ fe_ioctl_t ioctl; /* passed to CTL */ void (*fe_dump)(void); /* passed to CTL */ fe_shutdown_t shutdown; /* passed to CTL */ STAILQ_HEAD(, ctl_port) port_list; /* used by CTL */ STAILQ_ENTRY(ctl_frontend) links; /* used by CTL */ }; /* * This may block until resources are allocated. Called at FETD module load * time. Returns 0 for success, non-zero for failure. */ int ctl_frontend_register(struct ctl_frontend *fe); /* * Called at FETD module unload time. * Returns 0 for success, non-zero for failure. */ int ctl_frontend_deregister(struct ctl_frontend *fe); /* * Find the frontend by its name. Returns NULL if not found. */ struct ctl_frontend * ctl_frontend_find(char *frontend_name); /* * This may block until resources are allocated. Called at FETD module load * time. Returns 0 for success, non-zero for failure. */ int ctl_port_register(struct ctl_port *port); /* * Called at FETD module unload time. * Returns 0 for success, non-zero for failure. */ int ctl_port_deregister(struct ctl_port *port); /* * Called to set the WWNN and WWPN for a particular frontend. */ void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn, int wwpn_valid, uint64_t wwpn); /* * Called to bring a particular frontend online. */ void ctl_port_online(struct ctl_port *fe); /* * Called to take a particular frontend offline. */ void ctl_port_offline(struct ctl_port *fe); /* * This routine queues I/O and task management requests from the FETD to the * CTL layer. Returns immediately. Returns 0 for success, non-zero for * failure. */ int ctl_queue(union ctl_io *io); /* * This routine is used if the front end interface doesn't support * autosense (e.g. non-packetized parallel SCSI). This will queue the * scsiio structure back to a per-lun pending sense queue. This MUST be * called BEFORE any request sense can get queued to the CTL layer -- I * need it in the queue in order to service the request. 
The scsiio * structure passed in here will be freed by the CTL layer when sense is * retrieved by the initiator. Returns 0 for success, non-zero for failure. */ int ctl_queue_sense(union ctl_io *io); /* * This routine adds an initiator to CTL's port database. * The iid field should be the same as the iid passed in the nexus of each * ctl_io from this initiator. * The WWPN should be the FC WWPN, if available. */ int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name); /* * This routine will remove an initiator from CTL's port database. * The iid field should be the same as the iid passed in the nexus of each * ctl_io from this initiator. */ int ctl_remove_initiator(struct ctl_port *port, int iid); #endif /* _CTL_FRONTEND_H_ */ Index: head/sys/cam/ctl/ctl_frontend_cam_sim.c =================================================================== --- head/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 326264) +++ head/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 326265) @@ -1,811 +1,813 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2009 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $ */ /* * CTL frontend to CAM SIM interface. This allows access to CTL LUNs via * the da(4) and pass(4) drivers from inside the system. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define io_ptr spriv_ptr1 struct cfcs_io { union ccb *ccb; }; struct cfcs_softc { struct ctl_port port; char port_name[32]; struct cam_sim *sim; struct cam_devq *devq; struct cam_path *path; struct mtx lock; uint64_t wwnn; uint64_t wwpn; uint32_t cur_tag_num; int online; }; /* * We can't handle CCBs with these flags. For the most part, we just don't * handle physical addresses yet. That would require mapping things in * order to do the copy. 
*/ #define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_MSG_BUF_PHYS | \ CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \ CAM_SENSE_PHYS) static int cfcs_init(void); static int cfcs_shutdown(void); static void cfcs_poll(struct cam_sim *sim); static void cfcs_online(void *arg); static void cfcs_offline(void *arg); static void cfcs_datamove(union ctl_io *io); static void cfcs_done(union ctl_io *io); void cfcs_action(struct cam_sim *sim, union ccb *ccb); struct cfcs_softc cfcs_softc; /* * This is primarily intended to allow for error injection to test the CAM * sense data and sense residual handling code. This sets the maximum * amount of SCSI sense data that we will report to CAM. */ static int cfcs_max_sense = sizeof(struct scsi_sense_data); SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD, 0, "CAM Target Layer SIM frontend"); SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW, &cfcs_max_sense, 0, "Maximum sense data size"); static struct ctl_frontend cfcs_frontend = { .name = "camsim", .init = cfcs_init, .shutdown = cfcs_shutdown, }; CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend); static int cfcs_init(void) { struct cfcs_softc *softc; struct ctl_port *port; int retval; softc = &cfcs_softc; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF); port = &softc->port; port->frontend = &cfcs_frontend; port->port_type = CTL_PORT_INTERNAL; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; snprintf(softc->port_name, sizeof(softc->port_name), "camsim"); port->port_name = softc->port_name; port->port_online = cfcs_online; port->port_offline = cfcs_offline; port->onoff_arg = softc; port->fe_datamove = cfcs_datamove; port->fe_done = cfcs_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with error %d!\n", __func__, retval); mtx_destroy(&softc->lock); return (retval); } /* * If the CTL frontend didn't tell us what our WWNN/WWPN is, go * ahead and set something random. 
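 *
 * A hedged sketch of what the composition below yields (derived from
 * the code itself, not from a normative NAA field breakdown): the
 * constant 0x5000000000000000ULL selects an NAA type 5 name, the
 * randomized bits masked by 0x0000000fffffff00ULL plus the 0x0300
 * suffix fill in the rest of the 64-bit WWNN, and the WWPN is then
 * derived from the WWNN by adding targ_port + 1.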
*/ if (port->wwnn == 0) { uint64_t random_bits; arc4rand(&random_bits, sizeof(random_bits), 0); softc->wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000000000000000ULL | /* NL-Port */ 0x0300; softc->wwpn = softc->wwnn + port->targ_port + 1; ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn); } else { softc->wwnn = port->wwnn; softc->wwpn = port->wwpn; } mtx_lock(&softc->lock); softc->devq = cam_simq_alloc(port->num_requested_ctl_io); if (softc->devq == NULL) { printf("%s: error allocating devq\n", __func__); retval = ENOMEM; goto bailout; } softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name, softc, /*unit*/ 0, &softc->lock, 1, port->num_requested_ctl_io, softc->devq); if (softc->sim == NULL) { printf("%s: error allocating SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) { printf("%s: error registering SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_create_path(&softc->path, /*periph*/NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: error creating path\n", __func__); xpt_bus_deregister(cam_sim_path(softc->sim)); retval = EINVAL; goto bailout; } mtx_unlock(&softc->lock); return (retval); bailout: if (softc->sim) cam_sim_free(softc->sim, /*free_devq*/ TRUE); else if (softc->devq) cam_simq_free(softc->devq); mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); return (retval); } static int cfcs_shutdown(void) { struct cfcs_softc *softc = &cfcs_softc; struct ctl_port *port = &softc->port; int error; ctl_port_offline(port); mtx_lock(&softc->lock); xpt_free_path(softc->path); xpt_bus_deregister(cam_sim_path(softc->sim)); cam_sim_free(softc->sim, /*free_devq*/ TRUE); mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); if ((error = ctl_port_deregister(port)) != 0) printf("%s: cam_sim port deregistration failed\n", __func__); return (error); } static void cfcs_poll(struct cam_sim *sim) { } static void cfcs_onoffline(void *arg, int online) { struct cfcs_softc *softc; union ccb *ccb; softc = (struct cfcs_softc *)arg; mtx_lock(&softc->lock); softc->online = online; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("%s: unable to allocate CCB for rescan\n", __func__); goto bailout; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: can't allocate path for rescan\n", __func__); xpt_free_ccb(ccb); goto bailout; } xpt_rescan(ccb); bailout: mtx_unlock(&softc->lock); } static void cfcs_online(void *arg) { cfcs_onoffline(arg, /*online*/ 1); } static void cfcs_offline(void *arg) { cfcs_onoffline(arg, /*online*/ 0); } /* * This function is very similar to ctl_ioctl_do_datamove(). Is there a * way to combine the functionality? * * XXX KDM may need to move this into a thread. We're doing a bcopy in the * caller's context, which will usually be the backend. That may not be a * good thing. */ static void cfcs_datamove(union ctl_io *io) { union ccb *ccb; bus_dma_segment_t cam_sg_entry, *cam_sglist; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; int cam_sg_count, ctl_sg_count, cam_sg_start; int cam_sg_offset; int len_to_copy; int ctl_watermark, cam_watermark; int i, j; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; /* * Note that we have a check in cfcs_action() to make sure that any * CCBs with "bad" flags are returned with CAM_REQ_INVALID. 
This * is just to make sure no one removes that check without updating * this code to provide the additional functionality necessary to * support those modes of operation. */ KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid " "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS))); /* * Simplify things on both sides by putting single buffers into a * single entry S/G list. */ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_SG: { int len_seen; cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; cam_sg_count = ccb->csio.sglist_cnt; cam_sg_start = cam_sg_count; cam_sg_offset = 0; for (i = 0, len_seen = 0; i < cam_sg_count; i++) { if ((len_seen + cam_sglist[i].ds_len) >= io->scsiio.kern_rel_offset) { cam_sg_start = i; cam_sg_offset = io->scsiio.kern_rel_offset - len_seen; break; } len_seen += cam_sglist[i].ds_len; } break; } case CAM_DATA_VADDR: cam_sglist = &cam_sg_entry; cam_sglist[0].ds_len = ccb->csio.dxfer_len; cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr; cam_sg_count = 1; cam_sg_start = 0; cam_sg_offset = io->scsiio.kern_rel_offset; break; default: panic("Invalid CAM flags %#x", ccb->ccb_h.flags); } if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } ctl_watermark = 0; cam_watermark = cam_sg_offset; for (i = cam_sg_start, j = 0; i < cam_sg_count && j < ctl_sg_count;) { uint8_t *cam_ptr, *ctl_ptr; len_to_copy = MIN(cam_sglist[i].ds_len - cam_watermark, ctl_sglist[j].len - ctl_watermark); cam_ptr = (uint8_t *)cam_sglist[i].ds_addr; cam_ptr = cam_ptr + cam_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else ctl_ptr = (uint8_t *)ctl_sglist[j].addr; ctl_ptr = ctl_ptr + ctl_watermark; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, ctl_ptr, cam_ptr)); bcopy(ctl_ptr, cam_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, cam_ptr, ctl_ptr)); bcopy(cam_ptr, ctl_ptr, len_to_copy); } io->scsiio.ext_data_filled += len_to_copy; io->scsiio.kern_data_resid -= len_to_copy; cam_watermark += len_to_copy; if (cam_sglist[i].ds_len == cam_watermark) { i++; cam_watermark = 0; } ctl_watermark += len_to_copy; if (ctl_sglist[j].len == ctl_watermark) { j++; ctl_watermark = 0; } } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(ccb); } io->scsiio.be_move_done(io); } static void cfcs_done(union ctl_io *io) { union ccb *ccb; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; if (ccb == NULL) { ctl_free_io(io); return; } /* * At this point we should have status. If we don't, that's a bug. */ KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); /* * Translate CTL status to CAM status.
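 *
 * Summarizing the switch below (CTL status -> CAM status):
 *
 *	CTL_SUCCESS	-> CAM_REQ_CMP
 *	CTL_SCSI_ERROR	-> CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID,
 *			   with sense data copied and sense_resid set
 *	CTL_CMD_ABORTED	-> CAM_REQ_ABORTED
 *	CTL_ERROR (and anything else) -> CAM_REQ_CMP_ERR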
*/ if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (io->io_hdr.status & CTL_STATUS_MASK) { case CTL_SUCCESS: ccb->ccb_h.status |= CAM_REQ_CMP; break; case CTL_SCSI_ERROR: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->csio.scsi_status = io->scsiio.scsi_status; bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data, min(io->scsiio.sense_len, ccb->csio.sense_len)); if (ccb->csio.sense_len > io->scsiio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - io->scsiio.sense_len; else ccb->csio.sense_resid = 0; if ((ccb->csio.sense_len - ccb->csio.sense_resid) > cfcs_max_sense) { ccb->csio.sense_resid = ccb->csio.sense_len - cfcs_max_sense; } break; case CTL_CMD_ABORTED: ccb->ccb_h.status |= CAM_REQ_ABORTED; break; case CTL_ERROR: default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(ccb); ctl_free_io(io); } void cfcs_action(struct cam_sim *sim, union ccb *ccb) { struct cfcs_softc *softc; int err; softc = (struct cfcs_softc *)cam_sim_softc(sim); mtx_assert(&softc->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { union ctl_io *io; struct ccb_scsiio *csio; csio = &ccb->csio; /* * Catch CCB flags, like physical address flags, that * indicate situations we currently can't handle. */ if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) { ccb->ccb_h.status = CAM_REQ_INVALID; printf("%s: bad CCB flags %#x (all flags %#x)\n", __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS, ccb->ccb_h.flags); xpt_done(ccb); return; } /* * If we aren't online, there are no devices to see. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { printf("%s: can't allocate ctl_io\n", __func__); ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down via the XPT_RESET_BUS/LUN CCBs below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); /* * This tag scheme isn't the best, since we could in theory * have a very long-lived I/O and tag collision, especially * in a high I/O environment. But it should work well * enough for now. Since we're using unsigned ints, * they'll just wrap around. 
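 *
 * For example (illustrative arithmetic only): with a 32-bit unsigned
 * counter, the post-increment after tag 0xffffffff simply yields 0
 * again, so tag allocation itself never fails -- at the cost of a
 * theoretical collision with a still-outstanding I/O using that tag.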
*/ io->scsiio.tag_num = softc->cur_tag_num++; csio->tag_id = io->scsiio.tag_num; switch (csio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, csio->tag_action); break; } if (csio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, csio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len); ccb->ccb_h.status |= CAM_SIM_QUEUED; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s: func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } break; } case XPT_ABORT: { union ctl_io *io; union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * If we aren't online, there are no devices to talk to. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = abort_ccb->csio.tag_id; switch (abort_ccb->csio.tag_action) { case CAM_TAG_ACTION_NONE: io->taskio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->taskio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->taskio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->taskio.tag_type = CTL_TAG_ACA; break; default: io->taskio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, abort_ccb->csio.tag_action); break; } err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_fc *fc; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 800000; fc->wwnn = softc->wwnn; fc->wwpn = softc->wwpn; fc->port = softc->port.targ_port; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: /* XXX KDM should we actually do something here?
*/ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_BUS: case XPT_RESET_DEV: { union ctl_io *io; /* * If we aren't online, there are no devices to talk to. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ if (ccb->ccb_h.func_code == XPT_RESET_DEV) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); if (ccb->ccb_h.func_code == XPT_RESET_BUS) io->taskio.task_action = CTL_TASK_BUS_RESET; else io->taskio.task_action = CTL_TASK_LUN_RESET; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 0; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_EXTLUNS; cpi->hba_eng_cnt = 0; cpi->max_target = 1; cpi->max_lun = 1024; /* Do we really have a limit? */ cpi->maxio = 1024 * 1024; cpi->async_flags = 0; cpi->hpath_id = 0; cpi->initiator_id = 0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = 0; cpi->bus_id = 0; cpi->base_transfer_speed = 800000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; /* * Pretend to be Fibre Channel. */ cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = softc->wwnn; cpi->xport_specific.fc.wwpn = softc->wwpn; cpi->xport_specific.fc.port = softc->port.targ_port; cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; printf("%s: unsupported CCB type %#x\n", __func__, ccb->ccb_h.func_code); xpt_done(ccb); break; } } Index: head/sys/cam/ctl/ctl_frontend_iscsi.c =================================================================== --- head/sys/cam/ctl/ctl_frontend_iscsi.c (revision 326264) +++ head/sys/cam/ctl/ctl_frontend_iscsi.c (revision 326265) @@ -1,3001 +1,3003 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * CTL frontend for the iSCSI protocol. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(cfiscsi_kernel_proxy, "iSCSI target built with ICL_KERNEL_PROXY"); #endif static MALLOC_DEFINE(M_CFISCSI, "cfiscsi", "Memory used for CTL iSCSI frontend"); static uma_zone_t cfiscsi_data_wait_zone; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0, "CAM Target Layer iSCSI Frontend"); static int debug = 1; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 1, "Enable debug messages"); static int ping_timeout = 5; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds"); static int login_timeout = 60; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds"); static int maxtags = 256; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags, 0, "Max number of requests queued by initiator"); #define CFISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) { \ printf("%s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_LOCK(X) mtx_lock(&X->cs_lock) #define CFISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->cs_lock) #define CFISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->cs_lock, MA_OWNED) #define CONN_SESSION(X) ((struct cfiscsi_session *)(X)->ic_prv0) #define PDU_SESSION(X) CONN_SESSION((X)->ip_conn) #define PDU_EXPDATASN(X) (X)->ip_prv0 #define PDU_TOTAL_TRANSFER_LEN(X) (X)->ip_prv1 #define PDU_R2TSN(X) (X)->ip_prv2 static int cfiscsi_init(void); static int cfiscsi_shutdown(void); static void cfiscsi_online(void *arg); static void cfiscsi_offline(void *arg); static int cfiscsi_info(void *arg, struct sbuf *sb); static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static void cfiscsi_datamove(union ctl_io *io); static void cfiscsi_datamove_in(union ctl_io *io); static void cfiscsi_datamove_out(union ctl_io *io); static void cfiscsi_done(union ctl_io *io); static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request); static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request); static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request); static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request); static void cfiscsi_session_terminate(struct cfiscsi_session *cs); static struct cfiscsi_data_wait *cfiscsi_data_wait_new( struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp); static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw); static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag); static struct cfiscsi_target *cfiscsi_target_find_or_create( struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag); static void cfiscsi_target_release(struct cfiscsi_target *ct); static void cfiscsi_session_delete(struct cfiscsi_session *cs); static struct cfiscsi_softc cfiscsi_softc; static struct ctl_frontend cfiscsi_frontend = { .name = "iscsi", .init = cfiscsi_init, .ioctl = cfiscsi_ioctl, .shutdown = cfiscsi_shutdown, }; CTL_FRONTEND_DECLARE(cfiscsi, cfiscsi_frontend); MODULE_DEPEND(cfiscsi, icl, 1, 1, 1); static struct icl_pdu * cfiscsi_pdu_new_response(struct icl_pdu *request, int flags) { return (icl_pdu_new(request->ip_conn, flags)); } static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request) { const struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; uint32_t cmdsn, expstatsn; cs = PDU_SESSION(request); /* * Every incoming PDU - not just NOP-Out - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. * * XXX: Locking? */ cs->cs_timeout = 0; /* * Data-Out PDUs don't contain CmdSN. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) return (false); /* * We're only using fields common for all the request * (initiator -> target) PDUs. 
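 *
 * A hedged note on the window check below: a non-immediate command is
 * accepted only while its CmdSN lies in the inclusive window
 * [cs_cmdsn, cs_cmdsn + maxtags - 1], compared with the
 * wraparound-safe ISCSI_SNLT()/ISCSI_SNGT() macros. With hypothetical
 * values cs_cmdsn = 100 and maxtags = 256, a PDU carrying CmdSN 99 or
 * 356 falls outside the window and is dropped, while 100..355 is
 * accepted.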
*/ bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; cmdsn = ntohl(bhssc->bhssc_cmdsn); expstatsn = ntohl(bhssc->bhssc_expstatsn); CFISCSI_SESSION_LOCK(cs); #if 0 if (expstatsn != cs->cs_statsn) { CFISCSI_SESSION_DEBUG(cs, "received PDU with ExpStatSN %d, " "while current StatSN is %d", expstatsn, cs->cs_statsn); } #endif if ((request->ip_bhs->bhs_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) { /* * The target MUST silently ignore any non-immediate command * outside of this range. */ if (ISCSI_SNLT(cmdsn, cs->cs_cmdsn) || ISCSI_SNGT(cmdsn, cs->cs_cmdsn - 1 + maxtags)) { CFISCSI_SESSION_UNLOCK(cs); CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u", cmdsn, cs->cs_cmdsn); return (true); } /* * We don't support multiple connections now, so any * discontinuity in CmdSN means lost PDUs. Since we don't * support PDU retransmission -- terminate the connection. */ if (cmdsn != cs->cs_cmdsn) { CFISCSI_SESSION_UNLOCK(cs); CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u; dropping connection", cmdsn, cs->cs_cmdsn); cfiscsi_session_terminate(cs); return (true); } cs->cs_cmdsn++; } CFISCSI_SESSION_UNLOCK(cs); return (false); } static void cfiscsi_pdu_handle(struct icl_pdu *request) { struct cfiscsi_session *cs; bool ignore; cs = PDU_SESSION(request); ignore = cfiscsi_pdu_update_cmdsn(request); if (ignore) { icl_pdu_free(request); return; } /* * Handle the PDU; this includes e.g. receiving the remaining * part of PDU and submitting the SCSI command to CTL * or queueing a reply. The handling routine is responsible * for freeing the PDU when it's no longer needed. */ switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_NOP_OUT: cfiscsi_pdu_handle_nop_out(request); break; case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_pdu_handle_scsi_command(request); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_pdu_handle_task_request(request); break; case ISCSI_BHS_OPCODE_SCSI_DATA_OUT: cfiscsi_pdu_handle_data_out(request); break; case ISCSI_BHS_OPCODE_LOGOUT_REQUEST: cfiscsi_pdu_handle_logout_request(request); break; default: CFISCSI_SESSION_WARN(cs, "received PDU with unsupported " "opcode 0x%x; dropping connection", request->ip_bhs->bhs_opcode); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_receive_callback(struct icl_pdu *request) { #ifdef ICL_KERNEL_PROXY struct cfiscsi_session *cs; cs = PDU_SESSION(request); if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (cs->cs_login_pdu == NULL) cs->cs_login_pdu = request; else icl_pdu_free(request); cv_signal(&cs->cs_login_cv); return; } #endif cfiscsi_pdu_handle(request); } static void cfiscsi_error_callback(struct icl_conn *ic) { struct cfiscsi_session *cs; cs = CONN_SESSION(ic); CFISCSI_SESSION_WARN(cs, "connection error; dropping connection"); cfiscsi_session_terminate(cs); } static int cfiscsi_pdu_prepare(struct icl_pdu *response) { struct cfiscsi_session *cs; struct iscsi_bhs_scsi_response *bhssr; bool advance_statsn = true; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK_ASSERT(cs); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; /* * 10.8.3: "The StatSN for this connection is not advanced * after this PDU is sent." 
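 *
 * Summarizing the cases handled below: StatSN is advanced for every
 * response except R2T, NOP-In with an initiator task tag of
 * 0xffffffff, and Data-In PDUs that do not carry status (S bit
 * clear); ExpCmdSN and MaxCmdSN are stamped on every response to
 * advertise the current command window to the initiator.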
*/ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_R2T) advance_statsn = false; /* * 10.19.2: "However, when the Initiator Task Tag is set to 0xffffffff, * StatSN for the connection is not advanced after this PDU is sent." */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_NOP_IN && bhssr->bhssr_initiator_task_tag == 0xffffffff) advance_statsn = false; /* * See the comment below - StatSN is not meaningful and must * not be advanced. */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_IN && (bhssr->bhssr_flags & BHSDI_FLAGS_S) == 0) advance_statsn = false; /* * 10.7.3: "The fields StatSN, Status, and Residual Count * only have meaningful content if the S bit is set to 1." */ if (bhssr->bhssr_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhssr->bhssr_flags & BHSDI_FLAGS_S)) bhssr->bhssr_statsn = htonl(cs->cs_statsn); bhssr->bhssr_expcmdsn = htonl(cs->cs_cmdsn); bhssr->bhssr_maxcmdsn = htonl(cs->cs_cmdsn - 1 + imax(0, maxtags - cs->cs_outstanding_ctl_pdus)); if (advance_statsn) cs->cs_statsn++; return (0); } static void cfiscsi_pdu_queue(struct icl_pdu *response) { struct cfiscsi_session *cs; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue(response); CFISCSI_SESSION_UNLOCK(cs); } static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request) { struct cfiscsi_session *cs; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *response; void *data = NULL; size_t datasize; int error; cs = PDU_SESSION(request); bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; if (bhsno->bhsno_initiator_task_tag == 0xffffffff) { /* * Nothing to do, cfiscsi_pdu_update_cmdsn() already * zeroed the timeout. */ icl_pdu_free(request); return; } datasize = icl_pdu_data_segment_length(request); if (datasize > 0) { data = malloc(datasize, M_CFISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } icl_pdu_get_data(request, 0, data, datasize); } response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = bhsno->bhsno_initiator_task_tag; bhsni->bhsni_target_transfer_tag = 0xffffffff; if (datasize > 0) { error = icl_pdu_append_data(response, data, datasize, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); icl_pdu_free(response); cfiscsi_session_terminate(cs); return; } free(data, M_CFISCSI); } icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request) { struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); if (request->ip_data_len > 0 && cs->cs_immediate_data == false) { CFISCSI_SESSION_WARN(cs, "unsolicited data with " "ImmediateData=No; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io);
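/* * Fill in the CTL nexus and tag from the iSCSI PDU: the session * supplies the initiator ID and target port, while the LUN and the * task tag come straight from the SCSI Command BHS (bhssc_lun and * the initiator task tag), as done just below. */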
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhssc->bhssc_lun)); io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag; switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) { case BHSSC_FLAGS_ATTR_UNTAGGED: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case BHSSC_FLAGS_ATTR_SIMPLE: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case BHSSC_FLAGS_ATTR_ORDERED: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case BHSSC_FLAGS_ATTR_HOQ: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case BHSSC_FLAGS_ATTR_ACA: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; CFISCSI_SESSION_WARN(cs, "unhandled tag type %d", bhssc->bhssc_flags & BHSSC_FLAGS_ATTR); break; } io->scsiio.cdb_len = sizeof(bhssc->bhssc_cdb); /* Which is 16. */ memcpy(io->scsiio.cdb, bhssc->bhssc_cdb, sizeof(bhssc->bhssc_cdb)); refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request) { struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct icl_pdu *response; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhstmr->bhstmr_lun)); io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ switch (bhstmr->bhstmr_function & ~0x80) { case BHSTMR_FUNCTION_ABORT_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_ABORT_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case BHSTMR_FUNCTION_CLEAR_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_CLEAR_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET"); #endif io->taskio.task_action = CTL_TASK_LUN_RESET; break; case BHSTMR_FUNCTION_TARGET_WARM_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_WARM_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_TARGET_COLD_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_COLD_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_QUERY_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK"); #endif io->taskio.task_action = CTL_TASK_QUERY_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_QUERY_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET"); #endif io->taskio.task_action 
= CTL_TASK_QUERY_TASK_SET; break; case BHSTMR_FUNCTION_I_T_NEXUS_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET"); #endif io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; break; case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT"); #endif io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; default: CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x", bhstmr->bhstmr_function & ~0x80); ctl_free_io(io); response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); return; } refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static bool cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *cdw) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t copy_len, len, off, buffer_offset; int ctl_sg_count; union ctl_io *io; cs = PDU_SESSION(request); KASSERT((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT || (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bad opcode 0x%x", request->ip_bhs->bhs_opcode)); /* * We're only using fields common for Data-Out and SCSI Command PDUs. */ bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); #if 0 CFISCSI_SESSION_DEBUG(cs, "received %zd bytes out of %d", request->ip_data_len, io->scsiio.kern_total_len); #endif if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset); else buffer_offset = 0; len = icl_pdu_data_segment_length(request); /* * Make sure the offset, as sent by the initiator, matches the offset * we're supposed to be at in the scatter-gather list. */ if (buffer_offset > io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled || buffer_offset + len <= io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) { CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, " "expected %zd; dropping connection", buffer_offset, (size_t)io->scsiio.kern_rel_offset + (size_t)io->scsiio.ext_data_filled); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } /* * This is the offset within the PDU data segment, as opposed * to buffer_offset, which is the offset within the task (SCSI * command). 
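 *
 * Hypothetical example (the numbers are illustrative, not negotiated
 * defaults): if the task has already consumed 8192 bytes, i.e.
 * kern_rel_offset + ext_data_filled == 8192, and this PDU's data
 * segment starts at buffer_offset 4096, then copying must begin at
 * offset 8192 - 4096 == 4096 within the PDU data segment, which is
 * exactly what the subtraction below computes.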
*/ off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled - buffer_offset; /* * Iterate over the scatter/gather segments, filling them with data * from the PDU data segment. Note that this can get called multiple * times for one SCSI command; the cdw structure holds state for the * scatter/gather list. */ for (;;) { KASSERT(cdw->cdw_sg_index < ctl_sg_count, ("cdw->cdw_sg_index >= ctl_sg_count")); if (cdw->cdw_sg_len == 0) { cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; } KASSERT(off <= len, ("off > len")); copy_len = len - off; if (copy_len > cdw->cdw_sg_len) copy_len = cdw->cdw_sg_len; icl_pdu_get_data(request, off, cdw->cdw_sg_addr, copy_len); cdw->cdw_sg_addr += copy_len; cdw->cdw_sg_len -= copy_len; off += copy_len; io->scsiio.ext_data_filled += copy_len; io->scsiio.kern_data_resid -= copy_len; if (cdw->cdw_sg_len == 0) { /* * End of current segment. */ if (cdw->cdw_sg_index == ctl_sg_count - 1) { /* * Last segment in scatter/gather list. */ break; } cdw->cdw_sg_index++; } if (off == len) { /* * End of PDU payload. */ break; } } if (len > off) { /* * In case of unsolicited data, it's possible that the buffer * provided by CTL is smaller than negotiated FirstBurstLength. * Just ignore the superfluous data; we will request it with R2T * on the next call to cfiscsi_datamove(). * * This can only happen with a SCSI Command PDU. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND) return (true); CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, " "expected %zd; dropping connection", icl_pdu_data_segment_length(request), off); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) { CFISCSI_SESSION_WARN(cs, "got the final packet without " "the F flag; flags = 0x%x; dropping connection", bhsdo->bhsdo_flags); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) { if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { CFISCSI_SESSION_WARN(cs, "got the final packet, but the " "transmitted size was %zd bytes instead of %d; " "dropping connection", (size_t)io->scsiio.ext_data_filled, cdw->cdw_r2t_end); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } else { /* * For SCSI Command PDU, this just means we need to * solicit more data by sending R2T.
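 *
 * E.g. (illustrative numbers): with FirstBurstLength negotiated
 * to 65536, the initiator may send up to 64 KB of unsolicited
 * data with the command itself; whatever exceeds the buffer CTL
 * provided for this call is solicited later via R2T from
 * cfiscsi_datamove_out().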
*/ return (false); } } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) { #if 0 CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target " "transfer tag 0x%x", cdw->cdw_target_transfer_tag); #endif return (true); } return (false); } static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct cfiscsi_data_wait *cdw = NULL; union ctl_io *io; bool done; cs = PDU_SESSION(request); bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) { #if 0 CFISCSI_SESSION_DEBUG(cs, "have ttt 0x%x, itt 0x%x; looking for " "ttt 0x%x, itt 0x%x", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag, cdw->cdw_target_transfer_tag, cdw->cdw_initiator_task_tag); #endif if (bhsdo->bhsdo_target_transfer_tag == cdw->cdw_target_transfer_tag) break; } CFISCSI_SESSION_UNLOCK(cs); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "target transfer tag 0x%x, initiator task tag " "0x%x, not found; dropping connection", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } if (cdw->cdw_datasn != ntohl(bhsdo->bhsdo_datasn)) { CFISCSI_SESSION_WARN(cs, "received Data-Out PDU with " "DataSN %u, while expecting %u; dropping connection", ntohl(bhsdo->bhsdo_datasn), cdw->cdw_datasn); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } cdw->cdw_datasn++; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); done = cfiscsi_handle_data_segment(request, cdw); if (done) { CFISCSI_SESSION_LOCK(cs); TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end || io->scsiio.ext_data_filled == io->scsiio.kern_data_len); cfiscsi_data_wait_free(cs, cdw); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; if (done) io->scsiio.be_move_done(io); else cfiscsi_datamove_out(io); } icl_pdu_free(request); } static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request) { struct iscsi_bhs_logout_request *bhslr; struct iscsi_bhs_logout_response *bhslr2; struct icl_pdu *response; struct cfiscsi_session *cs; cs = PDU_SESSION(request); bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs; switch (bhslr->bhslr_reason & 0x7f) { case BHSLR_REASON_CLOSE_SESSION: case BHSLR_REASON_CLOSE_CONNECTION: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_DEBUG(cs, "failed to allocate memory"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_CLOSED_SUCCESSFULLY; bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); cfiscsi_session_terminate(cs); break; case BHSLR_REASON_REMOVE_FOR_RECOVERY: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_RECOVERY_NOT_SUPPORTED; bhslr2->bhslr_initiator_task_tag =
bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); break; default: CFISCSI_SESSION_WARN(cs, "invalid reason 0x%x; dropping connection", bhslr->bhslr_reason); icl_pdu_free(request); cfiscsi_session_terminate(cs); break; } } static void cfiscsi_callout(void *context) { struct icl_pdu *cp; struct iscsi_bhs_nop_in *bhsni; struct cfiscsi_session *cs; cs = context; if (cs->cs_terminating) return; callout_schedule(&cs->cs_callout, 1 * hz); atomic_add_int(&cs->cs_timeout, 1); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (login_timeout > 0 && cs->cs_timeout > login_timeout) { CFISCSI_SESSION_WARN(cs, "login timed out after " "%d seconds; dropping connection", cs->cs_timeout); cfiscsi_session_terminate(cs); } return; } #endif if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-In in this case; * the user might have disabled pings to work around problems * with certain initiators that can't properly handle * NOP-In, such as iPXE. Reset the timeout, to avoid * triggering reconnection, should the user decide to * reenable them. */ cs->cs_timeout = 0; return; } if (cs->cs_timeout >= ping_timeout) { CFISCSI_SESSION_WARN(cs, "no ping reply (NOP-Out) after %d seconds; " "dropping connection", ping_timeout); cfiscsi_session_terminate(cs); return; } /* * If the timeout was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-In. * * (It's 2 - one for one second, and one for incrementing cs_timeout * earlier in this routine.) */ if (cs->cs_timeout < 2) return; cp = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (cp == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory"); return; } bhsni = (struct iscsi_bhs_nop_in *)cp->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = 0xffffffff; cfiscsi_pdu_queue(cp); } static struct cfiscsi_data_wait * cfiscsi_data_wait_new(struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp) { struct cfiscsi_data_wait *cdw; int error; cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate %zd bytes", sizeof(*cdw)); return (NULL); } error = icl_conn_transfer_setup(cs->cs_conn, io, target_transfer_tagp, &cdw->cdw_icl_prv); if (error != 0) { CFISCSI_SESSION_WARN(cs, "icl_conn_transfer_setup() failed with error %d", error); uma_zfree(cfiscsi_data_wait_zone, cdw); return (NULL); } cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = *target_transfer_tagp; cdw->cdw_initiator_task_tag = initiator_task_tag; return (cdw); } static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw) { icl_conn_transfer_done(cs->cs_conn, cdw->cdw_icl_prv); uma_zfree(cfiscsi_data_wait_zone, cdw); } static void cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs) { struct cfiscsi_data_wait *cdw; union ctl_io *io; int error, last, wait; if (cs->cs_target == NULL) return; /* No target yet, so nothing to do.
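 * A session still in the Login Phase has not been bound to a
 * target, so there are no CTL tasks to terminate.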
*/ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = 0; io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; wait = cs->cs_outstanding_ctl_pdus; refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error); refcount_release(&cs->cs_outstanding_ctl_pdus); ctl_free_io(io); } CFISCSI_SESSION_LOCK(cs); while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) { TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * Set nonzero port status; this prevents backends from * assuming that the data transfer actually succeeded * and writing uninitialized data to disk. */ cdw->cdw_ctl_io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); cfiscsi_data_wait_free(cs, cdw); CFISCSI_SESSION_LOCK(cs); } CFISCSI_SESSION_UNLOCK(cs); /* * Wait for CTL to terminate all the tasks. */ if (wait > 0) CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate %d tasks", wait); for (;;) { refcount_acquire(&cs->cs_outstanding_ctl_pdus); last = refcount_release(&cs->cs_outstanding_ctl_pdus); if (last != 0) break; tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus), 0, "cfiscsi_terminate", hz / 100); } if (wait > 0) CFISCSI_SESSION_WARN(cs, "tasks terminated"); } static void cfiscsi_maintenance_thread(void *arg) { struct cfiscsi_session *cs; cs = arg; for (;;) { CFISCSI_SESSION_LOCK(cs); if (cs->cs_terminating == false) cv_wait(&cs->cs_maintenance_cv, &cs->cs_lock); CFISCSI_SESSION_UNLOCK(cs); if (cs->cs_terminating) { /* * We used to wait up to 30 seconds to deliver queued * PDUs to the initiator. We also tried hard to deliver * SCSI Responses for the aborted PDUs. We don't do * that anymore. We might need to revisit that. */ callout_drain(&cs->cs_callout); icl_conn_close(cs->cs_conn); /* * At this point ICL receive thread is no longer * running; no new tasks can be queued. 
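 * That makes it safe for cfiscsi_session_terminate_tasks() to
 * drain the Data-Out waiters and wait for the outstanding CTL
 * I/O counter to drop to zero.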
*/ cfiscsi_session_terminate_tasks(cs); cfiscsi_session_delete(cs); kthread_exit(); return; } CFISCSI_SESSION_DEBUG(cs, "nothing to do"); } } static void cfiscsi_session_terminate(struct cfiscsi_session *cs) { if (cs->cs_terminating) return; cs->cs_terminating = true; cv_signal(&cs->cs_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_signal(&cs->cs_login_cv); #endif } static int cfiscsi_session_register_initiator(struct cfiscsi_session *cs) { struct cfiscsi_target *ct; char *name; int i; KASSERT(cs->cs_ctl_initid == -1, ("already registered")); ct = cs->cs_target; name = strdup(cs->cs_initiator_id, M_CTL); i = ctl_add_initiator(&ct->ct_port, -1, 0, name); if (i < 0) { CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d", i); cs->cs_ctl_initid = -1; return (1); } cs->cs_ctl_initid = i; #if 0 CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i); #endif return (0); } static void cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs) { int error; if (cs->cs_ctl_initid == -1) return; error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid); if (error != 0) { CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d", error); } cs->cs_ctl_initid = -1; } static struct cfiscsi_session * cfiscsi_session_new(struct cfiscsi_softc *softc, const char *offload) { struct cfiscsi_session *cs; int error; cs = malloc(sizeof(*cs), M_CFISCSI, M_NOWAIT | M_ZERO); if (cs == NULL) { CFISCSI_WARN("malloc failed"); return (NULL); } cs->cs_ctl_initid = -1; refcount_init(&cs->cs_outstanding_ctl_pdus, 0); TAILQ_INIT(&cs->cs_waiting_for_data_out); mtx_init(&cs->cs_lock, "cfiscsi_lock", NULL, MTX_DEF); cv_init(&cs->cs_maintenance_cv, "cfiscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&cs->cs_login_cv, "cfiscsi_login"); #endif cs->cs_conn = icl_new_conn(offload, false, "cfiscsi", &cs->cs_lock); if (cs->cs_conn == NULL) { free(cs, M_CFISCSI); return (NULL); } cs->cs_conn->ic_receive = cfiscsi_receive_callback; cs->cs_conn->ic_error = cfiscsi_error_callback; cs->cs_conn->ic_prv0 = cs; error = kthread_add(cfiscsi_maintenance_thread, cs, NULL, NULL, 0, 0, "cfiscsimt"); if (error != 0) { CFISCSI_SESSION_WARN(cs, "kthread_add(9) failed with error %d", error); free(cs, M_CFISCSI); return (NULL); } mtx_lock(&softc->lock); cs->cs_id = ++softc->last_session_id; TAILQ_INSERT_TAIL(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); /* * Start pinging the initiator. 
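 * cfiscsi_callout() above fires every second and increments
 * cs_timeout; receiving any PDU resets the counter, and a NOP-In
 * ping goes out once the session has been idle for about two
 * seconds.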
*/ callout_init(&cs->cs_callout, 1); callout_reset(&cs->cs_callout, 1 * hz, cfiscsi_callout, cs); return (cs); } static void cfiscsi_session_delete(struct cfiscsi_session *cs) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; KASSERT(cs->cs_outstanding_ctl_pdus == 0, ("destroying session with outstanding CTL pdus")); KASSERT(TAILQ_EMPTY(&cs->cs_waiting_for_data_out), ("destroying session with non-empty queue")); mtx_lock(&softc->lock); TAILQ_REMOVE(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); cfiscsi_session_unregister_initiator(cs); if (cs->cs_target != NULL) cfiscsi_target_release(cs->cs_target); icl_conn_close(cs->cs_conn); icl_conn_free(cs->cs_conn); free(cs, M_CFISCSI); cv_signal(&softc->sessions_cv); } static int cfiscsi_init(void) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "cfiscsi", NULL, MTX_DEF); cv_init(&softc->sessions_cv, "cfiscsi_sessions"); #ifdef ICL_KERNEL_PROXY cv_init(&softc->accept_cv, "cfiscsi_accept"); #endif TAILQ_INIT(&softc->sessions); TAILQ_INIT(&softc->targets); cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait", sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); return (0); } static int cfiscsi_shutdown(void) { struct cfiscsi_softc *softc = &cfiscsi_softc; if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets)) return (EBUSY); uma_zdestroy(cfiscsi_data_wait_zone); #ifdef ICL_KERNEL_PROXY cv_destroy(&softc->accept_cv); #endif cv_destroy(&softc->sessions_cv); mtx_destroy(&softc->lock); return (0); } #ifdef ICL_KERNEL_PROXY static void cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id) { struct cfiscsi_session *cs; cs = cfiscsi_session_new(&cfiscsi_softc, NULL); if (cs == NULL) { CFISCSI_WARN("failed to create session"); return; } icl_conn_handoff_sock(cs->cs_conn, so); cs->cs_initiator_sa = sa; cs->cs_portal_id = portal_id; cs->cs_waiting_for_ctld = true; cv_signal(&cfiscsi_softc.accept_cv); } #endif static void cfiscsi_online(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 1; online = softc->online++; mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY if (softc->listener != NULL) icl_listen_free(softc->listener); softc->listener = icl_listen_new(cfiscsi_accept); #endif } static void cfiscsi_offline(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; struct cfiscsi_session *cs; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (!ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 0; online = --softc->online; TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) cfiscsi_session_terminate(cs); } do { TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) break; } if (cs != NULL) cv_wait(&softc->sessions_cv, &softc->lock); } while (cs != NULL && ct->ct_online == 0); mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY icl_listen_free(softc->listener); softc->listener = NULL; #endif } static int cfiscsi_info(void *arg, struct sbuf *sb) { struct cfiscsi_target *ct = (struct cfiscsi_target *)arg; int retval; retval = sbuf_printf(sb, "\t<cfiscsi_state>%d</cfiscsi_state>\n", ct->ct_state); return (retval); } static void cfiscsi_ioctl_handoff(struct ctl_iscsi *ci) { struct cfiscsi_softc *softc; struct cfiscsi_session
*cs, *cs2; struct cfiscsi_target *ct; struct ctl_iscsi_handoff_params *cihp; int error; cihp = (struct ctl_iscsi_handoff_params *)&(ci->data); softc = &cfiscsi_softc; CFISCSI_DEBUG("new connection from %s (%s) to %s", cihp->initiator_name, cihp->initiator_addr, cihp->target_name); ct = cfiscsi_target_find(softc, cihp->target_name, cihp->portal_group_tag); if (ct == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: target not found", __func__); return; } #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0 && cihp->connection_id > 0) { snprintf(ci->error_str, sizeof(ci->error_str), "both socket and connection_id set"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } if (cihp->socket == 0) { mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cihp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } mtx_unlock(&cfiscsi_softc.lock); } else { #endif cs = cfiscsi_session_new(softc, cihp->offload); if (cs == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: cfiscsi_session_new failed", __func__); cfiscsi_target_release(ct); return; } #ifdef ICL_KERNEL_PROXY } #endif /* * First PDU of the Full Feature phase has the same CmdSN as the last * PDU from the Login Phase received from the initiator. */ cs->cs_cmdsn = cihp->cmdsn; cs->cs_statsn = cihp->statsn; cs->cs_max_recv_data_segment_length = cihp->max_recv_data_segment_length; cs->cs_max_send_data_segment_length = cihp->max_send_data_segment_length; cs->cs_max_burst_length = cihp->max_burst_length; cs->cs_first_burst_length = cihp->first_burst_length; cs->cs_immediate_data = !!cihp->immediate_data; if (cihp->header_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_header_crc32c = true; if (cihp->data_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_data_crc32c = true; strlcpy(cs->cs_initiator_name, cihp->initiator_name, sizeof(cs->cs_initiator_name)); strlcpy(cs->cs_initiator_addr, cihp->initiator_addr, sizeof(cs->cs_initiator_addr)); strlcpy(cs->cs_initiator_alias, cihp->initiator_alias, sizeof(cs->cs_initiator_alias)); memcpy(cs->cs_initiator_isid, cihp->initiator_isid, sizeof(cs->cs_initiator_isid)); snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id), "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name, cihp->initiator_isid[0], cihp->initiator_isid[1], cihp->initiator_isid[2], cihp->initiator_isid[3], cihp->initiator_isid[4], cihp->initiator_isid[5]); mtx_lock(&softc->lock); if (ct->ct_online == 0) { mtx_unlock(&softc->lock); cfiscsi_session_terminate(cs); cfiscsi_target_release(ct); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: port offline", __func__); return; } cs->cs_target = ct; mtx_unlock(&softc->lock); refcount_acquire(&cs->cs_outstanding_ctl_pdus); restart: if (!cs->cs_terminating) { mtx_lock(&softc->lock); TAILQ_FOREACH(cs2, &softc->sessions, cs_next) { if (cs2 != cs && cs2->cs_tasks_aborted == false && cs->cs_target == cs2->cs_target && strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) { if (strcmp(cs->cs_initiator_addr, cs2->cs_initiator_addr) != 0) { CFISCSI_SESSION_WARN(cs2, "session reinstatement from " "different address %s", cs->cs_initiator_addr); } else { CFISCSI_SESSION_DEBUG(cs2, "session reinstatement"); } cfiscsi_session_terminate(cs2);
mtx_unlock(&softc->lock); pause("cfiscsi_reinstate", 1); goto restart; } } mtx_unlock(&softc->lock); } /* * Register initiator with CTL. */ cfiscsi_session_register_initiator(cs); #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0) { #endif error = icl_conn_handoff(cs->cs_conn, cihp->socket); if (error != 0) { cfiscsi_session_terminate(cs); refcount_release(&cs->cs_outstanding_ctl_pdus); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_conn_handoff failed with error %d", __func__, error); return; } #ifdef ICL_KERNEL_PROXY } #endif #ifdef ICL_KERNEL_PROXY cs->cs_login_phase = false; /* * First PDU of the Full Feature phase has likely already arrived. * We have to pick it up and process it properly. */ if (cs->cs_login_pdu != NULL) { CFISCSI_SESSION_DEBUG(cs, "picking up first PDU"); cfiscsi_pdu_handle(cs->cs_login_pdu); cs->cs_login_pdu = NULL; } #endif refcount_release(&cs->cs_outstanding_ctl_pdus); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_list(struct ctl_iscsi *ci) { struct ctl_iscsi_list_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; struct sbuf *sb; int error; cilp = (struct ctl_iscsi_list_params *)&(ci->data); softc = &cfiscsi_softc; sb = sbuf_new(NULL, NULL, cilp->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate %d bytes for iSCSI session list", cilp->alloc_len); return; } sbuf_printf(sb, "<ctlislist>\n"); mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { #ifdef ICL_KERNEL_PROXY if (cs->cs_target == NULL) continue; #endif error = sbuf_printf(sb, "<connection id=\"%d\">" "<initiator>%s</initiator>" "<initiator_addr>%s</initiator_addr>" "<initiator_alias>%s</initiator_alias>" "<target>%s</target>" "<target_alias>%s</target_alias>" "<target_portal_group_tag>%u</target_portal_group_tag>" "<header_digest>%s</header_digest>" "<data_digest>%s</data_digest>" "<max_recv_data_segment_length>%d</max_recv_data_segment_length>" "<max_send_data_segment_length>%d</max_send_data_segment_length>" "<max_burst_length>%d</max_burst_length>" "<first_burst_length>%d</first_burst_length>" "<immediate_data>%d</immediate_data>" "<iser>%d</iser>" "<offload>%s</offload>" "</connection>\n", cs->cs_id, cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias, cs->cs_target->ct_name, cs->cs_target->ct_alias, cs->cs_target->ct_tag, cs->cs_conn->ic_header_crc32c ? "CRC32C" : "None", cs->cs_conn->ic_data_crc32c ?
"CRC32C" : "None", cs->cs_max_recv_data_segment_length, cs->cs_max_send_data_segment_length, cs->cs_max_burst_length, cs->cs_first_burst_length, cs->cs_immediate_data, cs->cs_conn->ic_iser, cs->cs_conn->ic_offload); if (error != 0) break; } mtx_unlock(&softc->lock); error = sbuf_printf(sb, "</ctlislist>\n"); if (error != 0) { sbuf_delete(sb); ci->status = CTL_ISCSI_LIST_NEED_MORE_SPACE; snprintf(ci->error_str, sizeof(ci->error_str), "Out of space, %d bytes is too small", cilp->alloc_len); return; } sbuf_finish(sb); error = copyout(sbuf_data(sb), cilp->conn_xml, sbuf_len(sb) + 1); if (error != 0) { sbuf_delete(sb); snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; return; } cilp->fill_len = sbuf_len(sb) + 1; ci->status = CTL_ISCSI_OK; sbuf_delete(sb); } static void cfiscsi_ioctl_logout(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_logout_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; cilp = (struct ctl_iscsi_logout_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cilp->all == 0 && cs->cs_id != cilp->connection_id && strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate memory"); mtx_unlock(&softc->lock); return; } bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT; bhsam->bhsam_parameter3 = htons(10); cfiscsi_pdu_queue(response); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_terminate(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_terminate_params *citp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; citp = (struct ctl_iscsi_terminate_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (citp->all == 0 && cs->cs_id != citp->connection_id && strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { /* * Oh well. Just terminate the connection.
*/ } else { bhsam = (struct iscsi_bhs_asynchronous_message *) response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_0xffffffff = 0xffffffff; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_TERMINATES_SESSION; cfiscsi_pdu_queue(response); } cfiscsi_session_terminate(cs); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_limits(struct ctl_iscsi *ci) { struct ctl_iscsi_limits_params *cilp; struct icl_drv_limits idl; int error; cilp = (struct ctl_iscsi_limits_params *)&(ci->data); error = icl_limits(cilp->offload, false, &idl); if (error != 0) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_limits failed with error %d", __func__, error); return; } cilp->max_recv_data_segment_length = idl.idl_max_recv_data_segment_length; cilp->max_send_data_segment_length = idl.idl_max_send_data_segment_length; cilp->max_burst_length = idl.idl_max_burst_length; cilp->first_burst_length = idl.idl_first_burst_length; ci->status = CTL_ISCSI_OK; } #ifdef ICL_KERNEL_PROXY static void cfiscsi_ioctl_listen(struct ctl_iscsi *ci) { struct ctl_iscsi_listen_params *cilp; struct sockaddr *sa; int error; cilp = (struct ctl_iscsi_listen_params *)&(ci->data); if (cfiscsi_softc.listener == NULL) { CFISCSI_DEBUG("no listener"); snprintf(ci->error_str, sizeof(ci->error_str), "no listener"); ci->status = CTL_ISCSI_ERROR; return; } error = getsockaddr(&sa, (void *)cilp->addr, cilp->addrlen); if (error != 0) { CFISCSI_DEBUG("getsockaddr, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "getsockaddr failed"); ci->status = CTL_ISCSI_ERROR; return; } error = icl_listen_add(cfiscsi_softc.listener, cilp->iser, cilp->domain, cilp->socktype, cilp->protocol, sa, cilp->portal_id); if (error != 0) { free(sa, M_SONAME); CFISCSI_DEBUG("icl_listen_add, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "icl_listen_add failed, error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_accept(struct ctl_iscsi *ci) { struct ctl_iscsi_accept_params *ciap; struct cfiscsi_session *cs; int error; ciap = (struct ctl_iscsi_accept_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); for (;;) { TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_waiting_for_ctld) break; } if (cs != NULL) break; error = cv_wait_sig(&cfiscsi_softc.accept_cv, &cfiscsi_softc.lock); if (error != 0) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted"); ci->status = CTL_ISCSI_ERROR; return; } } mtx_unlock(&cfiscsi_softc.lock); cs->cs_waiting_for_ctld = false; cs->cs_login_phase = true; ciap->connection_id = cs->cs_id; ciap->portal_id = cs->cs_portal_id; ciap->initiator_addrlen = cs->cs_initiator_sa->sa_len; error = copyout(cs->cs_initiator_sa, ciap->initiator_addr, cs->cs_initiator_sa->sa_len); if (error != 0) { snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_send(struct ctl_iscsi *ci) { struct ctl_iscsi_send_params *cisp; struct cfiscsi_session *cs; struct icl_pdu *ip; size_t datalen; void *data; int error; cisp = (struct ctl_iscsi_send_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, 
&cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cisp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (cs->cs_login_phase == false) return (EBUSY); #endif if (cs->cs_terminating) { snprintf(ci->error_str, sizeof(ci->error_str), "connection is terminating"); ci->status = CTL_ISCSI_ERROR; return; } datalen = cisp->data_segment_len; /* * XXX */ //if (datalen > CFISCSI_MAX_DATA_SEGMENT_LENGTH) { if (datalen > 65535) { snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } if (datalen > 0) { data = malloc(datalen, M_CFISCSI, M_WAITOK); error = copyin(cisp->data_segment, data, datalen); if (error != 0) { free(data, M_CFISCSI); snprintf(ci->error_str, sizeof(ci->error_str), "copyin error %d", error); ci->status = CTL_ISCSI_ERROR; return; } } ip = icl_pdu_new(cs->cs_conn, M_WAITOK); memcpy(ip->ip_bhs, cisp->bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { icl_pdu_append_data(ip, data, datalen, M_WAITOK); free(data, M_CFISCSI); } CFISCSI_SESSION_LOCK(cs); icl_pdu_queue(ip); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_receive(struct ctl_iscsi *ci) { struct ctl_iscsi_receive_params *cirp; struct cfiscsi_session *cs; struct icl_pdu *ip; void *data; int error; cirp = (struct ctl_iscsi_receive_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cirp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (is->is_login_phase == false) return (EBUSY); #endif CFISCSI_SESSION_LOCK(cs); while (cs->cs_login_pdu == NULL && cs->cs_terminating == false) { error = cv_wait_sig(&cs->cs_login_cv, &cs->cs_lock); if (error != 0) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted by signal"); ci->status = CTL_ISCSI_ERROR; return; } } if (cs->cs_terminating) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "connection terminating"); ci->status = CTL_ISCSI_ERROR; return; } ip = cs->cs_login_pdu; cs->cs_login_pdu = NULL; CFISCSI_SESSION_UNLOCK(cs); if (ip->ip_data_len > cirp->data_segment_len) { icl_pdu_free(ip); snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } copyout(ip->ip_bhs, cirp->bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_CFISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, ip->ip_data_len); copyout(data, cirp->data_segment, ip->ip_data_len); free(data, M_CFISCSI); } icl_pdu_free(ip); ci->status = CTL_ISCSI_OK; } #endif /* !ICL_KERNEL_PROXY */ static void cfiscsi_ioctl_port_create(struct ctl_req *req) { struct cfiscsi_target *ct; struct ctl_port *port; const char *target, *alias, *tags; struct scsi_vpd_id_descriptor *desc; ctl_options_t opts; int retval, len, idlen; uint16_t tag; ctl_init_opts(&opts, req->num_args, req->kern_args); target = ctl_get_opt(&opts, "cfiscsi_target"); alias = ctl_get_opt(&opts, "cfiscsi_target_alias"); tags = ctl_get_opt(&opts, "cfiscsi_portal_group_tag"); if (target == NULL || tags == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); 
ctl_free_opts(&opts); return; } tag = strtol(tags, (char **)NULL, 10); ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias, tag); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "failed to create target \"%s\"", target); ctl_free_opts(&opts); return; } if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" for portal group tag %u already exists", target, tag); cfiscsi_target_release(ct); ctl_free_opts(&opts); return; } port = &ct->ct_port; // WAT if (ct->ct_state == CFISCSI_TARGET_STATE_DYING) goto done; port->frontend = &cfiscsi_frontend; port->port_type = CTL_PORT_ISCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; port->port_name = "iscsi"; port->physical_port = tag; port->virtual_port = ct->ct_target_id; port->port_online = cfiscsi_online; port->port_offline = cfiscsi_offline; port->port_info = cfiscsi_info; port->onoff_arg = ct; port->fe_datamove = cfiscsi_datamove; port->fe_done = cfiscsi_done; port->targ_port = -1; port->options = opts; STAILQ_INIT(&opts); /* Generate Port ID. */ idlen = strlen(target) + strlen(",t,0x0001") + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; snprintf(desc->identifier, idlen, "%s,t,0x%4.4x", target, tag); /* Generate Target ID. */ idlen = strlen(target) + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; strlcpy(desc->identifier, target, idlen); retval = ctl_port_register(port); if (retval != 0) { ctl_free_opts(&port->options); free(port->port_devid, M_CTL); free(port->target_devid, M_CTL); cfiscsi_target_release(ct); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "ctl_port_register() failed with error %d", retval); return; } done: ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE; req->status = CTL_LUN_OK; memcpy(req->kern_args[0].kvalue, &port->targ_port, sizeof(port->targ_port)); //XXX } static void cfiscsi_ioctl_port_remove(struct ctl_req *req) { struct cfiscsi_target *ct; const char *target, *tags; ctl_options_t opts; uint16_t tag; ctl_init_opts(&opts, req->num_args, req->kern_args); target = ctl_get_opt(&opts, "cfiscsi_target"); tags = ctl_get_opt(&opts, "cfiscsi_portal_group_tag"); if (target == NULL || tags == NULL) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } tag = strtol(tags, (char **)NULL, 10); ct = cfiscsi_target_find(&cfiscsi_softc, target, tag); if (ct == NULL) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "can't find target \"%s\"", target); return; } if (ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) { ctl_free_opts(&opts);
req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" is already dying", target); return; } ctl_free_opts(&opts); ct->ct_state = CFISCSI_TARGET_STATE_DYING; ctl_port_offline(&ct->ct_port); cfiscsi_target_release(ct); cfiscsi_target_release(ct); req->status = CTL_LUN_OK; } static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_iscsi *ci; struct ctl_req *req; if (cmd == CTL_PORT_REQ) { req = (struct ctl_req *)addr; switch (req->reqtype) { case CTL_REQ_CREATE: cfiscsi_ioctl_port_create(req); break; case CTL_REQ_REMOVE: cfiscsi_ioctl_port_remove(req); break; default: req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Unsupported request type %d", req->reqtype); } return (0); } if (cmd != CTL_ISCSI) return (ENOTTY); ci = (struct ctl_iscsi *)addr; switch (ci->type) { case CTL_ISCSI_HANDOFF: cfiscsi_ioctl_handoff(ci); break; case CTL_ISCSI_LIST: cfiscsi_ioctl_list(ci); break; case CTL_ISCSI_LOGOUT: cfiscsi_ioctl_logout(ci); break; case CTL_ISCSI_TERMINATE: cfiscsi_ioctl_terminate(ci); break; case CTL_ISCSI_LIMITS: cfiscsi_ioctl_limits(ci); break; #ifdef ICL_KERNEL_PROXY case CTL_ISCSI_LISTEN: cfiscsi_ioctl_listen(ci); break; case CTL_ISCSI_ACCEPT: cfiscsi_ioctl_accept(ci); break; case CTL_ISCSI_SEND: cfiscsi_ioctl_send(ci); break; case CTL_ISCSI_RECEIVE: cfiscsi_ioctl_receive(ci); break; #else case CTL_ISCSI_LISTEN: case CTL_ISCSI_ACCEPT: case CTL_ISCSI_SEND: case CTL_ISCSI_RECEIVE: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: CTL compiled without ICL_KERNEL_PROXY", __func__); break; #endif /* !ICL_KERNEL_PROXY */ default: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: invalid iSCSI request type %d", __func__, ci->type); break; } return (0); } static void cfiscsi_target_hold(struct cfiscsi_target *ct) { refcount_acquire(&ct->ct_refcount); } static void cfiscsi_target_release(struct cfiscsi_target *ct) { struct cfiscsi_softc *softc; softc = ct->ct_softc; mtx_lock(&softc->lock); if (refcount_release(&ct->ct_refcount)) { TAILQ_REMOVE(&softc->targets, ct, ct_next); mtx_unlock(&softc->lock); if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) { ct->ct_state = CFISCSI_TARGET_STATE_INVALID; if (ctl_port_deregister(&ct->ct_port) != 0) printf("%s: ctl_port_deregister() failed\n", __func__); } free(ct, M_CFISCSI); return; } mtx_unlock(&softc->lock); } static struct cfiscsi_target * cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag) { struct cfiscsi_target *ct; mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); return (ct); } mtx_unlock(&softc->lock); return (NULL); } static struct cfiscsi_target * cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag) { struct cfiscsi_target *ct, *newct; if (name[0] == '\0' || strlen(name) >= CTL_ISCSI_NAME_LEN) return (NULL); newct = malloc(sizeof(*newct), M_CFISCSI, M_WAITOK | M_ZERO); mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state == CFISCSI_TARGET_STATE_INVALID) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); free(newct, M_CFISCSI); return (ct); } strlcpy(newct->ct_name, name, sizeof(newct->ct_name)); if 
(alias != NULL) strlcpy(newct->ct_alias, alias, sizeof(newct->ct_alias)); newct->ct_tag = tag; refcount_init(&newct->ct_refcount, 1); newct->ct_softc = softc; if (TAILQ_EMPTY(&softc->targets)) softc->last_target_id = 0; newct->ct_target_id = ++softc->last_target_id; TAILQ_INSERT_TAIL(&softc->targets, newct, ct_next); mtx_unlock(&softc->lock); return (newct); } static void cfiscsi_datamove_in(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_data_in *bhsdi; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t len, expected_len, sg_len, buffer_offset; const char *sg_addr; int ctl_sg_count, error, i; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } /* * This is the total amount of data to be transferred within the current * SCSI command. We need to record it so that we can properly report * underflow/overflow. */ PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len; /* * This is the offset within the current SCSI command; for the first * call to cfiscsi_datamove() it will be 0, and for subsequent ones * it will be the sum of lengths of previous ones. */ buffer_offset = io->scsiio.kern_rel_offset; /* * This is the transfer length expected by the initiator. In theory, * it could differ from the amount of data the SCSI command actually * transfers, even if that doesn't make any sense. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); #if 0 if (expected_len != io->scsiio.kern_total_len) { CFISCSI_SESSION_DEBUG(cs, "expected transfer length %zd, " "actual length %zd", expected_len, (size_t)io->scsiio.kern_total_len); } #endif if (buffer_offset >= expected_len) { #if 0 CFISCSI_SESSION_DEBUG(cs, "buffer_offset = %zd, " "already sent the expected len", buffer_offset); #endif io->scsiio.be_move_done(io); return; } i = 0; sg_addr = NULL; sg_len = 0; response = NULL; bhsdi = NULL; for (;;) { if (response == NULL) { response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; bhsdi->bhsdi_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_IN; bhsdi->bhsdi_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsdi->bhsdi_target_transfer_tag = 0xffffffff; bhsdi->bhsdi_datasn = htonl(PDU_EXPDATASN(request)); PDU_EXPDATASN(request)++; bhsdi->bhsdi_buffer_offset = htonl(buffer_offset); } KASSERT(i < ctl_sg_count, ("i >= ctl_sg_count")); if (sg_len == 0) { sg_addr = ctl_sglist[i].addr; sg_len = ctl_sglist[i].len; KASSERT(sg_len > 0, ("sg_len <= 0")); } len = sg_len; /* * Truncate to maximum data segment length.
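 *
 * Illustrative example: if the initiator negotiated a
 * MaxRecvDataSegmentLength of 8192, a 64 KB READ is carried in
 * eight Data-In PDUs, each capped by this check at
 * cs_max_send_data_segment_length bytes of payload.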
*/ KASSERT(response->ip_data_len < cs->cs_max_send_data_segment_length, ("ip_data_len %zd >= max_send_data_segment_length %d", response->ip_data_len, cs->cs_max_send_data_segment_length)); if (response->ip_data_len + len > cs->cs_max_send_data_segment_length) { len = cs->cs_max_send_data_segment_length - response->ip_data_len; KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } /* * Truncate to expected data transfer length. */ KASSERT(buffer_offset + response->ip_data_len < expected_len, ("buffer_offset %zd + ip_data_len %zd >= expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len + len > expected_len) { CFISCSI_SESSION_DEBUG(cs, "truncating from %zd " "to expected data transfer length %zd", buffer_offset + response->ip_data_len + len, expected_len); len = expected_len - (buffer_offset + response->ip_data_len); KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } error = icl_pdu_append_data(response, sg_addr, len, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); icl_pdu_free(response); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } sg_addr += len; sg_len -= len; io->scsiio.kern_data_resid -= len; KASSERT(buffer_offset + response->ip_data_len <= expected_len, ("buffer_offset %zd + ip_data_len %zd > expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len == expected_len) { /* * Already have the amount of data the initiator wanted. */ break; } if (sg_len == 0) { /* * End of scatter-gather segment; * proceed to the next one... */ if (i == ctl_sg_count - 1) { /* * ... unless this was the last one. */ break; } i++; } if (response->ip_data_len == cs->cs_max_send_data_segment_length) { /* * Can't stuff more data into the current PDU; * queue it. Note that's not enough to check * for kern_data_resid == 0 instead; there * may be several Data-In PDUs for the final * call to cfiscsi_datamove(), and we want * to set the F flag only on the last of them. 
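 *
 * E.g. a datamove covering 24 KB with an illustrative 8 KB
 * segment limit produces three Data-In PDUs; only the third may
 * carry the F (and possibly S) flag, so the checks below look at
 * the overall offsets rather than at kern_data_resid.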
*/ buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { buffer_offset -= response->ip_data_len; break; } cfiscsi_pdu_queue(response); response = NULL; bhsdi = NULL; } } if (response != NULL) { buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_F; if (io->io_hdr.status == CTL_SUCCESS) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_S; if (PDU_TOTAL_TRANSFER_LEN(request) < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhsdi->bhsdi_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - PDU_TOTAL_TRANSFER_LEN(request)); } else if (PDU_TOTAL_TRANSFER_LEN(request) > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhsdi->bhsdi_residual_count = htonl(PDU_TOTAL_TRANSFER_LEN(request) - ntohl(bhssc->bhssc_expected_data_transfer_length)); } bhsdi->bhsdi_status = io->scsiio.scsi_status; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } } KASSERT(response->ip_data_len > 0, ("sending empty Data-In")); cfiscsi_pdu_queue(response); } io->scsiio.be_move_done(io); } static void cfiscsi_datamove_out(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_r2t *bhsr2t; struct cfiscsi_data_wait *cdw; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; uint32_t expected_len, datamove_len, r2t_off, r2t_len; uint32_t target_transfer_tag; bool done; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); /* * We need to record the total transfer length so that we can properly * report underflow/overflow. */ PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len; /* * Complete write underflow. Not a single byte to read. Return. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); if (io->scsiio.kern_rel_offset >= expected_len) { io->scsiio.be_move_done(io); return; } datamove_len = MIN(io->scsiio.kern_data_len, expected_len - io->scsiio.kern_rel_offset); target_transfer_tag = atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1); cdw = cfiscsi_data_wait_new(cs, io, bhssc->bhssc_initiator_task_tag, &target_transfer_tag); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } #if 0 CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator " "task tag 0x%x, target transfer tag 0x%x", bhssc->bhssc_initiator_task_tag, target_transfer_tag); #endif cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = target_transfer_tag; cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag; cdw->cdw_r2t_end = datamove_len; cdw->cdw_datasn = 0; /* Set initial data pointer for the CDW respecting ext_data_filled.
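 * The loop below skips data that already arrived as immediate
 * data: e.g. with illustrative 4 KB S/G entries and
 * ext_data_filled == 6144, it leaves cdw_sg_index at 1 with the
 * first 2048 bytes of that entry consumed.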
*/ if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = datamove_len; } cdw->cdw_sg_index = 0; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; r2t_off = io->scsiio.ext_data_filled; while (r2t_off > 0) { if (r2t_off >= cdw->cdw_sg_len) { r2t_off -= cdw->cdw_sg_len; cdw->cdw_sg_index++; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; continue; } cdw->cdw_sg_addr += r2t_off; cdw->cdw_sg_len -= r2t_off; r2t_off = 0; } if (cs->cs_immediate_data && io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled < icl_pdu_data_segment_length(request)) { done = cfiscsi_handle_data_segment(request, cdw); if (done) { cfiscsi_data_wait_free(cs, cdw); io->scsiio.be_move_done(io); return; } } r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled; r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled, cs->cs_max_burst_length); cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len; CFISCSI_SESSION_LOCK(cs); TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * XXX: We should limit the number of outstanding R2T PDUs * per task to MaxOutstandingR2T. */ response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; bhsr2t->bhsr2t_opcode = ISCSI_BHS_OPCODE_R2T; bhsr2t->bhsr2t_flags = 0x80; bhsr2t->bhsr2t_lun = bhssc->bhssc_lun; bhsr2t->bhsr2t_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsr2t->bhsr2t_target_transfer_tag = target_transfer_tag; /* * XXX: Here we assume that cfiscsi_datamove() won't ever * be running concurrently on several CPUs for a given * command. */ bhsr2t->bhsr2t_r2tsn = htonl(PDU_R2TSN(request)); PDU_R2TSN(request)++; /* * This is the offset within the current SCSI command; * i.e. for the first call of datamove(), it will be 0, * and for subsequent ones it will be the sum of lengths * of previous ones. * * The ext_data_filled is to account for unsolicited * (immediate) data that might have already arrived. */ bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off); /* * This is the total length (sum of S/G lengths) this call * to cfiscsi_datamove() is supposed to handle, limited by * MaxBurstLength. */ bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len); cfiscsi_pdu_queue(response); } static void cfiscsi_datamove(union ctl_io *io) { if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) cfiscsi_datamove_in(io); else { /* We hadn't received anything during this datamove yet. 
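 * ext_data_filled counts bytes received for the current datamove
 * only, so clear it before starting a new Data-Out cycle.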
*/ io->scsiio.ext_data_filled = 0; cfiscsi_datamove_out(io); } } static void cfiscsi_scsi_command_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_scsi_response *bhssr; #ifdef DIAGNOSTIC struct cfiscsi_data_wait *cdw; #endif struct cfiscsi_session *cs; uint16_t sense_length; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("replying to wrong opcode 0x%x", bhssc->bhssc_opcode)); //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); #ifdef DIAGNOSTIC CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) KASSERT(bhssc->bhssc_initiator_task_tag != cdw->cdw_initiator_task_tag, ("dangling cdw")); CFISCSI_SESSION_UNLOCK(cs); #endif /* * Do not return status for aborted commands. * There are exceptions, but none supported by CTL yet. */ if (((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) || (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) { ctl_free_io(io); icl_pdu_free(request); return; } response = cfiscsi_pdu_new_response(request, M_WAITOK); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE; bhssr->bhssr_flags = 0x80; /* * XXX: We don't deal with bidirectional under/overflows; * does anything actually support those? */ if (PDU_TOTAL_TRANSFER_LEN(request) < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhssr->bhssr_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - PDU_TOTAL_TRANSFER_LEN(request)); //CFISCSI_SESSION_DEBUG(cs, "underflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } else if (PDU_TOTAL_TRANSFER_LEN(request) > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhssr->bhssr_residual_count = htonl(PDU_TOTAL_TRANSFER_LEN(request) - ntohl(bhssc->bhssc_expected_data_transfer_length)); //CFISCSI_SESSION_DEBUG(cs, "overflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } bhssr->bhssr_response = BHSSR_RESPONSE_COMMAND_COMPLETED; bhssr->bhssr_status = io->scsiio.scsi_status; bhssr->bhssr_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhssr->bhssr_expdatasn = htonl(PDU_EXPDATASN(request)); if (io->scsiio.sense_len > 0) { #if 0 CFISCSI_SESSION_DEBUG(cs, "returning %d bytes of sense data", io->scsiio.sense_len); #endif sense_length = htons(io->scsiio.sense_len); icl_pdu_append_data(response, &sense_length, sizeof(sense_length), M_WAITOK); icl_pdu_append_data(response, &io->scsiio.sense_data, io->scsiio.sense_len, M_WAITOK); } ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_task_management_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct cfiscsi_data_wait *cdw, *tmpcdw; struct cfiscsi_session *cs, *tcs; struct cfiscsi_softc *softc; int cold_reset = 0; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; KASSERT((bhstmr->bhstmr_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_TASK_REQUEST, ("replying to wrong opcode 0x%x", 
bhstmr->bhstmr_opcode)); #if 0 CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x; referenced task tag 0x%x", bhstmr->bhstmr_initiator_task_tag, bhstmr->bhstmr_referenced_task_tag); #endif if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_ABORT_TASK) { /* * Make sure we no longer wait for Data-Out for this command. */ CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH_SAFE(cdw, &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) { if (bhstmr->bhstmr_referenced_task_tag != cdw->cdw_initiator_task_tag) continue; #if 0 CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task " "tag 0x%x", bhstmr->bhstmr_initiator_task_tag); #endif TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 43; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); cfiscsi_data_wait_free(cs, cdw); } CFISCSI_SESSION_UNLOCK(cs); } if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_TARGET_COLD_RESET && io->io_hdr.status == CTL_SUCCESS) cold_reset = 1; response = cfiscsi_pdu_new_response(request, M_WAITOK); bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED; break; case CTL_TASK_LUN_DOES_NOT_EXIST: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: default: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; break; } memcpy(bhstmr2->bhstmr_additional_reponse_information, io->taskio.task_resp, sizeof(io->taskio.task_resp)); bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); if (cold_reset) { softc = cs->cs_target->ct_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(tcs, &softc->sessions, cs_next) { if (tcs->cs_target == cs->cs_target) cfiscsi_session_terminate(tcs); } mtx_unlock(&softc->lock); } } static void cfiscsi_done(union ctl_io *io) { struct icl_pdu *request; struct cfiscsi_session *cs; KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); if (io->io_hdr.io_type == CTL_IO_TASK && io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) { /* * Implicit task termination has just completed; nothing to do. 
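 * (The refcount_release() below presumably pairs with an acquire
 * taken when the task was queued to CTL; the wakeup() on
 * cs_outstanding_ctl_pdus lets the session termination path, which
 * waits for that counter to drain, make progress.)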
*/ cs = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs->cs_tasks_aborted = true; refcount_release(&cs->cs_outstanding_ctl_pdus); wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus)); ctl_free_io(io); return; } request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_scsi_command_done(io); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_task_management_done(io); break; default: panic("cfiscsi_done called with wrong opcode 0x%x", request->ip_bhs->bhs_opcode); } refcount_release(&cs->cs_outstanding_ctl_pdus); } Index: head/sys/cam/ctl/ctl_frontend_iscsi.h =================================================================== --- head/sys/cam/ctl/ctl_frontend_iscsi.h (revision 326264) +++ head/sys/cam/ctl/ctl_frontend_iscsi.h (revision 326265) @@ -1,128 +1,130 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef CTL_FRONTEND_ISCSI_H #define CTL_FRONTEND_ISCSI_H #define CFISCSI_TARGET_STATE_INVALID 0 #define CFISCSI_TARGET_STATE_ACTIVE 1 #define CFISCSI_TARGET_STATE_DYING 2 struct cfiscsi_target { TAILQ_ENTRY(cfiscsi_target) ct_next; struct cfiscsi_softc *ct_softc; volatile u_int ct_refcount; char ct_name[CTL_ISCSI_NAME_LEN]; char ct_alias[CTL_ISCSI_ALIAS_LEN]; uint16_t ct_tag; int ct_state; int ct_online; int ct_target_id; struct ctl_port ct_port; }; struct cfiscsi_data_wait { TAILQ_ENTRY(cfiscsi_data_wait) cdw_next; union ctl_io *cdw_ctl_io; uint32_t cdw_target_transfer_tag; uint32_t cdw_initiator_task_tag; int cdw_sg_index; char *cdw_sg_addr; size_t cdw_sg_len; uint32_t cdw_r2t_end; uint32_t cdw_datasn; void *cdw_icl_prv; }; #define CFISCSI_SESSION_STATE_INVALID 0 #define CFISCSI_SESSION_STATE_BHS 1 #define CFISCSI_SESSION_STATE_AHS 2 #define CFISCSI_SESSION_STATE_HEADER_DIGEST 3 #define CFISCSI_SESSION_STATE_DATA 4 #define CFISCSI_SESSION_STATE_DATA_DIGEST 5 struct cfiscsi_session { TAILQ_ENTRY(cfiscsi_session) cs_next; struct mtx cs_lock; struct icl_conn *cs_conn; uint32_t cs_cmdsn; uint32_t cs_statsn; uint32_t cs_target_transfer_tag; volatile u_int cs_outstanding_ctl_pdus; TAILQ_HEAD(, cfiscsi_data_wait) cs_waiting_for_data_out; struct cfiscsi_target *cs_target; struct callout cs_callout; int cs_timeout; struct cv cs_maintenance_cv; bool cs_terminating; bool cs_tasks_aborted; int cs_max_recv_data_segment_length; int cs_max_send_data_segment_length; int cs_max_burst_length; int cs_first_burst_length; bool cs_immediate_data; char cs_initiator_name[CTL_ISCSI_NAME_LEN]; char cs_initiator_addr[CTL_ISCSI_ADDR_LEN]; char cs_initiator_alias[CTL_ISCSI_ALIAS_LEN]; char cs_initiator_isid[6]; char cs_initiator_id[CTL_ISCSI_NAME_LEN + 5 + 6 + 1]; unsigned int cs_id; int cs_ctl_initid; #ifdef ICL_KERNEL_PROXY struct sockaddr *cs_initiator_sa; int cs_portal_id; bool cs_login_phase; bool cs_waiting_for_ctld; struct cv cs_login_cv; struct icl_pdu *cs_login_pdu; #endif }; #ifdef ICL_KERNEL_PROXY struct icl_listen; #endif struct cfiscsi_softc { struct mtx lock; char port_name[32]; int online; int last_target_id; unsigned int last_session_id; TAILQ_HEAD(, cfiscsi_target) targets; TAILQ_HEAD(, cfiscsi_session) sessions; struct cv sessions_cv; #ifdef ICL_KERNEL_PROXY struct icl_listen *listener; struct cv accept_cv; #endif }; #endif /* !CTL_FRONTEND_ISCSI_H */ Index: head/sys/cam/ctl/ctl_ha.h =================================================================== --- head/sys/cam/ctl/ctl_ha.h (revision 326264) +++ head/sys/cam/ctl/ctl_ha.h (revision 326265) @@ -1,141 +1,143 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2011 Spectra Logic Corporation * Copyright (c) 2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ha.h#1 $ * $FreeBSD$ */ #ifndef _CTL_HA_H_ #define _CTL_HA_H_ /* * CTL High Availability Modes: * * CTL_HA_MODE_ACT_STBY: Commands are serialized to the master side. * No media access commands on slave side (Standby). * CTL_HA_MODE_SER_ONLY: Commands are serialized to the master side. * Media can be accessed on both sides. * CTL_HA_MODE_XFER: Commands and data are forwarded to the * master side for execution. */ typedef enum { CTL_HA_MODE_ACT_STBY, CTL_HA_MODE_SER_ONLY, CTL_HA_MODE_XFER } ctl_ha_mode; /* * Communication channel IDs for various system components. This is to * make sure one CTL instance talks with another, one ZFS instance talks * with another, etc. */ typedef enum { CTL_HA_CHAN_CTL, CTL_HA_CHAN_DATA, CTL_HA_CHAN_MAX } ctl_ha_channel; /* * HA communication event notification. These are events generated by the * HA communication subsystem. * * CTL_HA_EVT_MSG_RECV: Message received by the other node. * CTL_HA_EVT_LINK_CHANGE: Communication channel status changed. 
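 *
 * A minimal usage sketch (the handler name is hypothetical, union
 * ctl_ha_msg comes from ctl_io.h, and error handling is omitted):
 *
 *	static void
 *	ha_evt_handler(ctl_ha_channel ch, ctl_ha_event evt, int param)
 *	{
 *		union ctl_ha_msg msg;
 *
 *		if (evt == CTL_HA_EVT_MSG_RECV)
 *			(void)ctl_ha_msg_recv(ch, &msg, sizeof(msg),
 *			    M_WAITOK);
 *	}
 *
 *	...
 *	ctl_ha_msg_register(CTL_HA_CHAN_CTL, ha_evt_handler);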
*/ typedef enum { CTL_HA_EVT_NONE, CTL_HA_EVT_MSG_RECV, CTL_HA_EVT_LINK_CHANGE, CTL_HA_EVT_MAX } ctl_ha_event; typedef enum { CTL_HA_STATUS_WAIT, CTL_HA_STATUS_SUCCESS, CTL_HA_STATUS_ERROR, CTL_HA_STATUS_INVALID, CTL_HA_STATUS_DISCONNECT, CTL_HA_STATUS_BUSY, CTL_HA_STATUS_MAX } ctl_ha_status; typedef enum { CTL_HA_DT_CMD_READ, CTL_HA_DT_CMD_WRITE, } ctl_ha_dt_cmd; struct ctl_ha_dt_req; typedef void (*ctl_ha_dt_cb)(struct ctl_ha_dt_req *); struct ctl_ha_dt_req { ctl_ha_dt_cmd command; void *context; ctl_ha_dt_cb callback; int ret; uint32_t size; uint8_t *local; uint8_t *remote; TAILQ_ENTRY(ctl_ha_dt_req) links; }; struct ctl_softc; ctl_ha_status ctl_ha_msg_init(struct ctl_softc *softc); void ctl_ha_msg_shutdown(struct ctl_softc *softc); ctl_ha_status ctl_ha_msg_destroy(struct ctl_softc *softc); typedef void (*ctl_evt_handler)(ctl_ha_channel channel, ctl_ha_event event, int param); void ctl_ha_register_evthandler(ctl_ha_channel channel, ctl_evt_handler handler); ctl_ha_status ctl_ha_msg_register(ctl_ha_channel channel, ctl_evt_handler handler); ctl_ha_status ctl_ha_msg_recv(ctl_ha_channel channel, void *addr, size_t len, int wait); ctl_ha_status ctl_ha_msg_send(ctl_ha_channel channel, const void *addr, size_t len, int wait); ctl_ha_status ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr, size_t len, const void *addr2, size_t len2, int wait); ctl_ha_status ctl_ha_msg_abort(ctl_ha_channel channel); ctl_ha_status ctl_ha_msg_deregister(ctl_ha_channel channel); struct ctl_ha_dt_req * ctl_dt_req_alloc(void); void ctl_dt_req_free(struct ctl_ha_dt_req *req); ctl_ha_status ctl_dt_single(struct ctl_ha_dt_req *req); typedef enum { CTL_HA_LINK_OFFLINE = 0x00, CTL_HA_LINK_UNKNOWN = 0x01, CTL_HA_LINK_ONLINE = 0x02 } ctl_ha_link_state; #endif /* _CTL_HA_H_ */ Index: head/sys/cam/ctl/ctl_io.h =================================================================== --- head/sys/cam/ctl/ctl_io.h (revision 326264) +++ head/sys/cam/ctl/ctl_io.h (revision 326265) @@ -1,589 +1,591 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $ * $FreeBSD$ */ /* * CAM Target Layer data movement structures/interface. * * Author: Ken Merry */ #ifndef _CTL_IO_H_ #define _CTL_IO_H_ #define CTL_MAX_CDBLEN 32 /* * Uncomment this next line to enable printing out times for I/Os * that take longer than CTL_TIME_IO_SECS seconds to get to the datamove * and/or done stage. */ #define CTL_TIME_IO #ifdef CTL_TIME_IO #define CTL_TIME_IO_DEFAULT_SECS 90 #endif /* * Uncomment this next line to enable the CTL I/O delay feature. You * can delay I/O at two different points -- datamove and done. This is * useful for diagnosing abort conditions (for hosts that send an abort on a * timeout), and for determining how long a host's timeout is. */ //#define CTL_IO_DELAY typedef enum { CTL_STATUS_NONE, /* No status */ CTL_SUCCESS, /* Transaction completed successfully */ CTL_CMD_TIMEOUT, /* Command timed out, shouldn't happen here */ CTL_SEL_TIMEOUT, /* Selection timeout, shouldn't happen here */ CTL_ERROR, /* General CTL error XXX expand on this? */ CTL_SCSI_ERROR, /* SCSI error, look at status byte/sense data */ CTL_CMD_ABORTED, /* Command aborted, don't return status */ CTL_STATUS_MASK = 0xfff,/* Mask off any status flags */ CTL_AUTOSENSE = 0x1000 /* Autosense performed */ } ctl_io_status; /* * WARNING: Keep the data in/out/none flags where they are. They're used * in conjunction with ctl_cmd_flags. See comment above ctl_cmd_flags * definition in ctl_private.h. */ typedef enum { CTL_FLAG_NONE = 0x00000000, /* no flags */ CTL_FLAG_DATA_IN = 0x00000001, /* DATA IN */ CTL_FLAG_DATA_OUT = 0x00000002, /* DATA OUT */ CTL_FLAG_DATA_NONE = 0x00000003, /* no data */ CTL_FLAG_DATA_MASK = 0x00000003, CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */ CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */ CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */ CTL_FLAG_BLOCKED = 0x00000200, /* on the blocked queue */ CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */ CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */ CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */ CTL_FLAG_DELAY_DONE = 0x00004000, /* delay injection done */ CTL_FLAG_INT_COPY = 0x00008000, /* internal copy, no done call*/ CTL_FLAG_SENT_2OTHER_SC = 0x00010000, CTL_FLAG_FROM_OTHER_SC = 0x00020000, CTL_FLAG_IS_WAS_ON_RTR = 0x00040000, /* Don't rerun cmd on failover*/ CTL_FLAG_BUS_ADDR = 0x00080000, /* ctl_sglist contains BUS addresses, not virtual ones*/ CTL_FLAG_IO_CONT = 0x00100000, /* Continue I/O instead of completing */ #if 0 CTL_FLAG_ALREADY_DONE = 0x00200000 /* I/O already completed */ #endif CTL_FLAG_NO_DATAMOVE = 0x00400000, CTL_FLAG_DMA_QUEUED = 0x00800000, /* DMA queued but not started*/ CTL_FLAG_STATUS_QUEUED = 0x01000000, /* Status queued but not sent*/ CTL_FLAG_FAILOVER = 0x04000000, /* Killed by a failover */ CTL_FLAG_IO_ACTIVE = 0x08000000, /* I/O active on this SC */ CTL_FLAG_STATUS_SENT = 0x10000000, /* Status sent by datamove */ CTL_FLAG_SERSEQ_DONE = 0x20000000 /* All storage I/O started */ } ctl_io_flags; struct ctl_lba_len { uint64_t lba; uint32_t len; }; struct ctl_lba_len_flags { uint64_t lba; uint32_t len; uint32_t flags; #define CTL_LLF_FUA 0x04000000 #define CTL_LLF_DPO 0x08000000 #define CTL_LLF_READ 0x10000000 #define CTL_LLF_WRITE 0x20000000 #define CTL_LLF_VERIFY 0x40000000 #define CTL_LLF_COMPARE 0x80000000 }; struct ctl_ptr_len_flags { uint8_t *ptr; uint32_t len; uint32_t flags; }; union ctl_priv { uint8_t bytes[sizeof(uint64_t) * 2]; uint64_t 
integer; uint64_t integers[2]; void *ptr; void *ptrs[2]; }; /* * Number of CTL private areas. */ #define CTL_NUM_PRIV 6 /* * Which private area are we using for a particular piece of data? */ #define CTL_PRIV_LUN 0 /* CTL LUN pointer goes here */ #define CTL_PRIV_LBA_LEN 1 /* Decoded LBA/len for read/write*/ #define CTL_PRIV_MODEPAGE 1 /* Modepage info for config write */ #define CTL_PRIV_BACKEND 2 /* Reserved for block, RAIDCore */ #define CTL_PRIV_BACKEND_LUN 3 /* Backend LUN pointer */ #define CTL_PRIV_FRONTEND 4 /* Frontend storage */ #define CTL_PRIV_FRONTEND2 5 /* Another frontend storage */ #define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0]) #define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1]) #define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0]) #define CTL_PORT(io) (((struct ctl_softc *)CTL_SOFTC(io))-> \ ctl_ports[(io)->io_hdr.nexus.targ_port]) #define CTL_INVALID_PORTNAME 0xFF #define CTL_UNMAPPED_IID 0xFF struct ctl_sg_entry { void *addr; size_t len; }; typedef enum { CTL_IO_NONE, CTL_IO_SCSI, CTL_IO_TASK, } ctl_io_type; struct ctl_nexus { uint32_t initid; /* Initiator ID */ uint32_t targ_port; /* Target port, filled in by PORT */ uint32_t targ_lun; /* Destination lun */ uint32_t targ_mapped_lun; /* Destination lun CTL-wide */ }; typedef enum { CTL_MSG_SERIALIZE, CTL_MSG_R2R, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU, CTL_MSG_MANAGE_TASKS, CTL_MSG_PERS_ACTION, CTL_MSG_DATAMOVE, CTL_MSG_DATAMOVE_DONE, CTL_MSG_UA, /* Set/clear UA on secondary. */ CTL_MSG_PORT_SYNC, /* Information about port. */ CTL_MSG_LUN_SYNC, /* Information about LUN. */ CTL_MSG_IID_SYNC, /* Information about initiator. */ CTL_MSG_LOGIN, /* Information about HA peer. */ CTL_MSG_MODE_SYNC, /* Mode page current content. */ CTL_MSG_FAILOVER /* Fake, never sent though the wire */ } ctl_msg_type; struct ctl_scsiio; struct ctl_io_hdr { uint32_t version; /* interface version XXX */ ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */ ctl_msg_type msg_type; struct ctl_nexus nexus; /* Initiator, port, target, lun */ uint32_t iid_indx; /* the index into the iid mapping */ uint32_t flags; /* transaction flags */ uint32_t status; /* transaction status */ uint32_t port_status; /* trans status, set by PORT, 0 = good*/ uint32_t timeout; /* timeout in ms */ uint32_t retries; /* retry count */ #ifdef CTL_IO_DELAY struct callout delay_callout; #endif /* CTL_IO_DELAY */ #ifdef CTL_TIME_IO time_t start_time; /* I/O start time */ struct bintime start_bt; /* Timer start ticks */ struct bintime dma_start_bt; /* DMA start ticks */ struct bintime dma_bt; /* DMA total ticks */ #endif /* CTL_TIME_IO */ uint32_t num_dmas; /* Number of DMAs */ union ctl_io *original_sc; union ctl_io *serializing_sc; void *pool; /* I/O pool */ union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */ struct ctl_sg_entry *remote_sglist; struct ctl_sg_entry *local_sglist; STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */ TAILQ_ENTRY(ctl_io_hdr) ooa_links; TAILQ_ENTRY(ctl_io_hdr) blocked_links; }; typedef enum { CTL_TAG_UNTAGGED, CTL_TAG_SIMPLE, CTL_TAG_ORDERED, CTL_TAG_HEAD_OF_QUEUE, CTL_TAG_ACA } ctl_tag_type; union ctl_io; /* * SCSI passthrough I/O structure for the CAM Target Layer. Note * that some of these fields are here for completeness, but they aren't * used in the CTL implementation. e.g., timeout and retries won't be * used. * * Note: Make sure the io_hdr is *always* the first element in this * structure. 
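 *
 * To illustrate the kern_* bookkeeping below (the numbers are only
 * an example): a 192 kB WRITE moved in three 64 kB datamove()
 * rounds keeps kern_total_len == 196608 throughout; the successive
 * rounds see kern_data_len == 65536 with kern_rel_offset == 0,
 * 65536 and 131072, while kern_data_resid drops from 131072 to
 * 65536 to 0.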
*/ struct ctl_scsiio { struct ctl_io_hdr io_hdr; /* common to all I/O types */ /* * The ext_* fields are generally intended for frontend use; CTL itself * doesn't modify or use them. */ uint32_t ext_sg_entries; /* 0 = no S/G list, > 0 = num entries */ uint8_t *ext_data_ptr; /* data buffer or S/G list */ uint32_t ext_data_len; /* Data transfer length */ uint32_t ext_data_filled; /* Amount of data filled so far */ /* * The number of scatter/gather entries in the list pointed to * by kern_data_ptr. 0 means there is no list, just a data pointer. */ uint32_t kern_sg_entries; uint32_t rem_sg_entries; /* Unused. */ /* * The data pointer or a pointer to the scatter/gather list. */ uint8_t *kern_data_ptr; /* * Length of the data buffer or scatter/gather list. It's also * the length of this particular piece of the data transfer, * ie. number of bytes expected to be transferred by the current * invocation of frontend's datamove() callback. It's always * less than or equal to kern_total_len. */ uint32_t kern_data_len; /* * Total length of data to be transferred during this particular * SCSI command, as decoded from SCSI CDB. */ uint32_t kern_total_len; /* * Amount of data left after the current data transfer. */ uint32_t kern_data_resid; /* * Byte offset of this transfer, equal to the amount of data * already transferred for this SCSI command during previous * datamove() invocations. */ uint32_t kern_rel_offset; struct scsi_sense_data sense_data; /* sense data */ uint8_t sense_len; /* Returned sense length */ uint8_t scsi_status; /* SCSI status byte */ uint8_t sense_residual; /* Unused. */ uint32_t residual; /* Unused */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, head of queue,etc.*/ uint8_t cdb_len; /* CDB length */ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */ int (*be_move_done)(union ctl_io *io); /* called by fe */ int (*io_cont)(union ctl_io *io); /* to continue processing */ }; typedef enum { CTL_TASK_ABORT_TASK, CTL_TASK_ABORT_TASK_SET, CTL_TASK_CLEAR_ACA, CTL_TASK_CLEAR_TASK_SET, CTL_TASK_I_T_NEXUS_RESET, CTL_TASK_LUN_RESET, CTL_TASK_TARGET_RESET, CTL_TASK_BUS_RESET, CTL_TASK_PORT_LOGIN, CTL_TASK_PORT_LOGOUT, CTL_TASK_QUERY_TASK, CTL_TASK_QUERY_TASK_SET, CTL_TASK_QUERY_ASYNC_EVENT } ctl_task_type; typedef enum { CTL_TASK_FUNCTION_COMPLETE, CTL_TASK_FUNCTION_SUCCEEDED, CTL_TASK_FUNCTION_REJECTED, CTL_TASK_LUN_DOES_NOT_EXIST, CTL_TASK_FUNCTION_NOT_SUPPORTED } ctl_task_status; /* * Task management I/O structure. Aborts, bus resets, etc., are sent using * this structure. * * Note: Make sure the io_hdr is *always* the first element in this * structure. */ struct ctl_taskio { struct ctl_io_hdr io_hdr; /* common to all I/O types */ ctl_task_type task_action; /* Target Reset, Abort, etc. */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ uint8_t task_status; /* Complete, Succeeded, etc. */ uint8_t task_resp[3];/* Response information */ }; /* * HA link messages. */ #define CTL_HA_VERSION 3 /* * Used for CTL_MSG_LOGIN. */ struct ctl_ha_msg_login { ctl_msg_type msg_type; int version; int ha_mode; int ha_id; int max_luns; int max_ports; int max_init_per_port; }; typedef enum { CTL_PR_REG_KEY, CTL_PR_UNREG_KEY, CTL_PR_PREEMPT, CTL_PR_CLEAR, CTL_PR_RESERVE, CTL_PR_RELEASE } ctl_pr_action; /* * The PR info is specifically for sending Persistent Reserve actions * to the other SC which it must also act on. * * Note: Make sure the io_hdr is *always* the first element in this * structure. 
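 * (The io_hdr note applies to struct ctl_prio further below, which
 * carries this PR info behind an io_hdr; ctl_pr_info itself has no
 * header.)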
*/ struct ctl_pr_info { ctl_pr_action action; uint8_t sa_res_key[8]; uint8_t res_type; uint32_t residx; }; struct ctl_ha_msg_hdr { ctl_msg_type msg_type; uint32_t status; /* transaction status */ union ctl_io *original_sc; union ctl_io *serializing_sc; struct ctl_nexus nexus; /* Initiator, port, target, lun */ }; #define CTL_HA_MAX_SG_ENTRIES 16 #define CTL_HA_DATAMOVE_SEGMENT 131072 /* * Used for CTL_MSG_PERS_ACTION. */ struct ctl_ha_msg_pr { struct ctl_ha_msg_hdr hdr; struct ctl_pr_info pr_info; }; /* * Used for CTL_MSG_UA. */ struct ctl_ha_msg_ua { struct ctl_ha_msg_hdr hdr; int ua_all; int ua_set; int ua_type; uint8_t ua_info[8]; }; /* * The S/G handling here is a little different than the standard ctl_scsiio * structure, because we can't pass data by reference in between controllers. * The S/G list in the ctl_scsiio struct is normally passed in the * kern_data_ptr field. So kern_sg_entries here will always be non-zero, * even if there is only one entry. * * Used for CTL_MSG_DATAMOVE. */ struct ctl_ha_msg_dt { struct ctl_ha_msg_hdr hdr; ctl_io_flags flags; /* Only I/O flags are used here */ uint32_t sg_sequence; /* S/G portion number */ uint8_t sg_last; /* last S/G batch = 1 */ uint32_t sent_sg_entries; /* previous S/G count */ uint32_t cur_sg_entries; /* current S/G entries */ uint32_t kern_sg_entries; /* total S/G entries */ uint32_t kern_data_len; /* Length of this S/G list */ uint32_t kern_total_len; /* Total length of this transaction */ uint32_t kern_data_resid; /* Length left to transfer after this*/ uint32_t kern_rel_offset; /* Byte Offset of this transfer */ struct ctl_sg_entry sg_list[CTL_HA_MAX_SG_ENTRIES]; }; /* * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU, * and CTL_MSG_DATAMOVE_DONE. */ struct ctl_ha_msg_scsi { struct ctl_ha_msg_hdr hdr; uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */ uint8_t cdb_len; /* CDB length */ uint8_t scsi_status; /* SCSI status byte */ uint8_t sense_len; /* Returned sense length */ uint32_t port_status; /* trans status, set by FETD, 0 = good*/ uint32_t kern_data_resid; /* for DATAMOVE_DONE */ struct scsi_sense_data sense_data; /* sense data */ }; /* * Used for CTL_MSG_MANAGE_TASKS. */ struct ctl_ha_msg_task { struct ctl_ha_msg_hdr hdr; ctl_task_type task_action; /* Target Reset, Abort, etc. */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ }; /* * Used for CTL_MSG_PORT_SYNC. */ struct ctl_ha_msg_port { struct ctl_ha_msg_hdr hdr; int port_type; int physical_port; int virtual_port; int status; int name_len; int lun_map_len; int port_devid_len; int target_devid_len; int init_devid_len; uint8_t data[]; }; /* * Used for CTL_MSG_LUN_SYNC. */ struct ctl_ha_msg_lun { struct ctl_ha_msg_hdr hdr; int flags; unsigned int pr_generation; uint32_t pr_res_idx; uint8_t pr_res_type; int lun_devid_len; int pr_key_count; uint8_t data[]; }; struct ctl_ha_msg_lun_pr_key { uint32_t pr_iid; uint64_t pr_key; }; /* * Used for CTL_MSG_IID_SYNC. */ struct ctl_ha_msg_iid { struct ctl_ha_msg_hdr hdr; int in_use; int name_len; uint64_t wwpn; uint8_t data[]; }; /* * Used for CTL_MSG_MODE_SYNC. 
*/ struct ctl_ha_msg_mode { struct ctl_ha_msg_hdr hdr; uint8_t page_code; uint8_t subpage; uint16_t page_len; uint8_t data[]; }; union ctl_ha_msg { struct ctl_ha_msg_hdr hdr; struct ctl_ha_msg_task task; struct ctl_ha_msg_scsi scsi; struct ctl_ha_msg_dt dt; struct ctl_ha_msg_pr pr; struct ctl_ha_msg_ua ua; struct ctl_ha_msg_port port; struct ctl_ha_msg_lun lun; struct ctl_ha_msg_iid iid; struct ctl_ha_msg_login login; struct ctl_ha_msg_mode mode; }; struct ctl_prio { struct ctl_io_hdr io_hdr; struct ctl_ha_msg_pr pr_msg; }; union ctl_io { struct ctl_io_hdr io_hdr; /* common to all I/O types */ struct ctl_scsiio scsiio; /* Normal SCSI commands */ struct ctl_taskio taskio; /* SCSI task management/reset */ struct ctl_prio presio; /* update per. res info on other SC */ }; #ifdef _KERNEL union ctl_io *ctl_alloc_io(void *pool_ref); union ctl_io *ctl_alloc_io_nowait(void *pool_ref); void ctl_free_io(union ctl_io *io); void ctl_zero_io(union ctl_io *io); #endif /* _KERNEL */ #endif /* _CTL_IO_H_ */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_ioctl.h =================================================================== --- head/sys/cam/ctl/ctl_ioctl.h (revision 326264) +++ head/sys/cam/ctl/ctl_ioctl.h (revision 326265) @@ -1,841 +1,843 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2011 Spectra Logic Corporation * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ioctl.h#4 $ * $FreeBSD$ */ /* * CAM Target Layer ioctl interface. * * Author: Ken Merry */ #ifndef _CTL_IOCTL_H_ #define _CTL_IOCTL_H_ #ifdef ICL_KERNEL_PROXY #include #endif #include #define CTL_DEFAULT_DEV "/dev/cam/ctl" /* * Maximum number of targets we support. */ #define CTL_MAX_TARGETS 1 /* * Maximum target ID we support. */ #define CTL_MAX_TARGID 15 /* * Maximum number of initiators per port. */ #define CTL_MAX_INIT_PER_PORT 2048 /* Hopefully this won't conflict with new misc devices that pop up */ #define CTL_MINOR 225 /* Legacy statistics accumulated for every port for every LU. 
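 * (Disabled by default; defining CTL_LEGACY_STATS brings the
 * per-port struct ctl_lun_io_stats interface below back into the
 * build.)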
*/ //#define CTL_LEGACY_STATS 1 typedef enum { CTL_DELAY_TYPE_NONE, CTL_DELAY_TYPE_CONT, CTL_DELAY_TYPE_ONESHOT } ctl_delay_type; typedef enum { CTL_DELAY_LOC_NONE, CTL_DELAY_LOC_DATAMOVE, CTL_DELAY_LOC_DONE, } ctl_delay_location; typedef enum { CTL_DELAY_STATUS_NONE, CTL_DELAY_STATUS_OK, CTL_DELAY_STATUS_INVALID_LUN, CTL_DELAY_STATUS_INVALID_TYPE, CTL_DELAY_STATUS_INVALID_LOC, CTL_DELAY_STATUS_NOT_IMPLEMENTED } ctl_delay_status; struct ctl_io_delay_info { uint32_t lun_id; ctl_delay_type delay_type; ctl_delay_location delay_loc; uint32_t delay_secs; ctl_delay_status status; }; typedef enum { CTL_STATS_NO_IO, CTL_STATS_READ, CTL_STATS_WRITE } ctl_stat_types; #define CTL_STATS_NUM_TYPES 3 typedef enum { CTL_SS_OK, CTL_SS_NEED_MORE_SPACE, CTL_SS_ERROR } ctl_stats_status; typedef enum { CTL_STATS_FLAG_NONE = 0x00, CTL_STATS_FLAG_TIME_VALID = 0x01 } ctl_stats_flags; #ifdef CTL_LEGACY_STATS typedef enum { CTL_LUN_STATS_NO_BLOCKSIZE = 0x01 } ctl_lun_stats_flags; struct ctl_lun_io_port_stats { uint32_t targ_port; uint64_t bytes[CTL_STATS_NUM_TYPES]; uint64_t operations[CTL_STATS_NUM_TYPES]; struct bintime time[CTL_STATS_NUM_TYPES]; uint64_t num_dmas[CTL_STATS_NUM_TYPES]; struct bintime dma_time[CTL_STATS_NUM_TYPES]; }; struct ctl_lun_io_stats { uint8_t device_type; uint64_t lun_number; uint32_t blocksize; ctl_lun_stats_flags flags; struct ctl_lun_io_port_stats *ports; }; struct ctl_stats { int alloc_len; /* passed to kernel */ struct ctl_lun_io_stats *lun_stats; /* passed to/from kernel */ int fill_len; /* passed to userland */ int num_luns; /* passed to userland */ ctl_stats_status status; /* passed to userland */ ctl_stats_flags flags; /* passed to userland */ struct timespec timestamp; /* passed to userland */ }; #endif /* CTL_LEGACY_STATS */ struct ctl_io_stats { uint32_t item; uint64_t bytes[CTL_STATS_NUM_TYPES]; uint64_t operations[CTL_STATS_NUM_TYPES]; uint64_t dmas[CTL_STATS_NUM_TYPES]; struct bintime time[CTL_STATS_NUM_TYPES]; struct bintime dma_time[CTL_STATS_NUM_TYPES]; }; struct ctl_get_io_stats { struct ctl_io_stats *stats; /* passed to/from kernel */ size_t alloc_len; /* passed to kernel */ size_t fill_len; /* passed to userland */ int first_item; /* passed to kernel */ int num_items; /* passed to userland */ ctl_stats_status status; /* passed to userland */ ctl_stats_flags flags; /* passed to userland */ struct timespec timestamp; /* passed to userland */ }; /* * The types of errors that can be injected: * * NONE: No error specified. * ABORTED: SSD_KEY_ABORTED_COMMAND, 0x45, 0x00 * MEDIUM_ERR: Medium error, different asc/ascq depending on read/write. * UA: Unit attention. * CUSTOM: User specifies the sense data. * TYPE: Mask to use with error types. * * Flags that affect injection behavior: * CONTINUOUS: This error will stay around until explicitly cleared. * DESCRIPTOR: Use descriptor sense instead of fixed sense. */ typedef enum { CTL_LUN_INJ_NONE = 0x000, CTL_LUN_INJ_ABORTED = 0x001, CTL_LUN_INJ_MEDIUM_ERR = 0x002, CTL_LUN_INJ_UA = 0x003, CTL_LUN_INJ_CUSTOM = 0x004, CTL_LUN_INJ_TYPE = 0x0ff, CTL_LUN_INJ_CONTINUOUS = 0x100, CTL_LUN_INJ_DESCRIPTOR = 0x200 } ctl_lun_error; /* * Flags to specify what type of command the given error pattern will * execute on. The first group of types can be ORed together. * * READ: Any read command. * WRITE: Any write command. * READWRITE: Any read or write command. * READCAP: Any read capacity command. * TUR: Test Unit Ready. * ANY: Any command. * MASK: Mask for basic command patterns. 
* * Special types: * * CMD: The CDB to act on is specified in struct ctl_error_desc_cmd. * RANGE: For read/write commands, act when the LBA is in the * specified range. */ typedef enum { CTL_LUN_PAT_NONE = 0x000, CTL_LUN_PAT_READ = 0x001, CTL_LUN_PAT_WRITE = 0x002, CTL_LUN_PAT_READWRITE = CTL_LUN_PAT_READ | CTL_LUN_PAT_WRITE, CTL_LUN_PAT_READCAP = 0x004, CTL_LUN_PAT_TUR = 0x008, CTL_LUN_PAT_ANY = 0x0ff, CTL_LUN_PAT_MASK = 0x0ff, CTL_LUN_PAT_CMD = 0x100, CTL_LUN_PAT_RANGE = 0x200 } ctl_lun_error_pattern; /* * This structure allows the user to specify a particular CDB pattern to * look for. * * cdb_pattern: Fill in the relevant bytes to look for in the CDB. * cdb_valid_bytes: Bitmask specifying valid bytes in the cdb_pattern. * flags: Specify any command flags (see ctl_io_flags) that * should be set. */ struct ctl_error_desc_cmd { uint8_t cdb_pattern[CTL_MAX_CDBLEN]; uint32_t cdb_valid_bytes; uint32_t flags; }; /* * Error injection descriptor. * * lun_id LUN to act on. * lun_error: The type of error to inject. See above for descriptions. * error_pattern: What kind of command to act on. See above. * cmd_desc: For CTL_LUN_PAT_CMD only. * lba_range: For CTL_LUN_PAT_RANGE only. * custom_sense: Specify sense. For CTL_LUN_INJ_CUSTOM only. * serial: Serial number returned by the kernel. Use for deletion. * links: Kernel use only. */ struct ctl_error_desc { uint32_t lun_id; /* To kernel */ ctl_lun_error lun_error; /* To kernel */ ctl_lun_error_pattern error_pattern; /* To kernel */ struct ctl_error_desc_cmd cmd_desc; /* To kernel */ struct ctl_lba_len lba_range; /* To kernel */ struct scsi_sense_data custom_sense; /* To kernel */ uint64_t serial; /* From kernel */ STAILQ_ENTRY(ctl_error_desc) links; /* Kernel use only */ }; typedef enum { CTL_OOA_FLAG_NONE = 0x00, CTL_OOA_FLAG_ALL_LUNS = 0x01 } ctl_ooa_flags; typedef enum { CTL_OOA_OK, CTL_OOA_NEED_MORE_SPACE, CTL_OOA_ERROR } ctl_get_ooa_status; typedef enum { CTL_OOACMD_FLAG_NONE = 0x00, CTL_OOACMD_FLAG_DMA = 0x01, CTL_OOACMD_FLAG_BLOCKED = 0x02, CTL_OOACMD_FLAG_ABORT = 0x04, CTL_OOACMD_FLAG_RTR = 0x08, CTL_OOACMD_FLAG_DMA_QUEUED = 0x10 } ctl_ooa_cmd_flags; struct ctl_ooa_entry { ctl_ooa_cmd_flags cmd_flags; uint8_t cdb[CTL_MAX_CDBLEN]; uint8_t cdb_len; uint32_t tag_num; uint32_t lun_num; struct bintime start_bt; }; struct ctl_ooa { ctl_ooa_flags flags; /* passed to kernel */ uint64_t lun_num; /* passed to kernel */ uint32_t alloc_len; /* passed to kernel */ uint32_t alloc_num; /* passed to kernel */ struct ctl_ooa_entry *entries; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ uint32_t fill_num; /* passed to userland */ uint32_t dropped_num; /* passed to userland */ struct bintime cur_bt; /* passed to userland */ ctl_get_ooa_status status; /* passed to userland */ }; typedef enum { CTL_LUN_NOSTATUS, CTL_LUN_OK, CTL_LUN_ERROR, CTL_LUN_WARNING } ctl_lun_status; #define CTL_ERROR_STR_LEN 160 #define CTL_BEARG_RD 0x01 #define CTL_BEARG_WR 0x02 #define CTL_BEARG_RW (CTL_BEARG_RD|CTL_BEARG_WR) #define CTL_BEARG_ASCII 0x04 /* * Backend Argument: * * namelen: Length of the name field, including the terminating NUL. * * name: Name of the parameter. This must be NUL-terminated. * * flags: Flags for the parameter, see above for values. * * vallen: Length of the value in bytes, including the terminating NUL. * * value: Value to be set/fetched. This must be NUL-terminated. * * kname: For kernel use only. * * kvalue: For kernel use only. 
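 *
 * For example (the values, and the backend-specific "file" option
 * name, are shown only as an illustrative sketch):
 *
 *	struct ctl_be_arg arg = {
 *		.namelen = sizeof("file"),	/* includes the NUL */
 *		.name = "file",
 *		.flags = CTL_BEARG_RD | CTL_BEARG_ASCII,
 *		.vallen = sizeof("/dev/md0"),	/* includes the NUL */
 *		.value = "/dev/md0",
 *	};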
*/ struct ctl_be_arg { unsigned int namelen; char *name; int flags; unsigned int vallen; void *value; char *kname; void *kvalue; }; typedef enum { CTL_LUNREQ_CREATE, CTL_LUNREQ_RM, CTL_LUNREQ_MODIFY, } ctl_lunreq_type; /* * The ID_REQ flag is used to say that the caller has requested a * particular LUN ID in the req_lun_id field. If we cannot allocate that * LUN ID, the ctl_add_lun() call will fail. * * The STOPPED flag tells us that the LUN should default to the powered * off state. It will return 0x04,0x02 until it is powered up. ("Logical * unit not ready, initializing command required.") * * The NO_MEDIA flag tells us that the LUN has no media inserted. * * The PRIMARY flag tells us that this LUN is registered as a Primary LUN * which is accessible via the Master shelf controller in an HA. This flag * being set indicates a Primary LUN. This flag being reset represents a * Secondary LUN controlled by the Secondary controller in an HA * configuration. Flag is applicable at this time to T_DIRECT types. * * The SERIAL_NUM flag tells us that the serial_num field is filled in and * valid for use in SCSI INQUIRY VPD page 0x80. * * The DEVID flag tells us that the device_id field is filled in and * valid for use in SCSI INQUIRY VPD page 0x83. * * The DEV_TYPE flag tells us that the device_type field is filled in. * * The EJECTED flag tells us that the removable LUN has tray open. * * The UNMAP flag tells us that this LUN supports UNMAP. * * The OFFLINE flag tells us that this LUN can not access backing store. */ typedef enum { CTL_LUN_FLAG_ID_REQ = 0x01, CTL_LUN_FLAG_STOPPED = 0x02, CTL_LUN_FLAG_NO_MEDIA = 0x04, CTL_LUN_FLAG_PRIMARY = 0x08, CTL_LUN_FLAG_SERIAL_NUM = 0x10, CTL_LUN_FLAG_DEVID = 0x20, CTL_LUN_FLAG_DEV_TYPE = 0x40, CTL_LUN_FLAG_UNMAP = 0x80, CTL_LUN_FLAG_EJECTED = 0x100, CTL_LUN_FLAG_READONLY = 0x200 } ctl_backend_lun_flags; /* * LUN creation parameters: * * flags: Various LUN flags, see above. * * device_type: The SCSI device type. e.g. 0 for Direct Access, * 3 for Processor, etc. Only certain backends may * support setting this field. The CTL_LUN_FLAG_DEV_TYPE * flag should be set in the flags field if the device * type is set. * * lun_size_bytes: The size of the LUN in bytes. For some backends * this is relevant (e.g. ramdisk), for others, it may * be ignored in favor of using the properties of the * backing store. If specified, this should be a * multiple of the blocksize. * * The actual size of the LUN is returned in this * field. * * blocksize_bytes: The LUN blocksize in bytes. For some backends this * is relevant, for others it may be ignored in * favor of using the properties of the backing store. * * The actual blocksize of the LUN is returned in this * field. * * req_lun_id: The requested LUN ID. The CTL_LUN_FLAG_ID_REQ flag * should be set if this is set. The request will be * granted if the LUN number is available, otherwise * the LUN addition request will fail. * * The allocated LUN number is returned in this field. * * serial_num: This is the value returned in SCSI INQUIRY VPD page * 0x80. If it is specified, the CTL_LUN_FLAG_SERIAL_NUM * flag should be set. * * The serial number value used is returned in this * field. * * device_id: This is the value returned in the T10 vendor ID * based DESIGNATOR field in the SCSI INQUIRY VPD page * 0x83 data. If it is specified, the CTL_LUN_FLAG_DEVID * flag should be set. * * The device id value used is returned in this field. 
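 *
 * A minimal sketch of a creation request (values illustrative): a
 * 1 GB direct-access LUN with 512-byte blocks, letting CTL pick the
 * LUN ID, serial number and device ID:
 *
 *	struct ctl_lun_create_params p = {
 *		.flags = CTL_LUN_FLAG_DEV_TYPE,
 *		.device_type = 0,		/* Direct Access */
 *		.lun_size_bytes = 1073741824,
 *		.blocksize_bytes = 512,
 *	};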
*/ struct ctl_lun_create_params { ctl_backend_lun_flags flags; uint8_t device_type; uint64_t lun_size_bytes; uint32_t blocksize_bytes; uint32_t req_lun_id; uint8_t serial_num[CTL_SN_LEN]; uint8_t device_id[CTL_DEVID_LEN]; }; /* * LUN removal parameters: * * lun_id: The number of the LUN to delete. This must be set. * The LUN must be backed by the given backend. */ struct ctl_lun_rm_params { uint32_t lun_id; }; /* * LUN modification parameters: * * lun_id: The number of the LUN to modify. This must be set. * The LUN must be backed by the given backend. * * lun_size_bytes: The size of the LUN in bytes. If zero, update * the size using the backing file size, if possible. */ struct ctl_lun_modify_params { uint32_t lun_id; uint64_t lun_size_bytes; }; /* * Union of request type data. Fill in the appropriate union member for * the request type. */ union ctl_lunreq_data { struct ctl_lun_create_params create; struct ctl_lun_rm_params rm; struct ctl_lun_modify_params modify; }; /* * LUN request interface: * * backend: This is required, and is a NUL-terminated string * that is the name of the backend, like "ramdisk" or * "block". * * reqtype: The type of request, CTL_LUNREQ_CREATE to create a * LUN, CTL_LUNREQ_RM to delete a LUN. * * reqdata: Request type-specific information. See the * description of the individual union members above * for more information. * * num_be_args: This is the number of backend-specific arguments * in the be_args array. * * be_args: This is an array of backend-specific arguments. * See above for a description of the fields in this * structure. * * status: Status of the LUN request. * * error_str: If the status is CTL_LUN_ERROR, this will * contain a string describing the error. * * kern_be_args: For kernel use only. */ struct ctl_lun_req { #define CTL_BE_NAME_LEN 32 char backend[CTL_BE_NAME_LEN]; ctl_lunreq_type reqtype; union ctl_lunreq_data reqdata; int num_be_args; struct ctl_be_arg *be_args; ctl_lun_status status; char error_str[CTL_ERROR_STR_LEN]; struct ctl_be_arg *kern_be_args; }; /* * LUN list status: * * NONE: No status. * * OK: Request completed successfully. * * NEED_MORE_SPACE: The allocated length of the entries field is too * small for the available data. * * ERROR: An error occurred, look at the error string for a * description of the error. */ typedef enum { CTL_LUN_LIST_NONE, CTL_LUN_LIST_OK, CTL_LUN_LIST_NEED_MORE_SPACE, CTL_LUN_LIST_ERROR } ctl_lun_list_status; /* * LUN list interface * * backend_name: This is a NUL-terminated string. If the string * length is 0, then all LUNs on all backends will * be enumerated. Otherwise this is the name of the * backend to be enumerated, like "ramdisk" or "block". * * alloc_len: The length of the data buffer allocated for entries. * If the call returns CTL_LUN_LIST_NEED_MORE_SPACE, * the buffer was too small; allocate a larger one and * retry the call. * * lun_xml: XML-formatted information on the requested LUNs. * * fill_len: The amount of data filled in the storage for entries. * * status: The status of the request. See above for the * description of the values of this field. * * error_str: If the status indicates an error, this string will * be filled in to describe the error.
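 * (Usage sketch: one common pattern, e.g. in ctladm(8), is to start
 * with a few kilobytes for lun_xml and, while the ioctl keeps
 * returning CTL_LUN_LIST_NEED_MORE_SPACE, double alloc_len and
 * retry.)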
*/ struct ctl_lun_list { char backend[CTL_BE_NAME_LEN]; /* passed to kernel */ uint32_t alloc_len; /* passed to kernel */ char *lun_xml; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ ctl_lun_list_status status; /* passed to userland */ char error_str[CTL_ERROR_STR_LEN]; /* passed to userland */ }; /* * Port request interface: * * driver: This is required, and is a NUL-terminated string * that is the name of the frontend, like "iscsi". * * reqtype: The type of request, CTL_REQ_CREATE to create a * port, CTL_REQ_REMOVE to delete a port. * * num_args: This is the number of frontend-specific arguments * in the args array. * * args: This is an array of frontend-specific arguments. * See above for a description of the fields in this * structure. * * status: Status of the request. * * error_str: If the status is CTL_LUN_ERROR, this will * contain a string describing the error. * * kern_args: For kernel use only. */ typedef enum { CTL_REQ_CREATE, CTL_REQ_REMOVE, CTL_REQ_MODIFY, } ctl_req_type; struct ctl_req { char driver[CTL_DRIVER_NAME_LEN]; ctl_req_type reqtype; int num_args; struct ctl_be_arg *args; ctl_lun_status status; char error_str[CTL_ERROR_STR_LEN]; struct ctl_be_arg *kern_args; }; /* * iSCSI status * * OK: Request completed successfully. * * ERROR: An error occurred, look at the error string for a * description of the error. * * CTL_ISCSI_LIST_NEED_MORE_SPACE: * The user has to pass a larger buffer for the * CTL_ISCSI_LIST ioctl. */ typedef enum { CTL_ISCSI_OK, CTL_ISCSI_ERROR, CTL_ISCSI_LIST_NEED_MORE_SPACE, CTL_ISCSI_SESSION_NOT_FOUND } ctl_iscsi_status; typedef enum { CTL_ISCSI_HANDOFF, CTL_ISCSI_LIST, CTL_ISCSI_LOGOUT, CTL_ISCSI_TERMINATE, CTL_ISCSI_LIMITS, #if defined(ICL_KERNEL_PROXY) || 1 /* * We actually need these in all cases, but leave the ICL_KERNEL_PROXY * test as a reminder to remove them along with the rest of the proxy * code, eventually. */ CTL_ISCSI_LISTEN, CTL_ISCSI_ACCEPT, CTL_ISCSI_SEND, CTL_ISCSI_RECEIVE, #endif } ctl_iscsi_type; typedef enum { CTL_ISCSI_DIGEST_NONE, CTL_ISCSI_DIGEST_CRC32C } ctl_iscsi_digest; #define CTL_ISCSI_NAME_LEN 224 /* 223 bytes, by RFC 3720, + '\0' */ #define CTL_ISCSI_ADDR_LEN 47 /* INET6_ADDRSTRLEN + '\0' */ #define CTL_ISCSI_ALIAS_LEN 128 /* Arbitrary. */ #define CTL_ISCSI_OFFLOAD_LEN 8 /* Arbitrary. */ struct ctl_iscsi_handoff_params { char initiator_name[CTL_ISCSI_NAME_LEN]; char initiator_addr[CTL_ISCSI_ADDR_LEN]; char initiator_alias[CTL_ISCSI_ALIAS_LEN]; uint8_t initiator_isid[6]; char target_name[CTL_ISCSI_NAME_LEN]; int socket; int portal_group_tag; /* * Connection parameters negotiated by ctld(8).
*/ ctl_iscsi_digest header_digest; ctl_iscsi_digest data_digest; uint32_t cmdsn; uint32_t statsn; int max_recv_data_segment_length; int max_burst_length; int first_burst_length; uint32_t immediate_data; char offload[CTL_ISCSI_OFFLOAD_LEN]; #ifdef ICL_KERNEL_PROXY int connection_id; #else int spare; #endif int max_send_data_segment_length; }; struct ctl_iscsi_list_params { uint32_t alloc_len; /* passed to kernel */ char *conn_xml; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ int spare[4]; }; struct ctl_iscsi_logout_params { int connection_id; /* passed to kernel */ char initiator_name[CTL_ISCSI_NAME_LEN]; /* passed to kernel */ char initiator_addr[CTL_ISCSI_ADDR_LEN]; /* passed to kernel */ int all; /* passed to kernel */ int spare[4]; }; struct ctl_iscsi_terminate_params { int connection_id; /* passed to kernel */ char initiator_name[CTL_ISCSI_NAME_LEN]; /* passed to kernel */ char initiator_addr[CTL_ISCSI_NAME_LEN]; /* passed to kernel */ int all; /* passed to kernel */ int spare[4]; }; struct ctl_iscsi_limits_params { /* passed to kernel */ char offload[CTL_ISCSI_OFFLOAD_LEN]; /* passed to userland */ size_t spare; int max_recv_data_segment_length; int max_send_data_segment_length; int max_burst_length; int first_burst_length; }; #ifdef ICL_KERNEL_PROXY struct ctl_iscsi_listen_params { int iser; int domain; int socktype; int protocol; struct sockaddr *addr; socklen_t addrlen; int portal_id; int spare[4]; }; struct ctl_iscsi_accept_params { int connection_id; int portal_id; struct sockaddr *initiator_addr; socklen_t initiator_addrlen; int spare[4]; }; struct ctl_iscsi_send_params { int connection_id; void *bhs; size_t spare; void *spare2; size_t data_segment_len; void *data_segment; int spare3[4]; }; struct ctl_iscsi_receive_params { int connection_id; void *bhs; size_t spare; void *spare2; size_t data_segment_len; void *data_segment; int spare3[4]; }; #endif /* ICL_KERNEL_PROXY */ union ctl_iscsi_data { struct ctl_iscsi_handoff_params handoff; struct ctl_iscsi_list_params list; struct ctl_iscsi_logout_params logout; struct ctl_iscsi_terminate_params terminate; struct ctl_iscsi_limits_params limits; #ifdef ICL_KERNEL_PROXY struct ctl_iscsi_listen_params listen; struct ctl_iscsi_accept_params accept; struct ctl_iscsi_send_params send; struct ctl_iscsi_receive_params receive; #endif }; /* * iSCSI interface * * status: The status of the request. See above for the * description of the values of this field. * * error_str: If the status indicates an error, this string will * be filled in to describe the error. 
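 *
 * A typical userland invocation (sketch: fd is an open
 * CTL_DEFAULT_DEV descriptor, buf and len are caller-provided and
 * hypothetical, error handling is elided):
 *
 *	struct ctl_iscsi req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.type = CTL_ISCSI_LIST;
 *	req.data.list.alloc_len = len;
 *	req.data.list.conn_xml = buf;
 *	if (ioctl(fd, CTL_ISCSI, &req) == -1 ||
 *	    req.status != CTL_ISCSI_OK)
 *		/* handle the failure, see req.error_str */;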
*/ struct ctl_iscsi { ctl_iscsi_type type; /* passed to kernel */ union ctl_iscsi_data data; /* passed to kernel */ ctl_iscsi_status status; /* passed to userland */ char error_str[CTL_ERROR_STR_LEN]; /* passed to userland */ }; struct ctl_lun_map { uint32_t port; uint32_t plun; uint32_t lun; }; #define CTL_IO _IOWR(CTL_MINOR, 0x00, union ctl_io) #define CTL_ENABLE_PORT _IOW(CTL_MINOR, 0x04, struct ctl_port_entry) #define CTL_DISABLE_PORT _IOW(CTL_MINOR, 0x05, struct ctl_port_entry) #define CTL_DELAY_IO _IOWR(CTL_MINOR, 0x10, struct ctl_io_delay_info) #define CTL_GETSTATS _IOWR(CTL_MINOR, 0x15, struct ctl_stats) #define CTL_ERROR_INJECT _IOWR(CTL_MINOR, 0x16, struct ctl_error_desc) #define CTL_GET_OOA _IOWR(CTL_MINOR, 0x18, struct ctl_ooa) #define CTL_DUMP_STRUCTS _IO(CTL_MINOR, 0x19) #define CTL_LUN_REQ _IOWR(CTL_MINOR, 0x21, struct ctl_lun_req) #define CTL_LUN_LIST _IOWR(CTL_MINOR, 0x22, struct ctl_lun_list) #define CTL_ERROR_INJECT_DELETE _IOW(CTL_MINOR, 0x23, struct ctl_error_desc) #define CTL_SET_PORT_WWNS _IOW(CTL_MINOR, 0x24, struct ctl_port_entry) #define CTL_ISCSI _IOWR(CTL_MINOR, 0x25, struct ctl_iscsi) #define CTL_PORT_REQ _IOWR(CTL_MINOR, 0x26, struct ctl_req) #define CTL_PORT_LIST _IOWR(CTL_MINOR, 0x27, struct ctl_lun_list) #define CTL_LUN_MAP _IOW(CTL_MINOR, 0x28, struct ctl_lun_map) #define CTL_GET_LUN_STATS _IOWR(CTL_MINOR, 0x29, struct ctl_get_io_stats) #define CTL_GET_PORT_STATS _IOWR(CTL_MINOR, 0x2a, struct ctl_get_io_stats) #endif /* _CTL_IOCTL_H_ */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_private.h =================================================================== --- head/sys/cam/ctl/ctl_private.h (revision 326264) +++ head/sys/cam/ctl/ctl_private.h (revision 326265) @@ -1,552 +1,554 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003, 2004, 2005, 2008 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_private.h#7 $ * $FreeBSD$ */ /* * CAM Target Layer driver private data structures/definitions. 
* * Author: Ken Merry */ #ifndef _CTL_PRIVATE_H_ #define _CTL_PRIVATE_H_ #include #include #include /* * SCSI vendor and product names. */ #define CTL_VENDOR "FREEBSD " #define CTL_DIRECT_PRODUCT "CTLDISK " #define CTL_PROCESSOR_PRODUCT "CTLPROCESSOR " #define CTL_CDROM_PRODUCT "CTLCDROM " #define CTL_UNKNOWN_PRODUCT "CTLDEVICE " #define CTL_POOL_ENTRIES_OTHER_SC 200 struct ctl_io_pool { char name[64]; uint32_t id; struct ctl_softc *ctl_softc; struct uma_zone *zone; }; typedef enum { CTL_SER_BLOCK, CTL_SER_BLOCKOPT, CTL_SER_EXTENT, CTL_SER_EXTENTOPT, CTL_SER_EXTENTSEQ, CTL_SER_PASS, CTL_SER_SKIP } ctl_serialize_action; typedef enum { CTL_ACTION_BLOCK, CTL_ACTION_OVERLAP, CTL_ACTION_OVERLAP_TAG, CTL_ACTION_PASS, CTL_ACTION_SKIP, CTL_ACTION_ERROR } ctl_action; /* * WARNING: Keep the bottom nibble here free, we OR in the data direction * flags for each command. * * Note: "OK_ON_NO_LUN" == we don't have to have a lun configured * "OK_ON_BOTH" == we have to have a lun configured * "SA5" == command has 5-bit service action at byte 1 */ typedef enum { CTL_CMD_FLAG_NONE = 0x0000, CTL_CMD_FLAG_NO_SENSE = 0x0010, CTL_CMD_FLAG_ALLOW_ON_RESV = 0x0020, CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x0040, CTL_CMD_FLAG_ALLOW_ON_PR_WRESV = 0x0080, CTL_CMD_FLAG_OK_ON_PROC = 0x0100, CTL_CMD_FLAG_OK_ON_DIRECT = 0x0200, CTL_CMD_FLAG_OK_ON_CDROM = 0x0400, CTL_CMD_FLAG_OK_ON_BOTH = 0x0700, CTL_CMD_FLAG_OK_ON_NO_LUN = 0x0800, CTL_CMD_FLAG_OK_ON_NO_MEDIA = 0x1000, CTL_CMD_FLAG_OK_ON_STANDBY = 0x2000, CTL_CMD_FLAG_OK_ON_UNAVAIL = 0x4000, CTL_CMD_FLAG_SA5 = 0x8000, CTL_CMD_FLAG_RUN_HERE = 0x10000 } ctl_cmd_flags; typedef enum { CTL_SERIDX_TUR = 0, CTL_SERIDX_READ, CTL_SERIDX_WRITE, CTL_SERIDX_UNMAP, CTL_SERIDX_SYNC, CTL_SERIDX_MD_SNS, CTL_SERIDX_MD_SEL, CTL_SERIDX_RQ_SNS, CTL_SERIDX_INQ, CTL_SERIDX_RD_CAP, CTL_SERIDX_RES, CTL_SERIDX_LOG_SNS, CTL_SERIDX_FORMAT, CTL_SERIDX_START, /* TBD: others to be filled in as needed */ CTL_SERIDX_COUNT, /* LAST, not a normal code, provides # codes */ CTL_SERIDX_INVLD = CTL_SERIDX_COUNT } ctl_seridx; typedef int ctl_opfunc(struct ctl_scsiio *ctsio); struct ctl_cmd_entry { ctl_opfunc *execute; ctl_seridx seridx; ctl_cmd_flags flags; ctl_lun_error_pattern pattern; uint8_t length; /* CDB length */ uint8_t usage[15]; /* Mask of allowed CDB bits * after the opcode byte. */ }; typedef enum { CTL_LUN_NONE = 0x000, CTL_LUN_CONTROL = 0x001, CTL_LUN_RESERVED = 0x002, CTL_LUN_INVALID = 0x004, CTL_LUN_DISABLED = 0x008, CTL_LUN_MALLOCED = 0x010, CTL_LUN_STOPPED = 0x020, CTL_LUN_NO_MEDIA = 0x040, CTL_LUN_EJECTED = 0x080, CTL_LUN_PR_RESERVED = 0x100, CTL_LUN_PRIMARY_SC = 0x200, CTL_LUN_READONLY = 0x800, CTL_LUN_PEER_SC_PRIMARY = 0x1000, CTL_LUN_REMOVABLE = 0x2000 } ctl_lun_flags; typedef enum { CTLBLOCK_FLAG_NONE = 0x00, CTLBLOCK_FLAG_INVALID = 0x01 } ctlblock_flags; union ctl_softcs { struct ctl_softc *ctl_softc; struct ctlblock_softc *ctlblock_softc; }; /* * Mode page defaults. */ #if 0 /* * These values make Solaris trim off some of the capacity. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 63 #define CTL_DEFAULT_HEADS 255 /* * These values seem to work okay. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 63 #define CTL_DEFAULT_HEADS 16 /* * These values work reasonably well. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 512 #define CTL_DEFAULT_HEADS 64 #endif /* * Solaris is somewhat picky about how many heads and sectors per track you * have defined in mode pages 3 and 4. These values seem to cause Solaris * to get the capacity more or less right when you run the format tool. 
* They still have problems when dealing with devices larger than 1TB, * but there isn't anything we can do about that. * * For smaller LUN sizes, this ends up causing the number of cylinders to * work out to 0. Solaris actually recognizes that and comes up with its * own bogus geometry to fit the actual capacity of the drive. They really * should just give up on geometry and stick to the read capacity * information alone for modern disk drives. * * One thing worth mentioning about Solaris' mkfs command is that it * doesn't like sectors per track values larger than 256. 512 seems to * work okay for format, but causes problems when you try to make a * filesystem. * * Another caveat about these values: the product of these two values * really should be a power of 2. This is because of the simplistic * shift-based calculation that we have to use on the i386 platform to * calculate the number of cylinders here. (If you use a divide, you end * up calling __udivdi3(), which is a hardware FP call on the PC. On the * XScale, it is done in software, so you can do that from inside the * kernel.) * * So for the current values (256 S/T, 128 H), we get 32768, which works * very nicely for calculating cylinders. * * If you want to change these values so that their product is no longer a * power of 2, re-visit the calculation in ctl_init_page_index(). You may * need to make it a bit more complicated to get the number of cylinders * right. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 256 #define CTL_DEFAULT_HEADS 128 #define CTL_DEFAULT_ROTATION_RATE SVPD_NON_ROTATING struct ctl_page_index; typedef int ctl_modesen_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); typedef int ctl_modesel_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr); typedef enum { CTL_PAGE_FLAG_NONE = 0x00, CTL_PAGE_FLAG_DIRECT = 0x01, CTL_PAGE_FLAG_PROC = 0x02, CTL_PAGE_FLAG_CDROM = 0x04, CTL_PAGE_FLAG_ALL = 0x07 } ctl_page_flags; struct ctl_page_index { uint8_t page_code; uint8_t subpage; uint16_t page_len; uint8_t *page_data; ctl_page_flags page_flags; ctl_modesen_handler *sense_handler; ctl_modesel_handler *select_handler; }; #define CTL_PAGE_CURRENT 0x00 #define CTL_PAGE_CHANGEABLE 0x01 #define CTL_PAGE_DEFAULT 0x02 #define CTL_PAGE_SAVED 0x03 #define CTL_NUM_LBP_PARAMS 4 #define CTL_NUM_LBP_THRESH 4 #define CTL_LBP_EXPONENT 11 /* 2048 sectors */ #define CTL_LBP_PERIOD 10 /* 10 seconds */ #define CTL_LBP_UA_PERIOD 300 /* 5 minutes */ struct ctl_logical_block_provisioning_page { struct scsi_logical_block_provisioning_page main; struct scsi_logical_block_provisioning_page_descr descr[CTL_NUM_LBP_THRESH]; }; static const struct ctl_page_index page_index_template[] = { {SMS_RW_ERROR_RECOVERY_PAGE, 0, sizeof(struct scsi_da_rw_recovery_page), NULL, CTL_PAGE_FLAG_DIRECT | CTL_PAGE_FLAG_CDROM, NULL, ctl_default_page_handler}, {SMS_FORMAT_DEVICE_PAGE, 0, sizeof(struct scsi_format_page), NULL, CTL_PAGE_FLAG_DIRECT, NULL, NULL}, {SMS_RIGID_DISK_PAGE, 0, sizeof(struct scsi_rigid_disk_page), NULL, CTL_PAGE_FLAG_DIRECT, NULL, NULL}, {SMS_VERIFY_ERROR_RECOVERY_PAGE, 0, sizeof(struct scsi_da_verify_recovery_page), NULL, CTL_PAGE_FLAG_DIRECT | CTL_PAGE_FLAG_CDROM, NULL, ctl_default_page_handler}, {SMS_CACHING_PAGE, 0, sizeof(struct scsi_caching_page), NULL, CTL_PAGE_FLAG_DIRECT | CTL_PAGE_FLAG_CDROM, NULL, ctl_default_page_handler}, {SMS_CONTROL_MODE_PAGE, 0, sizeof(struct scsi_control_page), NULL, CTL_PAGE_FLAG_ALL, NULL, ctl_default_page_handler}, {SMS_CONTROL_MODE_PAGE | SMPH_SPF, 
0x01, sizeof(struct scsi_control_ext_page), NULL, CTL_PAGE_FLAG_ALL, NULL, ctl_default_page_handler}, {SMS_INFO_EXCEPTIONS_PAGE, 0, sizeof(struct scsi_info_exceptions_page), NULL, CTL_PAGE_FLAG_ALL, NULL, ctl_ie_page_handler}, {SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 0x02, sizeof(struct ctl_logical_block_provisioning_page), NULL, CTL_PAGE_FLAG_DIRECT, NULL, ctl_default_page_handler}, {SMS_CDDVD_CAPS_PAGE, 0, sizeof(struct scsi_cddvd_capabilities_page), NULL, CTL_PAGE_FLAG_CDROM, NULL, NULL}, }; #define CTL_NUM_MODE_PAGES sizeof(page_index_template)/ \ sizeof(page_index_template[0]) struct ctl_mode_pages { struct scsi_da_rw_recovery_page rw_er_page[4]; struct scsi_format_page format_page[4]; struct scsi_rigid_disk_page rigid_disk_page[4]; struct scsi_da_verify_recovery_page verify_er_page[4]; struct scsi_caching_page caching_page[4]; struct scsi_control_page control_page[4]; struct scsi_control_ext_page control_ext_page[4]; struct scsi_info_exceptions_page ie_page[4]; struct ctl_logical_block_provisioning_page lbp_page[4]; struct scsi_cddvd_capabilities_page cddvd_page[4]; struct ctl_page_index index[CTL_NUM_MODE_PAGES]; }; #define MODE_RWER mode_pages.rw_er_page[CTL_PAGE_CURRENT] #define MODE_FMT mode_pages.format_page[CTL_PAGE_CURRENT] #define MODE_RDISK mode_pages.rigid_disk_page[CTL_PAGE_CURRENT] #define MODE_VER mode_pages.verify_er_page[CTL_PAGE_CURRENT] #define MODE_CACHING mode_pages.caching_page[CTL_PAGE_CURRENT] #define MODE_CTRL mode_pages.control_page[CTL_PAGE_CURRENT] #define MODE_CTRLE mode_pages.control_ext_page[CTL_PAGE_CURRENT] #define MODE_IE mode_pages.ie_page[CTL_PAGE_CURRENT] #define MODE_LBP mode_pages.lbp_page[CTL_PAGE_CURRENT] #define MODE_CDDVD mode_pages.cddvd_page[CTL_PAGE_CURRENT] static const struct ctl_page_index log_page_index_template[] = { {SLS_SUPPORTED_PAGES_PAGE, 0, 0, NULL, CTL_PAGE_FLAG_ALL, NULL, NULL}, {SLS_SUPPORTED_PAGES_PAGE, SLS_SUPPORTED_SUBPAGES_SUBPAGE, 0, NULL, CTL_PAGE_FLAG_ALL, NULL, NULL}, {SLS_LOGICAL_BLOCK_PROVISIONING, 0, 0, NULL, CTL_PAGE_FLAG_DIRECT, ctl_lbp_log_sense_handler, NULL}, {SLS_STAT_AND_PERF, 0, 0, NULL, CTL_PAGE_FLAG_ALL, ctl_sap_log_sense_handler, NULL}, {SLS_IE_PAGE, 0, 0, NULL, CTL_PAGE_FLAG_ALL, ctl_ie_log_sense_handler, NULL}, }; #define CTL_NUM_LOG_PAGES sizeof(log_page_index_template)/ \ sizeof(log_page_index_template[0]) struct ctl_log_pages { uint8_t pages_page[CTL_NUM_LOG_PAGES]; uint8_t subpages_page[CTL_NUM_LOG_PAGES * 2]; uint8_t lbp_page[12*CTL_NUM_LBP_PARAMS]; struct stat_page { struct scsi_log_stat_and_perf sap; struct scsi_log_idle_time it; struct scsi_log_time_interval ti; } stat_page; struct scsi_log_informational_exceptions ie_page; struct ctl_page_index index[CTL_NUM_LOG_PAGES]; }; struct ctl_lun_delay_info { ctl_delay_type datamove_type; uint32_t datamove_delay; ctl_delay_type done_type; uint32_t done_delay; }; #define CTL_PR_ALL_REGISTRANTS 0xFFFFFFFF #define CTL_PR_NO_RESERVATION 0xFFFFFFF0 struct ctl_devid { int len; uint8_t data[]; }; #define NUM_HA_SHELVES 2 #define CTL_WRITE_BUFFER_SIZE 262144 struct tpc_list; struct ctl_lun { struct mtx lun_lock; uint64_t lun; ctl_lun_flags flags; STAILQ_HEAD(,ctl_error_desc) error_list; uint64_t error_serial; struct ctl_softc *ctl_softc; struct ctl_be_lun *be_lun; struct ctl_backend_driver *backend; struct ctl_lun_delay_info delay_info; #ifdef CTL_TIME_IO sbintime_t idle_time; sbintime_t last_busy; #endif TAILQ_HEAD(ctl_ooaq, ctl_io_hdr) ooa_queue; TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue; STAILQ_ENTRY(ctl_lun) links; struct scsi_sense_data **pending_sense; 
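	/*
	 * Editor's note (not in the original): pending_sense above and
	 * pending_ua below are believed to be per-port arrays with one
	 * entry per initiator, filled in as ports come online.
	 */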
ctl_ua_type **pending_ua; uint8_t ua_tpt_info[8]; time_t lasttpt; uint8_t ie_asc; /* Informational exceptions */ uint8_t ie_ascq; int ie_reported; /* Already reported */ uint32_t ie_reportcnt; /* REPORT COUNT */ struct callout ie_callout; /* INTERVAL TIMER */ struct ctl_mode_pages mode_pages; struct ctl_log_pages log_pages; #ifdef CTL_LEGACY_STATS struct ctl_lun_io_stats legacy_stats; #endif /* CTL_LEGACY_STATS */ struct ctl_io_stats stats; uint32_t res_idx; uint32_t pr_generation; uint64_t **pr_keys; int pr_key_count; uint32_t pr_res_idx; uint8_t pr_res_type; int prevent_count; uint32_t *prevent; uint8_t *write_buffer; struct ctl_devid *lun_devid; TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists; }; typedef enum { CTL_FLAG_ACTIVE_SHELF = 0x04 } ctl_gen_flags; #define CTL_MAX_THREADS 16 struct ctl_thread { struct mtx_padalign queue_lock; struct ctl_softc *ctl_softc; struct thread *thread; STAILQ_HEAD(, ctl_io_hdr) incoming_queue; STAILQ_HEAD(, ctl_io_hdr) rtr_queue; STAILQ_HEAD(, ctl_io_hdr) done_queue; STAILQ_HEAD(, ctl_io_hdr) isc_queue; }; struct tpc_token; struct ctl_softc { struct mtx ctl_lock; struct cdev *dev; int num_luns; ctl_gen_flags flags; ctl_ha_mode ha_mode; int ha_id; int is_single; ctl_ha_link_state ha_link; int port_min; int port_max; int port_cnt; int init_min; int init_max; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; void *othersc_pool; struct proc *ctl_proc; uint32_t *ctl_lun_mask; struct ctl_lun **ctl_luns; uint32_t *ctl_port_mask; STAILQ_HEAD(, ctl_lun) lun_list; STAILQ_HEAD(, ctl_be_lun) pending_lun_queue; uint32_t num_frontends; STAILQ_HEAD(, ctl_frontend) fe_list; uint32_t num_ports; STAILQ_HEAD(, ctl_port) port_list; struct ctl_port **ctl_ports; uint32_t num_backends; STAILQ_HEAD(, ctl_backend_driver) be_list; struct uma_zone *io_zone; uint32_t cur_pool_id; int shutdown; struct ctl_thread threads[CTL_MAX_THREADS]; struct thread *lun_thread; struct thread *thresh_thread; TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens; struct callout tpc_timeout; struct mtx tpc_lock; }; #ifdef _KERNEL extern const struct ctl_cmd_entry ctl_cmd_table[256]; uint32_t ctl_get_initindex(struct ctl_nexus *nexus); int ctl_lun_map_init(struct ctl_port *port); int ctl_lun_map_deinit(struct ctl_port *port); int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun); int ctl_lun_map_unset(struct ctl_port *port, uint32_t plun); uint32_t ctl_lun_map_from_port(struct ctl_port *port, uint32_t plun); uint32_t ctl_lun_map_to_port(struct ctl_port *port, uint32_t glun); int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, uint32_t total_ctl_io, void **npool); void ctl_pool_free(struct ctl_io_pool *pool); int ctl_scsi_release(struct ctl_scsiio *ctsio); int ctl_scsi_reserve(struct ctl_scsiio *ctsio); int ctl_start_stop(struct ctl_scsiio *ctsio); int ctl_prevent_allow(struct ctl_scsiio *ctsio); int ctl_sync_cache(struct ctl_scsiio *ctsio); int ctl_format(struct ctl_scsiio *ctsio); int ctl_read_buffer(struct ctl_scsiio *ctsio); int ctl_write_buffer(struct ctl_scsiio *ctsio); int ctl_write_same(struct ctl_scsiio *ctsio); int ctl_unmap(struct ctl_scsiio *ctsio); int ctl_mode_select(struct ctl_scsiio *ctsio); int ctl_mode_sense(struct ctl_scsiio *ctsio); int ctl_log_sense(struct ctl_scsiio *ctsio); int ctl_read_capacity(struct ctl_scsiio *ctsio); int ctl_read_capacity_16(struct ctl_scsiio *ctsio); int ctl_read_defect(struct ctl_scsiio *ctsio); int ctl_read_toc(struct ctl_scsiio *ctsio); int ctl_read_write(struct ctl_scsiio *ctsio); int ctl_cnw(struct ctl_scsiio 
*ctsio); int ctl_report_luns(struct ctl_scsiio *ctsio); int ctl_request_sense(struct ctl_scsiio *ctsio); int ctl_tur(struct ctl_scsiio *ctsio); int ctl_verify(struct ctl_scsiio *ctsio); int ctl_inquiry(struct ctl_scsiio *ctsio); int ctl_get_config(struct ctl_scsiio *ctsio); int ctl_get_event_status(struct ctl_scsiio *ctsio); int ctl_mechanism_status(struct ctl_scsiio *ctsio); int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio); int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio); int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio); int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio); int ctl_report_supported_tmf(struct ctl_scsiio *ctsio); int ctl_report_timestamp(struct ctl_scsiio *ctsio); int ctl_get_lba_status(struct ctl_scsiio *ctsio); void ctl_tpc_init(struct ctl_softc *softc); void ctl_tpc_shutdown(struct ctl_softc *softc); void ctl_tpc_lun_init(struct ctl_lun *lun); void ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx); void ctl_tpc_lun_shutdown(struct ctl_lun *lun); int ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len); int ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio); int ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio); int ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio); int ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio); int ctl_extended_copy_lid1(struct ctl_scsiio *ctsio); int ctl_extended_copy_lid4(struct ctl_scsiio *ctsio); int ctl_copy_operation_abort(struct ctl_scsiio *ctsio); int ctl_populate_token(struct ctl_scsiio *ctsio); int ctl_write_using_token(struct ctl_scsiio *ctsio); int ctl_receive_rod_token_information(struct ctl_scsiio *ctsio); int ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio); #endif /* _KERNEL */ #endif /* _CTL_PRIVATE_H_ */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_scsi_all.c =================================================================== --- head/sys/cam/ctl/ctl_scsi_all.c (revision 326264) +++ head/sys/cam/ctl/ctl_scsi_all.c (revision 326265) @@ -1,205 +1,207 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Implementation of Utility functions for all SCSI device types. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_scsi_all.c#2 $ */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #else #include #include #include #include #include #endif #include #include #include #include #include #include #include #include #ifndef _KERNEL #include #endif const char * ctl_scsi_status_string(struct ctl_scsiio *ctsio) { switch(ctsio->scsi_status) { case SCSI_STATUS_OK: return("OK"); case SCSI_STATUS_CHECK_COND: return("Check Condition"); case SCSI_STATUS_BUSY: return("Busy"); case SCSI_STATUS_INTERMED: return("Intermediate"); case SCSI_STATUS_INTERMED_COND_MET: return("Intermediate-Condition Met"); case SCSI_STATUS_RESERV_CONFLICT: return("Reservation Conflict"); case SCSI_STATUS_CMD_TERMINATED: return("Command Terminated"); case SCSI_STATUS_QUEUE_FULL: return("Queue Full"); case SCSI_STATUS_ACA_ACTIVE: return("ACA Active"); case SCSI_STATUS_TASK_ABORTED: return("Task Aborted"); default: { static char unkstr[64]; snprintf(unkstr, sizeof(unkstr), "Unknown %#x", ctsio->scsi_status); return(unkstr); } } } /* * scsi_command_string() returns 0 for success and -1 for failure. */ int ctl_scsi_command_string(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, struct sbuf *sb) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; sbuf_printf(sb, "%s. CDB: %s", scsi_op_desc(ctsio->cdb[0], inq_data), scsi_cdb_string(ctsio->cdb, cdb_str, sizeof(cdb_str))); return(0); } void ctl_scsi_path_string(union ctl_io *io, char *path_str, int len) { snprintf(path_str, len, "(%u:%u:%u/%u): ", io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->io_hdr.nexus.targ_mapped_lun); } /* * ctl_scsi_sense_sbuf() returns 0 for success and -1 for failure. */ int ctl_scsi_sense_sbuf(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, struct sbuf *sb, scsi_sense_string_flags flags) { char path_str[64]; if ((ctsio == NULL) || (sb == NULL)) return(-1); ctl_scsi_path_string((union ctl_io *)ctsio, path_str, sizeof(path_str)); if (flags & SSS_FLAG_PRINT_COMMAND) { sbuf_cat(sb, path_str); ctl_scsi_command_string(ctsio, inq_data, sb); sbuf_printf(sb, "\n"); } scsi_sense_only_sbuf(&ctsio->sense_data, ctsio->sense_len, sb, path_str, inq_data, ctsio->cdb, ctsio->cdb_len); return(0); } char * ctl_scsi_sense_string(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, char *str, int str_len) { struct sbuf sb; sbuf_new(&sb, str, str_len, 0); ctl_scsi_sense_sbuf(ctsio, inq_data, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); return(sbuf_data(&sb)); } #ifdef _KERNEL void ctl_scsi_sense_print(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data) { struct sbuf sb; char str[512]; sbuf_new(&sb, str, sizeof(str), 0); ctl_scsi_sense_sbuf(ctsio, inq_data, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #else /* _KERNEL */ void ctl_scsi_sense_print(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, FILE *ofile) { struct sbuf sb; char str[512]; if ((ctsio == NULL) || (ofile == NULL)) return; sbuf_new(&sb, str, sizeof(str), 0); ctl_scsi_sense_sbuf(ctsio, inq_data, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); fprintf(ofile, "%s", sbuf_data(&sb)); } #endif /* _KERNEL */ Index: head/sys/cam/ctl/ctl_scsi_all.h =================================================================== --- head/sys/cam/ctl/ctl_scsi_all.h (revision 326264) +++ head/sys/cam/ctl/ctl_scsi_all.h (revision 326265) @@ -1,52 +1,54 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 
1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_scsi_all.h#2 $ */ __FBSDID("$FreeBSD$"); __BEGIN_DECLS const char * ctl_scsi_status_string(struct ctl_scsiio *ctsio); #ifdef _KERNEL void ctl_scsi_sense_print(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data); #else /* _KERNEL */ void ctl_scsi_sense_print(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, FILE *ofile); #endif /* _KERNEL */ int ctl_scsi_command_string(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data,struct sbuf *sb); int ctl_scsi_sense_sbuf(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, struct sbuf *sb, scsi_sense_string_flags flags); void ctl_scsi_path_string(union ctl_io *io, char *path_str, int strlen); char *ctl_scsi_sense_string(struct ctl_scsiio *ctsio, struct scsi_inquiry_data *inq_data, char *str, int str_len); __END_DECLS Index: head/sys/cam/ctl/ctl_ser_table.c =================================================================== --- head/sys/cam/ctl/ctl_ser_table.c (revision 326264) +++ head/sys/cam/ctl/ctl_ser_table.c (revision 326265) @@ -1,82 +1,84 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ser_table.c#1 $ * $FreeBSD$ */ /* * CAM Target Layer command serialization table. * * Author: Kim Le */ /****************************************************************************/ /* TABLE ctlSerTbl */ /* */ /* The matrix which drives the serialization algorithm. The major index */ /* (the first) into this table is the command being checked and the minor */ /* index is the command against which the first command is being checked. */ /* i.e., the major index (row) command is ahead of the minor index command */ /* (column) in the queue. This allows the code to optimize by capturing */ /* the result of the first indexing operation into a pointer. */ /* */ /* Whenever a new value is added to the IDX_T type, this matrix must be */ /* expanded by one row AND one column -- Because of this, some effort */ /* should be made to re-use the indexes whenever possible. */ /* */ /****************************************************************************/ #define sK CTL_SER_SKIP /* Skip */ #define pS CTL_SER_PASS /* Pass */ #define bK CTL_SER_BLOCK /* Blocked */ #define bO CTL_SER_BLOCKOPT /* Optional block */ #define xT CTL_SER_EXTENT /* Extent check */ #define xO CTL_SER_EXTENTOPT /* Optional extent check */ #define xS CTL_SER_EXTENTSEQ /* Sequential extent check */ const static ctl_serialize_action ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = { /**>IDX_ :: 2nd:TUR RD WRT UNM SYN MDSN MDSL RQSN INQ RDCP RES LSNS FMT STR*/ /*TUR */{ pS, pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*READ */{ pS, xS, xT, bO, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*WRITE */{ pS, xT, xT, bO, bO, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*UNMAP */{ pS, xO, xO, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*SYNC */{ pS, pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*MD_SNS */{ bK, bK, bK, bK, bK, pS, bK, bK, pS, pS, bK, pS, bK, bK}, /*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK}, /*RQ_SNS */{ pS, pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, pS, bK, bK}, /*INQ */{ pS, pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK}, /*RD_CAP */{ pS, pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, pS}, /*RES */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK}, /*LOG_SNS */{ pS, pS, pS, pS, pS, pS, bK, bK, pS, pS, bK, pS, bK, bK}, /*FORMAT */{ pS, bK, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK}, /*START */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK}, }; Index: head/sys/cam/ctl/ctl_util.c =================================================================== --- head/sys/cam/ctl/ctl_util.c (revision 326264) +++ head/sys/cam/ctl/ctl_util.c (revision 326265) @@ -1,897 +1,899 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.c#2 $ */ /* * CAM Target Layer SCSI library * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #ifdef _KERNEL #include #include #include #include #include #else /* __KERNEL__ */ #include #include #include #include #include #include #endif /* __KERNEL__ */ #include #include #include #include #include #include #include struct ctl_status_desc { ctl_io_status status; const char *description; }; struct ctl_task_desc { ctl_task_type task_action; const char *description; }; static struct ctl_status_desc ctl_status_table[] = { {CTL_STATUS_NONE, "No Status"}, {CTL_SUCCESS, "Command Completed Successfully"}, {CTL_CMD_TIMEOUT, "Command Timed Out"}, {CTL_SEL_TIMEOUT, "Selection Timeout"}, {CTL_ERROR, "Command Failed"}, {CTL_SCSI_ERROR, "SCSI Error"}, {CTL_CMD_ABORTED, "Command Aborted"}, }; static struct ctl_task_desc ctl_task_table[] = { {CTL_TASK_ABORT_TASK, "Abort Task"}, {CTL_TASK_ABORT_TASK_SET, "Abort Task Set"}, {CTL_TASK_CLEAR_ACA, "Clear ACA"}, {CTL_TASK_CLEAR_TASK_SET, "Clear Task Set"}, {CTL_TASK_I_T_NEXUS_RESET, "I_T Nexus Reset"}, {CTL_TASK_LUN_RESET, "LUN Reset"}, {CTL_TASK_TARGET_RESET, "Target Reset"}, {CTL_TASK_BUS_RESET, "Bus Reset"}, {CTL_TASK_PORT_LOGIN, "Port Login"}, {CTL_TASK_PORT_LOGOUT, "Port Logout"}, {CTL_TASK_QUERY_TASK, "Query Task"}, {CTL_TASK_QUERY_TASK_SET, "Query Task Set"}, {CTL_TASK_QUERY_ASYNC_EVENT, "Query Async Event"} }; void ctl_scsi_tur(union ctl_io *io, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_test_unit_ready *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_test_unit_ready *)ctsio->cdb; cdb->opcode = TEST_UNIT_READY; cdb->control = control; io->io_hdr.flags = CTL_FLAG_DATA_NONE; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_len = 0; ctsio->ext_data_ptr = NULL; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_inquiry(union ctl_io *io, uint8_t *data_ptr, int32_t data_len, uint8_t byte2, uint8_t page_code, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_inquiry *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_inquiry *)ctsio->cdb; cdb->opcode = 
INQUIRY; cdb->byte2 = byte2; cdb->page_code = page_code; cdb->control = control; scsi_ulto2b(data_len, cdb->length); io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_len = data_len; ctsio->ext_data_ptr = data_ptr; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_request_sense(union ctl_io *io, uint8_t *data_ptr, int32_t data_len, uint8_t byte2, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_request_sense *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_request_sense *)ctsio->cdb; cdb->opcode = REQUEST_SENSE; cdb->byte2 = byte2; cdb->control = control; cdb->length = data_len; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_report_luns(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t select_report, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_report_luns *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_report_luns *)ctsio->cdb; cdb->opcode = REPORT_LUNS; cdb->select_report = select_report; scsi_ulto4b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_write_buffer(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_buffer, uint8_t mode, uint8_t buffer_id, uint32_t buffer_offset, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_write_buffer *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; cdb = (struct scsi_write_buffer *)ctsio->cdb; if (read_buffer != 0) cdb->opcode = READ_BUFFER; else cdb->opcode = WRITE_BUFFER; cdb->byte2 = mode & RWB_MODE; cdb->buffer_id = buffer_id; scsi_ulto3b(buffer_offset, cdb->offset); scsi_ulto3b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; if (read_buffer != 0) io->io_hdr.flags = CTL_FLAG_DATA_IN; else io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->cdb_len = sizeof(*cdb); ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_op, uint8_t byte2, int minimum_cdb_size, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; /* * Pick out the smallest CDB that will hold the user's request. * minimum_cdb_size allows cranking the CDB size up, even for * requests that would not normally need a large CDB. This can be * useful for testing (e.g. to make sure READ_16 support works without * having an array larger than 2TB) and for compatibility -- e.g. * if your device doesn't support READ_6. (ATAPI drives don't.) 
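 *
 * (Editor's usage sketch, not part of the original file: a caller can
 * force the 16-byte path even for a transfer that would fit a 6-byte
 * CDB by passing minimum_cdb_size = 16, e.g.
 *
 *	ctl_scsi_read_write(io, buf, 512, 1, 0, 16, 0, 1,
 *	    CTL_TAG_SIMPLE, 0);
 *
 * reads one block at LBA 0 with READ(16), skipping the READ(6), READ(10)
 * and READ(12) branches below.)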
*/ if ((minimum_cdb_size < 10) && ((lba & 0x1fffff) == lba) && ((num_blocks & 0xff) == num_blocks) && (byte2 == 0)) { struct scsi_rw_6 *cdb; /* * Note that according to SBC-2, the target should return 256 * blocks if the transfer length in a READ(6) or WRITE(6) CDB * is set to 0. Since it's possible that some targets * won't do the right thing, we only send a READ(6) or * WRITE(6) for transfer sizes up to and including 255 blocks. */ cdb = (struct scsi_rw_6 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_6 : WRITE_6; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks & 0xff; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else if ((minimum_cdb_size < 12) && ((num_blocks & 0xffff) == num_blocks) && ((lba & 0xffffffff) == lba)) { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_10 : WRITE_10; cdb->byte2 = byte2; scsi_ulto4b(lba, cdb->addr); cdb->reserved = 0; scsi_ulto2b(num_blocks, cdb->length); cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else if ((minimum_cdb_size < 16) && ((num_blocks & 0xffffffff) == num_blocks) && ((lba & 0xffffffff) == lba)) { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_12 : WRITE_12; cdb->byte2 = byte2; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->reserved = 0; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } else { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; cdb->opcode = (read_op) ? READ_16 : WRITE_16; cdb->byte2 = byte2; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->reserved = 0; cdb->control = control; ctsio->cdb_len = sizeof(*cdb); } io->io_hdr.io_type = CTL_IO_SCSI; if (read_op != 0) io->io_hdr.flags = CTL_FLAG_DATA_IN; else io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_write_same(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t byte2, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control) { struct ctl_scsiio *ctsio; struct scsi_write_same_16 *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; ctsio = &io->scsiio; ctsio->cdb_len = sizeof(*cdb); cdb = (struct scsi_write_same_16 *)ctsio->cdb; cdb->opcode = WRITE_SAME_16; cdb->byte2 = byte2; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); cdb->group = 0; cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_OUT; ctsio->tag_type = tag_type; ctsio->ext_data_ptr = data_ptr; ctsio->ext_data_len = data_len; ctsio->ext_sg_entries = 0; ctsio->ext_data_filled = 0; ctsio->sense_len = SSD_FULL_SIZE; } void ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint32_t addr, int reladr, int pmi, ctl_tag_type tag_type, uint8_t control) { struct scsi_read_capacity *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; cdb = (struct scsi_read_capacity *)io->scsiio.cdb; cdb->opcode = READ_CAPACITY; if (reladr) cdb->byte2 = SRC_RELADR; if (pmi) cdb->pmi = SRC_PMI; scsi_ulto4b(addr, cdb->addr); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void 
ctl_scsi_read_capacity_16(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint64_t addr, int reladr, int pmi, ctl_tag_type tag_type, uint8_t control) { struct scsi_read_capacity_16 *cdb; ctl_scsi_zero_io(io); io->io_hdr.io_type = CTL_IO_SCSI; cdb = (struct scsi_read_capacity_16 *)io->scsiio.cdb; cdb->opcode = SERVICE_ACTION_IN; cdb->service_action = SRC16_SERVICE_ACTION; if (reladr) cdb->reladr |= SRC16_RELADR; if (pmi) cdb->reladr |= SRC16_PMI; scsi_u64to8b(addr, cdb->addr); scsi_ulto4b(data_len, cdb->alloc_len); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_mode_sense(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int dbd, int llbaa, uint8_t page_code, uint8_t pc, uint8_t subpage, int minimum_cdb_size, ctl_tag_type tag_type, uint8_t control) { ctl_scsi_zero_io(io); if ((minimum_cdb_size < 10) && (llbaa == 0) && (data_len < 256)) { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)io->scsiio.cdb; cdb->opcode = MODE_SENSE_6; if (dbd) cdb->byte2 |= SMS_DBD; cdb->page = page_code | pc; cdb->subpage = subpage; cdb->length = data_len; cdb->control = control; } else { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)io->scsiio.cdb; cdb->opcode = MODE_SENSE_10; if (dbd) cdb->byte2 |= SMS_DBD; if (llbaa) cdb->byte2 |= SMS10_LLBAA; cdb->page = page_code | pc; cdb->subpage = subpage; scsi_ulto2b(data_len, cdb->length); cdb->control = control; } io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject, int immediate, int power_conditions, ctl_tag_type tag_type, uint8_t control) { struct scsi_start_stop_unit *cdb; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; ctl_scsi_zero_io(io); cdb->opcode = START_STOP_UNIT; if (immediate) cdb->byte2 |= SSS_IMMED; cdb->how = power_conditions; if (load_eject) cdb->how |= SSS_LOEJ; if (start) cdb->how |= SSS_START; cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_NONE; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = NULL; io->scsiio.ext_data_len = 0; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr, int minimum_cdb_size, uint64_t starting_lba, uint32_t block_count, ctl_tag_type tag_type, uint8_t control) { ctl_scsi_zero_io(io); if ((minimum_cdb_size < 16) && ((block_count & 0xffff) == block_count) && ((starting_lba & 0xffffffff) == starting_lba)) { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)io->scsiio.cdb; cdb->opcode = SYNCHRONIZE_CACHE; if (reladr) cdb->byte2 |= SSC_RELADR; if (immed) cdb->byte2 |= SSC_IMMED; scsi_ulto4b(starting_lba, cdb->begin_lba); scsi_ulto2b(block_count, cdb->lb_count); cdb->control = control; } else { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)io->scsiio.cdb; cdb->opcode = SYNCHRONIZE_CACHE_16; if (reladr) cdb->byte2 |= SSC_RELADR; if (immed) cdb->byte2 |= SSC_IMMED; scsi_u64to8b(starting_lba, cdb->begin_lba); 
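		/*
		 * Editor's note (not in the original): this 16-byte form
		 * carries the full 64-bit LBA above and a 32-bit block
		 * count below; the 10-byte branch is limited to 32-bit
		 * LBAs and 16-bit counts, hence the range checks above.
		 */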
scsi_ulto4b(block_count, cdb->lb_count); cdb->control = control; } io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_NONE; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = NULL; io->scsiio.ext_data_len = 0; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_persistent_res_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int action, ctl_tag_type tag_type, uint8_t control) { struct scsi_per_res_in *cdb; ctl_scsi_zero_io(io); cdb = (struct scsi_per_res_in *)io->scsiio.cdb; cdb->opcode = PERSISTENT_RES_IN; cdb->action = action; scsi_ulto2b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_persistent_res_out(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int action, int type, uint64_t key, uint64_t sa_key, ctl_tag_type tag_type, uint8_t control) { struct scsi_per_res_out *cdb; struct scsi_per_res_out_parms *params; ctl_scsi_zero_io(io); cdb = (struct scsi_per_res_out *)io->scsiio.cdb; params = (struct scsi_per_res_out_parms *)data_ptr; cdb->opcode = PERSISTENT_RES_OUT; if (action == 5) cdb->action = 6; else cdb->action = action; switch(type) { case 0: cdb->scope_type = 1; break; case 1: cdb->scope_type = 3; break; case 2: cdb->scope_type = 5; break; case 3: cdb->scope_type = 6; break; case 4: cdb->scope_type = 7; break; case 5: cdb->scope_type = 8; break; } scsi_ulto4b(data_len, cdb->length); cdb->control = control; scsi_u64to8b(key, params->res_key.key); scsi_u64to8b(sa_key, params->serv_act_res_key); io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_OUT; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t action, ctl_tag_type tag_type, uint8_t control) { struct scsi_maintenance_in *cdb; ctl_scsi_zero_io(io); cdb = (struct scsi_maintenance_in *)io->scsiio.cdb; cdb->opcode = MAINTENANCE_IN; cdb->byte2 = action; scsi_ulto4b(data_len, cdb->length); cdb->control = control; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.flags = CTL_FLAG_DATA_IN; io->scsiio.tag_type = tag_type; io->scsiio.ext_data_ptr = data_ptr; io->scsiio.ext_data_len = data_len; io->scsiio.ext_sg_entries = 0; io->scsiio.ext_data_filled = 0; io->scsiio.sense_len = SSD_FULL_SIZE; } #ifndef _KERNEL union ctl_io * ctl_scsi_alloc_io(uint32_t initid) { union ctl_io *io; io = (union ctl_io *)malloc(sizeof(*io)); if (io == NULL) goto bailout; io->io_hdr.nexus.initid = initid; bailout: return (io); } void ctl_scsi_free_io(union ctl_io *io) { free(io); } void ctl_scsi_zero_io(union ctl_io *io) { void *pool_ref; if (io == NULL) return; pool_ref = io->io_hdr.pool; memset(io, 0, sizeof(*io)); io->io_hdr.pool = pool_ref; } #endif /* !_KERNEL */ const char * ctl_scsi_task_string(struct ctl_taskio *taskio) { unsigned int i; for (i = 0; i < (sizeof(ctl_task_table)/sizeof(ctl_task_table[0])); i++) { if (taskio->task_action == ctl_task_table[i].task_action) { return (ctl_task_table[i].description); } } return (NULL); } void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb) { const char *task_desc; 
char path_str[64]; ctl_scsi_path_string(io, path_str, sizeof(path_str)); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: sbuf_cat(sb, path_str); ctl_scsi_command_string(&io->scsiio, NULL, sb); sbuf_printf(sb, " Tag: %#x/%d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_cat(sb, path_str); task_desc = ctl_scsi_task_string(&io->taskio); if (task_desc == NULL) sbuf_printf(sb, "Unknown Task Action %d (%#x)", io->taskio.task_action, io->taskio.task_action); else sbuf_printf(sb, "Task Action: %s", task_desc); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: sbuf_printf(sb, " Tag: %#x/%d\n", io->taskio.tag_num, io->taskio.tag_type); break; default: sbuf_printf(sb, "\n"); break; } break; default: break; } } void ctl_io_error_sbuf(union ctl_io *io, struct scsi_inquiry_data *inq_data, struct sbuf *sb) { struct ctl_status_desc *status_desc; char path_str[64]; unsigned int i; ctl_io_sbuf(io, sb); status_desc = NULL; for (i = 0; i < (sizeof(ctl_status_table)/sizeof(ctl_status_table[0])); i++) { if ((io->io_hdr.status & CTL_STATUS_MASK) == ctl_status_table[i].status) { status_desc = &ctl_status_table[i]; break; } } ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_cat(sb, path_str); if (status_desc == NULL) sbuf_printf(sb, "CTL Status: Unknown status %#x\n", io->io_hdr.status); else sbuf_printf(sb, "CTL Status: %s\n", status_desc->description); if ((io->io_hdr.io_type == CTL_IO_SCSI) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)) { sbuf_cat(sb, path_str); sbuf_printf(sb, "SCSI Status: %s\n", ctl_scsi_status_string(&io->scsiio)); if (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND) ctl_scsi_sense_sbuf(&io->scsiio, inq_data, sb, SSS_FLAG_NONE); } } char * ctl_io_string(union ctl_io *io, char *str, int str_len) { struct sbuf sb; sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN); ctl_io_sbuf(io, &sb); sbuf_finish(&sb); return (sbuf_data(&sb)); } char * ctl_io_error_string(union ctl_io *io, struct scsi_inquiry_data *inq_data, char *str, int str_len) { struct sbuf sb; sbuf_new(&sb, str, str_len, SBUF_FIXEDLEN); ctl_io_error_sbuf(io, inq_data, &sb); sbuf_finish(&sb); return (sbuf_data(&sb)); } #ifdef _KERNEL void ctl_io_print(union ctl_io *io) { char str[512]; printf("%s", ctl_io_string(io, str, sizeof(str))); } void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data) { char str[512]; printf("%s", ctl_io_error_string(io, inq_data, str, sizeof(str))); } void ctl_data_print(union ctl_io *io) { char str[128]; char path_str[64]; struct sbuf sb; int i, j, len; if (io->io_hdr.io_type != CTL_IO_SCSI) return; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) return; if (io->scsiio.ext_sg_entries > 0) /* XXX: Implement */ return; ctl_scsi_path_string(io, path_str, sizeof(path_str)); len = min(io->scsiio.kern_data_len, 4096); for (i = 0; i < len; ) { sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); sbuf_printf(&sb, " %#6x:%04x:", io->scsiio.tag_num, i); for (j = 0; j < 16 && i < len; i++, j++) { if (j == 8) sbuf_cat(&sb, " "); sbuf_printf(&sb, " %02x", io->scsiio.kern_data_ptr[i]); } sbuf_cat(&sb, "\n"); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } } #else /* _KERNEL */ void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data, FILE *ofile) { char str[512]; fprintf(ofile, "%s", ctl_io_error_string(io, inq_data, str, sizeof(str))); } #endif /* _KERNEL */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/ctl_util.h =================================================================== --- head/sys/cam/ctl/ctl_util.h 
(revision 326264) +++ head/sys/cam/ctl/ctl_util.h (revision 326265) @@ -1,126 +1,128 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_util.h#2 $ * $FreeBSD$ */ /* * CAM Target Layer SCSI library interface * * Author: Ken Merry */ #ifndef _CTL_UTIL_H #define _CTL_UTIL_H 1 __BEGIN_DECLS void ctl_scsi_tur(union ctl_io *io, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_inquiry(union ctl_io *io, uint8_t *data_ptr, int32_t data_len, uint8_t byte2, uint8_t page_code, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_request_sense(union ctl_io *io, uint8_t *data_ptr, int32_t data_len, uint8_t byte2, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_report_luns(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t select_report, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_read_write_buffer(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_buffer, uint8_t mode, uint8_t buffer_id, uint32_t buffer_offset, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_read_write(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int read_op, uint8_t byte2, int minimum_cdb_size, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_write_same(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t byte2, uint64_t lba, uint32_t num_blocks, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_read_capacity(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint32_t addr, int reladr, int pmi, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_read_capacity_16(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint64_t addr, int reladr, int pmi, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_mode_sense(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int dbd, int llbaa, uint8_t page_code, uint8_t pc, uint8_t subpage, int minimum_cdb_size, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_start_stop(union ctl_io *io, int start, int load_eject, int immediate, int power_conditions, 
ctl_tag_type tag_type, uint8_t control); void ctl_scsi_sync_cache(union ctl_io *io, int immed, int reladr, int minimum_cdb_size, uint64_t starting_lba, uint32_t block_count, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_persistent_res_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int action, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_persistent_res_out(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, int action, int type, uint64_t key, uint64_t sa_key, ctl_tag_type tag_type, uint8_t control); void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len, uint8_t action, ctl_tag_type tag_type, uint8_t control); #ifndef _KERNEL union ctl_io *ctl_scsi_alloc_io(uint32_t initid); void ctl_scsi_free_io(union ctl_io *io); void ctl_scsi_zero_io(union ctl_io *io); #else #define ctl_scsi_zero_io(io) ctl_zero_io(io) #endif /* !_KERNEL */ const char *ctl_scsi_task_string(struct ctl_taskio *taskio); void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb); void ctl_io_error_sbuf(union ctl_io *io, struct scsi_inquiry_data *inq_data, struct sbuf *sb); char *ctl_io_string(union ctl_io *io, char *str, int str_len); char *ctl_io_error_string(union ctl_io *io, struct scsi_inquiry_data *inq_data, char *str, int str_len); #ifdef _KERNEL void ctl_io_print(union ctl_io *io); void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data); void ctl_data_print(union ctl_io *io); #else /* _KERNEL */ void ctl_io_error_print(union ctl_io *io, struct scsi_inquiry_data *inq_data, FILE *ofile); #endif /* _KERNEL */ __END_DECLS #endif /* _CTL_UTIL_H */ /* * vim: ts=8 */ Index: head/sys/cam/ctl/scsi_ctl.c =================================================================== --- head/sys/cam/ctl/scsi_ctl.c (revision 326264) +++ head/sys/cam/ctl/scsi_ctl.c (revision 326265) @@ -1,1993 +1,1995 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctlfe_softc { struct ctl_port port; path_id_t path_id; target_id_t target_id; uint32_t hba_misc; u_int maxio; struct cam_sim *sim; char port_name[DEV_IDLEN]; struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; int ctios_sent; /* Number of active CTIOs */ int refcount; /* Number of active xpt_action() */ int atios_alloced; /* Number of ATIOs not freed */ int inots_alloced; /* Number of INOTs not freed */ struct task refdrain_task; STAILQ_HEAD(, ccb_hdr) work_queue; LIST_HEAD(, ccb_hdr) atio_list; /* List of ATIOs queued to SIM. */ LIST_HEAD(, ccb_hdr) inot_list; /* List of INOTs queued to SIM. */ STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; struct ctlfe_cmd_info { int cur_transfer_index; size_t cur_transfer_off; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ #define CTLFE_MAX_SEGS 32 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. * XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB doing DMA or sending status */ #define CTLFE_TIMEOUT 5 /* * Turn this on to enable extra debugging prints. 
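 *
 * (Editor's note, not part of the original file: with the defaults above,
 * enabling a single LUN makes ctlferegister() pre-allocate, with M_NOWAIT,
 * CTLFE_ATIO_PER_LUN CCB/ctl_io pairs plus one struct ctlfe_cmd_info each
 * for ATIOs, and CTLFE_IN_PER_LUN more CCB/ctl_io pairs for INOTs.)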
*/ #if 0 #define CTLFE_DEBUG #endif MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 #define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0]) #define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1]) static int ctlfeinitialize(void); static int ctlfeshutdown(void); static periph_init_t ctlfeperiphinit; static periph_deinit_t ctlfeperiphdeinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_lun_enable(void *arg, int lun_id); static int ctlfe_lun_disable(void *arg, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_datamove(union ctl_io *io); static void ctlfe_done(union ctl_io *io); static void ctlfe_dump(void); static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb); static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock); static struct periph_driver ctlfe_driver = { ctlfeperiphinit, "ctl", TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, CAM_PERIPH_DRV_EARLY, ctlfeperiphdeinit }; static struct ctl_frontend ctlfe_frontend = { .name = "camtgt", .init = ctlfeinitialize, .fe_dump = ctlfe_dump, .shutdown = ctlfeshutdown, }; CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); static int ctlfeinitialize(void) { STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); periphdriver_register(&ctlfe_driver); return (0); } static int ctlfeshutdown(void) { int error; error = periphdriver_unregister(&ctlfe_driver); if (error != 0) return (error); mtx_destroy(&ctlfe_list_mtx); return (0); } static void ctlfeperiphinit(void) { cam_status status; status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } } static int ctlfeperiphdeinit(void) { /* XXX: It would be good to tear down active ports here. */ if (!TAILQ_EMPTY(&ctlfe_driver.units)) return (EBUSY); xpt_register_async(0, ctlfeasync, NULL, NULL); return (0); } static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ctlfe_softc *softc; #ifdef CTLFEDEBUG printf("%s: entered\n", __func__); #endif mtx_lock(&ctlfe_list_mtx); STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { if (softc->path_id == xpt_path_path_id(path)) break; } mtx_unlock(&ctlfe_list_mtx); /* * When a new path gets registered, and it is capable of target * mode, go ahead and attach. Later on, we may need to be more * selective, but for now this will be sufficient. 
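 *
 * (Hypothetical sketch, not part of this change: a more selective policy
 * could key off the transport reported in the path inquiry, e.g.
 *
 *	if (cpi->transport != XPORT_FC && cpi->transport != XPORT_SAS)
 *		break;
 *
 * mirroring the port_type mapping done further down in this handler.)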
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_port *port; struct ccb_pathinq *cpi; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } if (softc != NULL) { #ifdef CTLFEDEBUG printf("%s: CTL port for CAM path %u already exists\n", __func__, xpt_path_path_id(path)); #endif break; } /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*softc)); return; } softc->path_id = cpi->ccb_h.path_id; softc->target_id = cpi->initiator_id; softc->sim = xpt_path_sim(path); softc->hba_misc = cpi->hba_misc; if (cpi->maxio != 0) softc->maxio = cpi->maxio; else softc->maxio = DFLTPHYS; mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_softc_list); port = &softc->port; port->frontend = &ctlfe_frontend; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) port->port_type = CTL_PORT_FC; else if (cpi->transport == XPORT_SAS) port->port_type = CTL_PORT_SAS; else port->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = CTLFE_REQ_CTL_IO; snprintf(softc->port_name, sizeof(softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. */ port->port_name = softc->port_name; port->physical_port = cpi->bus_id; port->virtual_port = 0; port->port_online = ctlfe_online; port->port_offline = ctlfe_offline; port->onoff_arg = softc; port->lun_enable = ctlfe_lun_enable; port->lun_disable = ctlfe_lun_disable; port->targ_lun_arg = softc; port->fe_datamove = ctlfe_datamove; port->fe_done = ctlfe_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with " "error %d!\n", __func__, retval); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); mtx_unlock(&ctlfe_list_mtx); } break; } case AC_PATH_DEREGISTERED: { if (softc != NULL) { /* * XXX KDM are we certain at this point that there * are no outstanding commands for this frontend? */ mtx_lock(&ctlfe_list_mtx); STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, links); mtx_unlock(&ctlfe_list_mtx); ctl_port_deregister(&softc->port); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; } case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; int retval; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? 
"left" : "arrived"); if (softc == NULL) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(&softc->port, dev_chg->target, dev_chg->wwpn, NULL); } else { retval = ctl_remove_initiator(&softc->port, dev_chg->target); } if (retval < 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? "add" : "remove", softc->port.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; union ccb ccb; cam_status status; int i; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; STAILQ_INIT(&softc->work_queue); LIST_INIT(&softc->atio_list); LIST_INIT(&softc->inot_list); softc->periph = periph; periph->softc = softc; /* Increase device openings to maximum for the SIM. */ if (bus_softc->sim->max_tagged_dev_openings > bus_softc->sim->max_dev_openings) { cam_release_devq(periph->path, /*relsim_flags*/RELSIM_ADJUST_OPENINGS, /*openings*/bus_softc->sim->max_tagged_dev_openings, /*timeout*/0, /*getcount_only*/1); } xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 1; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; struct ctlfe_cmd_info *cmd_info; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, M_ZERO | M_NOWAIT); if (cmd_info == NULL) { ctl_free_io(new_io); free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } PRIV_INFO(new_io) = cmd_info; softc->atios_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(cmd_info, M_CTLFE); ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } } status = cam_periph_acquire(periph); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: could not acquire reference " "count, status = %#x\n", __func__, status); return (status); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } 
softc->inots_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. */ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc; struct ctlfe_softc *bus_softc; union ccb ccb; struct ccb_hdr *hdr; cam_status status; /* Abort all ATIOs and INOTs queued to SIM. */ xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_ABORT; LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } /* Disable the LUN in SIM. */ ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 0; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); /* * XXX KDM what do we do now? */ } bus_softc = softc->parent_softc; mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0", __func__, softc->ctios_sent)); KASSERT(softc->refcount == 0, ("%s: refcount %d != 0", __func__, softc->refcount)); KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0", __func__, softc->inots_alloced)); free(softc, M_CTLFE); } static void ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, u_int16_t *sglist_cnt) { struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ctl_sg_entry *ctl_sglist; bus_dma_segment_t *cam_sglist; size_t off; int i, idx; cmd_info = PRIV_INFO(io); bus_softc = softc->parent_softc; /* * Set the direction, relative to the initiator. */ *flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) *flags |= CAM_DIR_IN; else *flags |= CAM_DIR_OUT; *flags &= ~CAM_DATA_MASK; idx = cmd_info->cur_transfer_index; off = cmd_info->cur_transfer_off; cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. 
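* In this single-buffer case the transfer is clamped to the SIM's maxio and, if the buffer is larger, carved into successive CTIOs. Purely as an illustration: with kern_data_len = 1 MB, maxio = 128 kB and off = 0, the first pass yields dxfer_len = 128 kB, advances cur_transfer_off to 128 kB and sets CTLFE_CMD_PIECEWISE in cmd_info->flags, and seven more CTIOs follow before status may be sent.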
*/ /* One time shift for SRR offset. */ off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; *data_ptr = io->scsiio.kern_data_ptr + off; if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { *dxfer_len = io->scsiio.kern_data_len - off; } else { *dxfer_len = bus_softc->maxio; cmd_info->cur_transfer_off += bus_softc->maxio; cmd_info->flags |= CTLFE_CMD_PIECEWISE; } *sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_PADDR; else *flags |= CAM_DATA_VADDR; } else { /* S/G list with physical or virtual pointers. */ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; /* One time shift for SRR offset. */ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) { io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off; idx++; off = 0; } off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; cam_sglist = cmd_info->cam_sglist; *dxfer_len = 0; for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off; if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; *dxfer_len += cam_sglist[i].ds_len; } else { cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; cmd_info->cur_transfer_index = idx + i; cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; cmd_info->flags |= CTLFE_CMD_PIECEWISE; *dxfer_len += cam_sglist[i].ds_len; if (ctl_sglist[i].len != 0) i++; break; } if (i == (CTLFE_MAX_SEGS - 1) && idx + i < (io->scsiio.kern_sg_entries - 1)) { cmd_info->cur_transfer_index = idx + i + 1; cmd_info->cur_transfer_off = 0; cmd_info->flags |= CTLFE_CMD_PIECEWISE; i++; break; } off = 0; } *sglist_cnt = i; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_SG_PADDR; else *flags |= CAM_DATA_SG; *data_ptr = (uint8_t *)cam_sglist; } } static void ctlfestart(struct cam_periph *periph, union ccb *start_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_cmd_info *cmd_info; struct ccb_hdr *ccb_h; struct ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; softc = (struct ctlfe_lun_softc *)periph->softc; next: /* Take the ATIO off the work queue */ ccb_h = STAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { xpt_release_ccb(start_ccb); return; } STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); cmd_info = PRIV_INFO(io); cmd_info->cur_transfer_index = 0; cmd_info->cur_transfer_off = 0; cmd_info->flags = 0; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { /* * Datamove call, we need to setup the S/G list. */ ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); } else { /* * We're done, send status back. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* Tell the SIM that we've aborted this ATIO */ #ifdef CTLFEDEBUG printf("%s: tag %04x abort\n", __func__, atio->tag_id); #endif KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO, ("func_code %#x is not ATIO", atio->ccb_h.func_code)); start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(start_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */0); /* XPT_ABORT is not queued, so we can take next I/O. 
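* Since the abort is not queued to the SIM, it completes in the calling context and start_ccb is immediately free for reuse on the next ATIO from the work queue.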
*/ goto next; } data_ptr = NULL; dxfer_len = 0; csio->sglist_cnt = 0; } scsi_status = 0; if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || io->io_hdr.status == CTL_SUCCESS)) { flags |= CAM_SEND_STATUS; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_DATA_SG) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_DATA_SG) && (csio->sglist_cnt == 0)) || (((flags & CAM_DATA_SG) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0], flags, dxfer_len, csio->sglist_cnt); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ CTLFE_TIMEOUT * 1000); start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.ccb_atio = atio; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); softc->ctios_sent++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* * If we still have work to do, ask for another CCB. */ if (!STAILQ_EMPTY(&softc->work_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } static void ctlfe_drain(void *context, int pending) { struct cam_periph *periph = context; struct ctlfe_lun_softc *softc = periph->softc; cam_periph_lock(periph); while (softc->refcount != 0) { cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ctlfe_drain", 1); } cam_periph_unlock(periph); cam_periph_release(periph); } static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) { struct ctlfe_lun_softc *softc; union ctl_io *io; struct ctlfe_cmd_info *cmd_info; softc = (struct ctlfe_lun_softc *)periph->softc; io = ccb->ccb_h.io_ptr; switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: softc->atios_alloced--; cmd_info = PRIV_INFO(io); free(cmd_info, M_CTLFE); break; case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: softc->inots_alloced--; break; default: break; } ctl_free_io(io); free(ccb, M_CTLFE); KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0", __func__, softc->inots_alloced)); /* * If we have received all of our CCBs, we can release our * reference on the peripheral driver. It will probably go away * now. 
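* If I/O is still in flight (refcount != 0), the release cannot safely happen here; it is instead deferred to a taskqueue thread, which sleeps in ctlfe_drain() until the count reaches zero.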
*/ if (softc->atios_alloced == 0 && softc->inots_alloced == 0) { if (softc->refcount == 0) { cam_periph_release_locked(periph); } else { TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph); taskqueue_enqueue(taskqueue_thread, &softc->refdrain_task); } } } /* * Send the ATIO/INOT back to the SIM, or free it if the periph was invalidated. */ static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock) { struct ctlfe_lun_softc *softc; struct mtx *mtx; if (periph->flags & CAM_PERIPH_INVALID) { mtx = cam_periph_mtx(periph); ctlfe_free_ccb(periph, ccb); if (unlock) mtx_unlock(mtx); return; } softc = (struct ctlfe_lun_softc *)periph->softc; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h, periph_links.le); else LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h, periph_links.le); if (unlock) cam_periph_unlock(periph); /* * For a wildcard attachment, commands can come in with a specific * target/lun. Reset the target and LUN fields back to the wildcard * values before we send them back down to the SIM. */ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, CAM_PRIORITY_NONE, ccb->ccb_h.flags); xpt_action(ccb); } static int ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) { uint64_t lba; uint32_t num_blocks, nbc; uint8_t *cmdbyt = atio_cdb_ptr(atio); nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ switch (cmdbyt[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; lba = scsi_3btoul(cdb->addr); lba &= 0x1fffff; num_blocks = cdb->length; if (num_blocks == 0) num_blocks = 256; lba += nbc; num_blocks -= nbc; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto2b(num_blocks, cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } default: return (-1); } return (0); } static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; struct mtx *mtx; cam_status status; KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x\n", __func__, done_ccb->ccb_h.func_code); #endif /* * At this point CTL has no known use case for device queue freezes. * In case some SIM thinks differently -- drop its freeze right here.
*/ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); atio = &done_ccb->atio; status = atio->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_CDB_RECVD) { ctlfe_free_ccb(periph, done_ccb); goto out; } resubmit: /* * Allocate a ctl_io, pass it to CTL, and wait for the * datamove or done. */ mtx_unlock(mtx); io = done_ccb->ccb_h.io_ptr; cmd_info = PRIV_INFO(io); ctl_zero_io(io); /* Save pointers on both sides */ PRIV_CCB(io) = done_ccb; PRIV_INFO(io) = cmd_info; done_ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path; resets, etc. come * down the immediate notify path below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = atio->init_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; } io->scsiio.tag_num = atio->tag_id; switch (atio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, atio->tag_action); break; } if (atio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len); #ifdef CTLFEDEBUG printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->scsiio.tag_num, io->scsiio.cdb[0]); #endif ctl_queue(io); return; } case XPT_CONT_TARGET_IO: { int srr = 0; uint32_t srr_off = 0; atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; io = (union ctl_io *)atio->ccb_h.io_ptr; softc->ctios_sent--; #ifdef CTLFEDEBUG printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", __func__, atio->tag_id, done_ccb->ccb_h.flags); #endif /* * Handle the SRR case, where the initiator pushes the data * pointer back. */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV && done_ccb->csio.msg_ptr != NULL && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED && done_ccb->csio.msg_ptr[1] == 5 && done_ccb->csio.msg_ptr[2] == 0) { srr = 1; srr_off = (done_ccb->csio.msg_ptr[3] << 24) | (done_ccb->csio.msg_ptr[4] << 16) | (done_ccb->csio.msg_ptr[5] << 8) | (done_ccb->csio.msg_ptr[6]); } /* * If we have an SRR and we're still sending data, we * should be able to adjust offsets and cycle again. * It is possible only if offset is from this datamove.
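* Illustrative numbers only: if this datamove covered kern_rel_offset = 64 kB with kern_data_len = 32 kB, an SRR offset of 72 kB falls inside [64 kB, 96 kB) and is retryable; the code below then sets kern_data_resid = 64 kB + 32 kB - 72 kB = 24 kB and ext_data_filled = 72 kB before requeueing the ATIO. An offset outside that window belongs to an earlier datamove and is handled by the CDB-adjustment path further down.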
*/ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && srr_off >= io->scsiio.kern_rel_offset && srr_off < io->scsiio.kern_rel_offset + io->scsiio.kern_data_len) { io->scsiio.kern_data_resid = io->scsiio.kern_rel_offset + io->scsiio.kern_data_len - srr_off; io->scsiio.ext_data_filled = srr_off; io->scsiio.io_hdr.status = CTL_STATUS_NONE; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; xpt_release_ccb(done_ccb); STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } /* * If status was being sent, the back end data is now history. * Hack it up and resubmit a new command with the CDB adjusted. * If the SIM does the right thing, all of the resid math * should work. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { xpt_release_ccb(done_ccb); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; /* * If we were sending status back to the initiator, free up * resources. If we were doing a datamove, call the * datamove done routine. */ if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { /* * If we asked to send sense data but it wasn't sent, * queue the I/O back to CTL for later REQUEST SENSE. */ if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 && (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) { PRIV_INFO(io) = PRIV_INFO( (union ctl_io *)atio->ccb_h.io_ptr); ctl_queue_sense(atio->ccb_h.io_ptr); atio->ccb_h.io_ptr = io; } /* Abort ATIO if CTIO sending status has failed. */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { done_ccb->ccb_h.func_code = XPT_ABORT; done_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(done_ccb); } xpt_release_ccb(done_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */1); return; } else { struct ctlfe_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = PRIV_INFO(io); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; /* * Translate CAM status to CTL status. Success * does not change the overall ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->scsiio.kern_data_resid -= csio->dxfer_len - csio->resid; io->io_hdr.port_status = 0; break; default: /* * XXX KDM we probably need to figure out a * standard set of errors that the SIM * drivers should return in the event of a * data transfer failure. A data phase * error will at least point the user to a * data transfer error of some sort. * Hopefully the SIM printed out some * additional information to give the user * a clue what happened. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary.
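* cmd_info->cur_transfer_index and cmd_info->cur_transfer_off, recorded by ctlfedata() when it ran out of maxio budget or S/G slots, tell the next ctlfedata() call exactly where to resume.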
*/ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && io->io_hdr.port_status == 0 && csio->resid == 0) { ccb_flags flags; uint8_t *data_ptr; uint32_t dxfer_len; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID); ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, CTLFE_TIMEOUT * 1000); csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. */ xpt_release_ccb(done_ccb); mtx_unlock(mtx); /* Call the backend move done callback */ io->scsiio.be_move_done(io); } return; } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; int send_ctl_io; LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); inot = &done_ccb->cin1; io = done_ccb->ccb_h.io_ptr; ctl_zero_io(io); send_ctl_io = 1; io->io_hdr.io_type = CTL_IO_TASK; PRIV_CCB(io) = done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; } /* XXX KDM should this be the tag_id? */ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_QUERY_TASK: io->taskio.task_action = CTL_TASK_QUERY_TASK; break; case MSG_QUERY_TASK_SET: io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case MSG_QUERY_ASYNC_EVENT: io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: unsupported INOT message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; default: xpt_print(periph->path, "%s: unsupported INOT status 0x%x\n", __func__, status); /* FALLTHROUGH */ case CAM_REQ_ABORTED: case CAM_REQ_INVALID: case CAM_DEV_NOT_THERE: case CAM_PROVIDE_FAIL: ctlfe_free_ccb(periph, done_ccb); goto out; } if (send_ctl_io != 0) { ctl_queue(io); } else { done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* Queue this back down to the SIM as an immediate notify. 
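* The completed acknowledgement is recycled in place: its function code is flipped back to XPT_IMMEDIATE_NOTIFY and the CCB is requeued, so the pool of INOTs allocated in ctlferegister() stays constant.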
*/ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1); return; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: case XPT_GET_SIM_KNOB_OLD: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } out: mtx_unlock(mtx); } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc = arg; union ccb *ccb; cam_status status; struct cam_path *path; int set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; xpt_action(ccb); /* Check whether we should change WWNs. */ if (online != 0) { if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. */ if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn != ccb->knob.xport_specific.fc.wwnn) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, false, 0); } if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn != ccb->knob.xport_specific.fc.wwpn) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, false, 0, true, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); if (bus_softc->port.wwnn != 0) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } if (bus_softc->port.wwpn != 0) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } } } if (set_wwnn) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ADDRESS; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed set WWNs: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, ccb->ccb_h.status); } else { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } /* Check whether we should change role. */ if ((ccb->knob.xport_specific.valid & KNOB_VALID_ROLE) == 0 || ((online != 0) ^ ((ccb->knob.xport_specific.fc.role & KNOB_ROLE_TARGET) != 0)) != 0) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (online) ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed %s target role: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable", ccb->ccb_h.status); } else { printf("%s: %s (path id %d) target role %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? 
"enable" : "disable"); } } xpt_free_path(path); xpt_free_ccb(ccb); } static void ctlfe_online(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; /* * Create the wildcard LUN before bringing the port online. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(lun_softc, M_CTLFE); return; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(lun_softc, M_CTLFE); } xpt_path_unlock(path); ctlfe_onoffline(arg, /*online*/ 1); xpt_free_path(path); } static void ctlfe_offline(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken * the port offline. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } xpt_path_lock(path); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); xpt_path_unlock(path); xpt_free_path(path); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; cam_status status; bus_softc = (struct ctlfe_softc *)arg; if (bus_softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, bus_softc->target_id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); return (0); } softc->parent_softc = bus_softc; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
entry->status_text : "Unknown", status); free(softc, M_CTLFE); } xpt_path_unlock(path); xpt_free_path(path); return (0); } /* * This will get called when the user removes a LUN to disable that LUN * on every bus that is attached to CTL. */ static int ctlfe_lun_disable(void *arg, int lun_id) { struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; softc = (struct ctlfe_softc *)arg; if (softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); mtx_lock(&softc->lun_softc_mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; path = lun_softc->periph->path; if ((xpt_path_target_id(path) == softc->target_id) && (xpt_path_lun_id(path) == lun_id)) { break; } } if (lun_softc == NULL) { mtx_unlock(&softc->lun_softc_mtx); printf("%s: can't find lun %d\n", __func__, lun_id); return (1); } cam_periph_acquire(lun_softc->periph); mtx_unlock(&softc->lun_softc_mtx); cam_periph_lock(lun_softc->periph); cam_periph_invalidate(lun_softc->periph); cam_periph_unlock(lun_softc->periph); cam_periph_release(lun_softc->periph); return (0); } static void ctlfe_dump_sim(struct cam_sim *sim) { printf("%s%d: max dev openings: %d, max tagged dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_dev_openings, sim->max_tagged_dev_openings); } /* * Assumes that the SIM lock is held. */ static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc) { struct cam_periph *periph = softc->periph; struct ccb_hdr *hdr; struct ccb_getdevstats cgds; int num_items; xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgds.ccb_h.func_code = XPT_GDEV_STATS; xpt_action((union ccb *)&cgds); if ((cgds.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { xpt_print(periph->path, "devq: openings %d, active %d, " "allocated %d, queued %d, held %d\n", cgds.dev_openings, cgds.dev_active, cgds.allocated, cgds.queued, cgds.held); } num_items = 0; STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) { union ctl_io *io = hdr->io_ptr; num_items++; /* * Only regular SCSI I/O is put on the work * queue, so we can print sense here. There may be no * sense if it's no the queue for a DMA, but this serves to * print out the CCB as well. * * XXX KDM switch this over to scsi_sense_print() when * CTL is merged in with CAM. */ ctl_io_error_print(io, NULL); /* * Print DMA status if we are DMA_QUEUED. */ if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { xpt_print(periph->path, "Total %u, Current %u, Resid %u\n", io->scsiio.kern_total_len, io->scsiio.kern_data_len, io->scsiio.kern_data_resid); } } xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items); xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent); } /* * Datamove/done routine called by CTL. Put ourselves on the queue to * receive a CCB from CAM so we can queue the continue I/O request down * to the adapter. 
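* For SCSI I/O these entry points never touch the hardware directly: they mark the I/O (CTL_FLAG_DMA_QUEUED and/or CTL_FLAG_STATUS_QUEUED), append the saved CCB header to the periph's work queue and xpt_schedule() the periph; ctlfestart() later builds and sends the actual CTIO. (Task management completions are acknowledged immediately instead.)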
*/ static void ctlfe_datamove(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); io->scsiio.ext_data_filled = 0; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); } static void ctlfe_done(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command. */ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_REJECTED: ccb->cna2.arg = CAM_RSP_TMF_REJECTED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_LUN_DOES_NOT_EXIST: ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: ccb->cna2.arg = CAM_RSP_TMF_FAILED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; } ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; xpt_action(ccb); } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { ctlfe_requeue_ccb(periph, ccb, /* unlock */1); return; } else { io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } cam_periph_unlock(periph); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) ctlfe_dump_queue(lun_softc); } } Index: head/sys/cam/scsi/scsi_all.c =================================================================== --- head/sys/cam/scsi/scsi_all.c (revision 326264) +++ head/sys/cam/scsi/scsi_all.c (revision 326265) @@ -1,9231 +1,9233 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Implementation of Utility functions for all SCSI device types. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #ifdef _KERNEL #include "opt_scsi.h" #include #include #include #include #include #include #include #include #else #include #include #include #include #include #endif #include #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #else #include #include #ifndef FALSE #define FALSE 0 #endif /* FALSE */ #ifndef TRUE #define TRUE 1 #endif /* TRUE */ #define ERESTART -1 /* restart syscall */ #define EJUSTRETURN -2 /* don't modify regs, just return */ #endif /* !_KERNEL */ /* * This is the default number of milliseconds we wait for devices to settle * after a SCSI bus reset. */ #ifndef SCSI_DELAY #define SCSI_DELAY 2000 #endif /* * All devices need _some_ sort of bus settle delay, so we'll set it to * a minimum value of 100ms. Note that this is pertinent only for SPI, * not for transports like Fibre Channel or iSCSI where 'delay' is * completely meaningless. */ #ifndef SCSI_MIN_DELAY #define SCSI_MIN_DELAY 100 #endif /* * Make sure the user isn't using seconds instead of milliseconds. */ #if (SCSI_DELAY < SCSI_MIN_DELAY && SCSI_DELAY != 0) #error "SCSI_DELAY is in milliseconds, not seconds! Please use a larger value" #endif int scsi_delay; static int ascentrycomp(const void *key, const void *member); static int senseentrycomp(const void *key, const void *member); static void fetchtableentries(int sense_key, int asc, int ascq, struct scsi_inquiry_data *, const struct sense_key_table_entry **, const struct asc_table_entry **); #ifdef _KERNEL static void init_scsi_delay(void); static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS); static int set_scsi_delay(int delay); #endif #if !defined(SCSI_NO_OP_STRINGS) #define D (1 << T_DIRECT) #define T (1 << T_SEQUENTIAL) #define L (1 << T_PRINTER) #define P (1 << T_PROCESSOR) #define W (1 << T_WORM) #define R (1 << T_CDROM) #define O (1 << T_OPTICAL) #define M (1 << T_CHANGER) #define A (1 << T_STORARRAY) #define E (1 << T_ENCLOSURE) #define B (1 << T_RBC) #define K (1 << T_OCRW) #define V (1 << T_ADC) #define F (1 << T_OSD) #define S (1 << T_SCANNER) #define C (1 << T_COMM) #define ALL (D | T | L | P | W | R | O | M | A | E | B | K | V | F | S | C) static struct op_table_entry plextor_cd_ops[] = { { 0xD8, R, "CD-DA READ" } }; static struct scsi_op_quirk_entry scsi_op_quirk_table[] = { { /* * I believe that 0xD8 is the Plextor proprietary command * to read CD-DA data. I'm not sure which Plextor CDROM * models support the command, though. I know for sure * that the 4X, 8X, and 12X models do, and presumably the * 12-20X does. I don't know about any earlier models, * though. If anyone has any more complete information, * feel free to change this quirk entry.
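* * Matching is done on inquiry data: the entry below pairs a removable T_CDROM "PLEXTOR" / "CD-ROM PX*" pattern with a private opcode table that scsi_op_desc() consults ahead of the generic scsi_op_codes table.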
*/ {T_CDROM, SIP_MEDIA_REMOVABLE, "PLEXTOR", "CD-ROM PX*", "*"}, nitems(plextor_cd_ops), plextor_cd_ops } }; static struct op_table_entry scsi_op_codes[] = { /* * From: http://www.t10.org/lists/op-num.txt * Modifications by Kenneth Merry (ken@FreeBSD.ORG) * and Jung-uk Kim (jkim@FreeBSD.org) * * Note: order is important in this table, scsi_op_desc() currently * depends on the opcodes in the table being in order to save * search time. * Note: scanner and comm. devices are carried over from the previous * version because they were removed in the latest spec. */ /* File: OP-NUM.TXT * * SCSI Operation Codes * Numeric Sorted Listing * as of 5/26/15 * * D - DIRECT ACCESS DEVICE (SBC-2) device column key * .T - SEQUENTIAL ACCESS DEVICE (SSC-2) ----------------- * . L - PRINTER DEVICE (SSC) M = Mandatory * . P - PROCESSOR DEVICE (SPC) O = Optional * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) V = Vendor spec. * . . R - CD/DVE DEVICE (MMC-3) Z = Obsolete * . . O - OPTICAL MEMORY DEVICE (SBC-2) * . . .M - MEDIA CHANGER DEVICE (SMC-2) * . . . A - STORAGE ARRAY DEVICE (SCC-2) * . . . .E - ENCLOSURE SERVICES DEVICE (SES) * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC) * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW) * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC) * . . . . .F - OBJECT-BASED STORAGE (OSD) * OP DTLPWROMAEBKVF Description * -- -------------- ---------------------------------------------- */ /* 00 MMMMMMMMMMMMMM TEST UNIT READY */ { 0x00, ALL, "TEST UNIT READY" }, /* 01 M REWIND */ { 0x01, T, "REWIND" }, /* 01 Z V ZZZZ REZERO UNIT */ { 0x01, D | W | R | O | M, "REZERO UNIT" }, /* 02 VVVVVV V */ /* 03 MMMMMMMMMMOMMM REQUEST SENSE */ { 0x03, ALL, "REQUEST SENSE" }, /* 04 M OO FORMAT UNIT */ { 0x04, D | R | O, "FORMAT UNIT" }, /* 04 O FORMAT MEDIUM */ { 0x04, T, "FORMAT MEDIUM" }, /* 04 O FORMAT */ { 0x04, L, "FORMAT" }, /* 05 VMVVVV V READ BLOCK LIMITS */ { 0x05, T, "READ BLOCK LIMITS" }, /* 06 VVVVVV V */ /* 07 OVV O OV REASSIGN BLOCKS */ { 0x07, D | W | O, "REASSIGN BLOCKS" }, /* 07 O INITIALIZE ELEMENT STATUS */ { 0x07, M, "INITIALIZE ELEMENT STATUS" }, /* 08 MOV O OV READ(6) */ { 0x08, D | T | W | O, "READ(6)" }, /* 08 O RECEIVE */ { 0x08, P, "RECEIVE" }, /* 08 GET MESSAGE(6) */ { 0x08, C, "GET MESSAGE(6)" }, /* 09 VVVVVV V */ /* 0A OO O OV WRITE(6) */ { 0x0A, D | T | W | O, "WRITE(6)" }, /* 0A M SEND(6) */ { 0x0A, P, "SEND(6)" }, /* 0A SEND MESSAGE(6) */ { 0x0A, C, "SEND MESSAGE(6)" }, /* 0A M PRINT */ { 0x0A, L, "PRINT" }, /* 0B Z ZOZV SEEK(6) */ { 0x0B, D | W | R | O, "SEEK(6)" }, /* 0B O SET CAPACITY */ { 0x0B, T, "SET CAPACITY" }, /* 0B O SLEW AND PRINT */ { 0x0B, L, "SLEW AND PRINT" }, /* 0C VVVVVV V */ /* 0D VVVVVV V */ /* 0E VVVVVV V */ /* 0F VOVVVV V READ REVERSE(6) */ { 0x0F, T, "READ REVERSE(6)" }, /* 10 VM VVV WRITE FILEMARKS(6) */ { 0x10, T, "WRITE FILEMARKS(6)" }, /* 10 O SYNCHRONIZE BUFFER */ { 0x10, L, "SYNCHRONIZE BUFFER" }, /* 11 VMVVVV SPACE(6) */ { 0x11, T, "SPACE(6)" }, /* 12 MMMMMMMMMMMMMM INQUIRY */ { 0x12, ALL, "INQUIRY" }, /* 13 V VVVV */ /* 13 O VERIFY(6) */ { 0x13, T, "VERIFY(6)" }, /* 14 VOOVVV RECOVER BUFFERED DATA */ { 0x14, T | L, "RECOVER BUFFERED DATA" }, /* 15 OMO O OOOO OO MODE SELECT(6) */ { 0x15, ALL & ~(P | R | B | F), "MODE SELECT(6)" }, /* 16 ZZMZO OOOZ O RESERVE(6) */ { 0x16, ALL & ~(R | B | V | F | C), "RESERVE(6)" }, /* 16 Z RESERVE ELEMENT(6) */ { 0x16, M, "RESERVE ELEMENT(6)" }, /* 17 ZZMZO OOOZ O RELEASE(6) */ { 0x17, ALL & ~(R | B | V | F | C), "RELEASE(6)" }, /* 17 Z RELEASE ELEMENT(6) */ { 0x17, M, "RELEASE ELEMENT(6)" }, /* 18 
ZZZZOZO Z COPY */ { 0x18, D | T | L | P | W | R | O | K | S, "COPY" }, /* 19 VMVVVV ERASE(6) */ { 0x19, T, "ERASE(6)" }, /* 1A OMO O OOOO OO MODE SENSE(6) */ { 0x1A, ALL & ~(P | R | B | F), "MODE SENSE(6)" }, /* 1B O OOO O MO O START STOP UNIT */ { 0x1B, D | W | R | O | A | B | K | F, "START STOP UNIT" }, /* 1B O M LOAD UNLOAD */ { 0x1B, T | V, "LOAD UNLOAD" }, /* 1B SCAN */ { 0x1B, S, "SCAN" }, /* 1B O STOP PRINT */ { 0x1B, L, "STOP PRINT" }, /* 1B O OPEN/CLOSE IMPORT/EXPORT ELEMENT */ { 0x1B, M, "OPEN/CLOSE IMPORT/EXPORT ELEMENT" }, /* 1C OOOOO OOOM OOO RECEIVE DIAGNOSTIC RESULTS */ { 0x1C, ALL & ~(R | B), "RECEIVE DIAGNOSTIC RESULTS" }, /* 1D MMMMM MMOM MMM SEND DIAGNOSTIC */ { 0x1D, ALL & ~(R | B), "SEND DIAGNOSTIC" }, /* 1E OO OOOO O O PREVENT ALLOW MEDIUM REMOVAL */ { 0x1E, D | T | W | R | O | M | K | F, "PREVENT ALLOW MEDIUM REMOVAL" }, /* 1F */ /* 20 V VVV V */ /* 21 V VVV V */ /* 22 V VVV V */ /* 23 V V V V */ /* 23 O READ FORMAT CAPACITIES */ { 0x23, R, "READ FORMAT CAPACITIES" }, /* 24 V VV SET WINDOW */ { 0x24, S, "SET WINDOW" }, /* 25 M M M M READ CAPACITY(10) */ { 0x25, D | W | O | B, "READ CAPACITY(10)" }, /* 25 O READ CAPACITY */ { 0x25, R, "READ CAPACITY" }, /* 25 M READ CARD CAPACITY */ { 0x25, K, "READ CARD CAPACITY" }, /* 25 GET WINDOW */ { 0x25, S, "GET WINDOW" }, /* 26 V VV */ /* 27 V VV */ /* 28 M MOM MM READ(10) */ { 0x28, D | W | R | O | B | K | S, "READ(10)" }, /* 28 GET MESSAGE(10) */ { 0x28, C, "GET MESSAGE(10)" }, /* 29 V VVO READ GENERATION */ { 0x29, O, "READ GENERATION" }, /* 2A O MOM MO WRITE(10) */ { 0x2A, D | W | R | O | B | K, "WRITE(10)" }, /* 2A SEND(10) */ { 0x2A, S, "SEND(10)" }, /* 2A SEND MESSAGE(10) */ { 0x2A, C, "SEND MESSAGE(10)" }, /* 2B Z OOO O SEEK(10) */ { 0x2B, D | W | R | O | K, "SEEK(10)" }, /* 2B O LOCATE(10) */ { 0x2B, T, "LOCATE(10)" }, /* 2B O POSITION TO ELEMENT */ { 0x2B, M, "POSITION TO ELEMENT" }, /* 2C V OO ERASE(10) */ { 0x2C, R | O, "ERASE(10)" }, /* 2D O READ UPDATED BLOCK */ { 0x2D, O, "READ UPDATED BLOCK" }, /* 2D V */ /* 2E O OOO MO WRITE AND VERIFY(10) */ { 0x2E, D | W | R | O | B | K, "WRITE AND VERIFY(10)" }, /* 2F O OOO VERIFY(10) */ { 0x2F, D | W | R | O, "VERIFY(10)" }, /* 30 Z ZZZ SEARCH DATA HIGH(10) */ { 0x30, D | W | R | O, "SEARCH DATA HIGH(10)" }, /* 31 Z ZZZ SEARCH DATA EQUAL(10) */ { 0x31, D | W | R | O, "SEARCH DATA EQUAL(10)" }, /* 31 OBJECT POSITION */ { 0x31, S, "OBJECT POSITION" }, /* 32 Z ZZZ SEARCH DATA LOW(10) */ { 0x32, D | W | R | O, "SEARCH DATA LOW(10)" }, /* 33 Z OZO SET LIMITS(10) */ { 0x33, D | W | R | O, "SET LIMITS(10)" }, /* 34 O O O O PRE-FETCH(10) */ { 0x34, D | W | O | K, "PRE-FETCH(10)" }, /* 34 M READ POSITION */ { 0x34, T, "READ POSITION" }, /* 34 GET DATA BUFFER STATUS */ { 0x34, S, "GET DATA BUFFER STATUS" }, /* 35 O OOO MO SYNCHRONIZE CACHE(10) */ { 0x35, D | W | R | O | B | K, "SYNCHRONIZE CACHE(10)" }, /* 36 Z O O O LOCK UNLOCK CACHE(10) */ { 0x36, D | W | O | K, "LOCK UNLOCK CACHE(10)" }, /* 37 O O READ DEFECT DATA(10) */ { 0x37, D | O, "READ DEFECT DATA(10)" }, /* 37 O INITIALIZE ELEMENT STATUS WITH RANGE */ { 0x37, M, "INITIALIZE ELEMENT STATUS WITH RANGE" }, /* 38 O O O MEDIUM SCAN */ { 0x38, W | O | K, "MEDIUM SCAN" }, /* 39 ZZZZOZO Z COMPARE */ { 0x39, D | T | L | P | W | R | O | K | S, "COMPARE" }, /* 3A ZZZZOZO Z COPY AND VERIFY */ { 0x3A, D | T | L | P | W | R | O | K | S, "COPY AND VERIFY" }, /* 3B OOOOOOOOOOMOOO WRITE BUFFER */ { 0x3B, ALL, "WRITE BUFFER" }, /* 3C OOOOOOOOOO OOO READ BUFFER */ { 0x3C, ALL & ~(B), "READ BUFFER" }, /* 3D O UPDATE BLOCK */ { 0x3D, O, 
"UPDATE BLOCK" }, /* 3E O O O READ LONG(10) */ { 0x3E, D | W | O, "READ LONG(10)" }, /* 3F O O O WRITE LONG(10) */ { 0x3F, D | W | O, "WRITE LONG(10)" }, /* 40 ZZZZOZOZ CHANGE DEFINITION */ { 0x40, D | T | L | P | W | R | O | M | S | C, "CHANGE DEFINITION" }, /* 41 O WRITE SAME(10) */ { 0x41, D, "WRITE SAME(10)" }, /* 42 O UNMAP */ { 0x42, D, "UNMAP" }, /* 42 O READ SUB-CHANNEL */ { 0x42, R, "READ SUB-CHANNEL" }, /* 43 O READ TOC/PMA/ATIP */ { 0x43, R, "READ TOC/PMA/ATIP" }, /* 44 M M REPORT DENSITY SUPPORT */ { 0x44, T | V, "REPORT DENSITY SUPPORT" }, /* 44 READ HEADER */ /* 45 O PLAY AUDIO(10) */ { 0x45, R, "PLAY AUDIO(10)" }, /* 46 M GET CONFIGURATION */ { 0x46, R, "GET CONFIGURATION" }, /* 47 O PLAY AUDIO MSF */ { 0x47, R, "PLAY AUDIO MSF" }, /* 48 */ /* 49 */ /* 4A M GET EVENT STATUS NOTIFICATION */ { 0x4A, R, "GET EVENT STATUS NOTIFICATION" }, /* 4B O PAUSE/RESUME */ { 0x4B, R, "PAUSE/RESUME" }, /* 4C OOOOO OOOO OOO LOG SELECT */ { 0x4C, ALL & ~(R | B), "LOG SELECT" }, /* 4D OOOOO OOOO OMO LOG SENSE */ { 0x4D, ALL & ~(R | B), "LOG SENSE" }, /* 4E O STOP PLAY/SCAN */ { 0x4E, R, "STOP PLAY/SCAN" }, /* 4F */ /* 50 O XDWRITE(10) */ { 0x50, D, "XDWRITE(10)" }, /* 51 O XPWRITE(10) */ { 0x51, D, "XPWRITE(10)" }, /* 51 O READ DISC INFORMATION */ { 0x51, R, "READ DISC INFORMATION" }, /* 52 O XDREAD(10) */ { 0x52, D, "XDREAD(10)" }, /* 52 O READ TRACK INFORMATION */ { 0x52, R, "READ TRACK INFORMATION" }, /* 53 O RESERVE TRACK */ { 0x53, R, "RESERVE TRACK" }, /* 54 O SEND OPC INFORMATION */ { 0x54, R, "SEND OPC INFORMATION" }, /* 55 OOO OMOOOOMOMO MODE SELECT(10) */ { 0x55, ALL & ~(P), "MODE SELECT(10)" }, /* 56 ZZMZO OOOZ RESERVE(10) */ { 0x56, ALL & ~(R | B | K | V | F | C), "RESERVE(10)" }, /* 56 Z RESERVE ELEMENT(10) */ { 0x56, M, "RESERVE ELEMENT(10)" }, /* 57 ZZMZO OOOZ RELEASE(10) */ { 0x57, ALL & ~(R | B | K | V | F | C), "RELEASE(10)" }, /* 57 Z RELEASE ELEMENT(10) */ { 0x57, M, "RELEASE ELEMENT(10)" }, /* 58 O REPAIR TRACK */ { 0x58, R, "REPAIR TRACK" }, /* 59 */ /* 5A OOO OMOOOOMOMO MODE SENSE(10) */ { 0x5A, ALL & ~(P), "MODE SENSE(10)" }, /* 5B O CLOSE TRACK/SESSION */ { 0x5B, R, "CLOSE TRACK/SESSION" }, /* 5C O READ BUFFER CAPACITY */ { 0x5C, R, "READ BUFFER CAPACITY" }, /* 5D O SEND CUE SHEET */ { 0x5D, R, "SEND CUE SHEET" }, /* 5E OOOOO OOOO M PERSISTENT RESERVE IN */ { 0x5E, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE IN" }, /* 5F OOOOO OOOO M PERSISTENT RESERVE OUT */ { 0x5F, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE OUT" }, /* 7E OO O OOOO O extended CDB */ { 0x7E, D | T | R | M | A | E | B | V, "extended CDB" }, /* 7F O M variable length CDB (more than 16 bytes) */ { 0x7F, D | F, "variable length CDB (more than 16 bytes)" }, /* 80 Z XDWRITE EXTENDED(16) */ { 0x80, D, "XDWRITE EXTENDED(16)" }, /* 80 M WRITE FILEMARKS(16) */ { 0x80, T, "WRITE FILEMARKS(16)" }, /* 81 Z REBUILD(16) */ { 0x81, D, "REBUILD(16)" }, /* 81 O READ REVERSE(16) */ { 0x81, T, "READ REVERSE(16)" }, /* 82 Z REGENERATE(16) */ { 0x82, D, "REGENERATE(16)" }, /* 83 OOOOO O OO EXTENDED COPY */ { 0x83, D | T | L | P | W | O | K | V, "EXTENDED COPY" }, /* 84 OOOOO O OO RECEIVE COPY RESULTS */ { 0x84, D | T | L | P | W | O | K | V, "RECEIVE COPY RESULTS" }, /* 85 O O O ATA COMMAND PASS THROUGH(16) */ { 0x85, D | R | B, "ATA COMMAND PASS THROUGH(16)" }, /* 86 OO OO OOOOOOO ACCESS CONTROL IN */ { 0x86, ALL & ~(L | R | F), "ACCESS CONTROL IN" }, /* 87 OO OO OOOOOOO ACCESS CONTROL OUT */ { 0x87, ALL & ~(L | R | F), "ACCESS CONTROL OUT" }, /* 88 MM O O O READ(16) */ { 0x88, D | T | W | O | B, "READ(16)" }, /* 
89 O COMPARE AND WRITE*/ { 0x89, D, "COMPARE AND WRITE" }, /* 8A OM O O O WRITE(16) */ { 0x8A, D | T | W | O | B, "WRITE(16)" }, /* 8B O ORWRITE */ { 0x8B, D, "ORWRITE" }, /* 8C OO O OO O M READ ATTRIBUTE */ { 0x8C, D | T | W | O | M | B | V, "READ ATTRIBUTE" }, /* 8D OO O OO O O WRITE ATTRIBUTE */ { 0x8D, D | T | W | O | M | B | V, "WRITE ATTRIBUTE" }, /* 8E O O O O WRITE AND VERIFY(16) */ { 0x8E, D | W | O | B, "WRITE AND VERIFY(16)" }, /* 8F OO O O O VERIFY(16) */ { 0x8F, D | T | W | O | B, "VERIFY(16)" }, /* 90 O O O O PRE-FETCH(16) */ { 0x90, D | W | O | B, "PRE-FETCH(16)" }, /* 91 O O O O SYNCHRONIZE CACHE(16) */ { 0x91, D | W | O | B, "SYNCHRONIZE CACHE(16)" }, /* 91 O SPACE(16) */ { 0x91, T, "SPACE(16)" }, /* 92 Z O O LOCK UNLOCK CACHE(16) */ { 0x92, D | W | O, "LOCK UNLOCK CACHE(16)" }, /* 92 O LOCATE(16) */ { 0x92, T, "LOCATE(16)" }, /* 93 O WRITE SAME(16) */ { 0x93, D, "WRITE SAME(16)" }, /* 93 M ERASE(16) */ { 0x93, T, "ERASE(16)" }, /* 94 O ZBC OUT */ { 0x94, ALL, "ZBC OUT" }, /* 95 O ZBC IN */ { 0x95, ALL, "ZBC IN" }, /* 96 */ /* 97 */ /* 98 */ /* 99 */ /* 9A O WRITE STREAM(16) */ { 0x9A, D, "WRITE STREAM(16)" }, /* 9B OOOOOOOOOO OOO READ BUFFER(16) */ { 0x9B, ALL & ~(B) , "READ BUFFER(16)" }, /* 9C O WRITE ATOMIC(16) */ { 0x9C, D, "WRITE ATOMIC(16)" }, /* 9D SERVICE ACTION BIDIRECTIONAL */ { 0x9D, ALL, "SERVICE ACTION BIDIRECTIONAL" }, /* XXX KDM ALL for this? op-num.txt defines it for none.. */ /* 9E SERVICE ACTION IN(16) */ { 0x9E, ALL, "SERVICE ACTION IN(16)" }, /* 9F M SERVICE ACTION OUT(16) */ { 0x9F, ALL, "SERVICE ACTION OUT(16)" }, /* A0 MMOOO OMMM OMO REPORT LUNS */ { 0xA0, ALL & ~(R | B), "REPORT LUNS" }, /* A1 O BLANK */ { 0xA1, R, "BLANK" }, /* A1 O O ATA COMMAND PASS THROUGH(12) */ { 0xA1, D | B, "ATA COMMAND PASS THROUGH(12)" }, /* A2 OO O O SECURITY PROTOCOL IN */ { 0xA2, D | T | R | V, "SECURITY PROTOCOL IN" }, /* A3 OOO O OOMOOOM MAINTENANCE (IN) */ { 0xA3, ALL & ~(P | R | F), "MAINTENANCE (IN)" }, /* A3 O SEND KEY */ { 0xA3, R, "SEND KEY" }, /* A4 OOO O OOOOOOO MAINTENANCE (OUT) */ { 0xA4, ALL & ~(P | R | F), "MAINTENANCE (OUT)" }, /* A4 O REPORT KEY */ { 0xA4, R, "REPORT KEY" }, /* A5 O O OM MOVE MEDIUM */ { 0xA5, T | W | O | M, "MOVE MEDIUM" }, /* A5 O PLAY AUDIO(12) */ { 0xA5, R, "PLAY AUDIO(12)" }, /* A6 O EXCHANGE MEDIUM */ { 0xA6, M, "EXCHANGE MEDIUM" }, /* A6 O LOAD/UNLOAD C/DVD */ { 0xA6, R, "LOAD/UNLOAD C/DVD" }, /* A7 ZZ O O MOVE MEDIUM ATTACHED */ { 0xA7, D | T | W | O, "MOVE MEDIUM ATTACHED" }, /* A7 O SET READ AHEAD */ { 0xA7, R, "SET READ AHEAD" }, /* A8 O OOO READ(12) */ { 0xA8, D | W | R | O, "READ(12)" }, /* A8 GET MESSAGE(12) */ { 0xA8, C, "GET MESSAGE(12)" }, /* A9 O SERVICE ACTION OUT(12) */ { 0xA9, V, "SERVICE ACTION OUT(12)" }, /* AA O OOO WRITE(12) */ { 0xAA, D | W | R | O, "WRITE(12)" }, /* AA SEND MESSAGE(12) */ { 0xAA, C, "SEND MESSAGE(12)" }, /* AB O O SERVICE ACTION IN(12) */ { 0xAB, R | V, "SERVICE ACTION IN(12)" }, /* AC O ERASE(12) */ { 0xAC, O, "ERASE(12)" }, /* AC O GET PERFORMANCE */ { 0xAC, R, "GET PERFORMANCE" }, /* AD O READ DVD STRUCTURE */ { 0xAD, R, "READ DVD STRUCTURE" }, /* AE O O O WRITE AND VERIFY(12) */ { 0xAE, D | W | O, "WRITE AND VERIFY(12)" }, /* AF O OZO VERIFY(12) */ { 0xAF, D | W | R | O, "VERIFY(12)" }, /* B0 ZZZ SEARCH DATA HIGH(12) */ { 0xB0, W | R | O, "SEARCH DATA HIGH(12)" }, /* B1 ZZZ SEARCH DATA EQUAL(12) */ { 0xB1, W | R | O, "SEARCH DATA EQUAL(12)" }, /* B2 ZZZ SEARCH DATA LOW(12) */ { 0xB2, W | R | O, "SEARCH DATA LOW(12)" }, /* B3 Z OZO SET LIMITS(12) */ { 0xB3, D | W | R | O, "SET LIMITS(12)" 
}, /* B4 ZZ OZO READ ELEMENT STATUS ATTACHED */ { 0xB4, D | T | W | R | O, "READ ELEMENT STATUS ATTACHED" }, /* B5 OO O O SECURITY PROTOCOL OUT */ { 0xB5, D | T | R | V, "SECURITY PROTOCOL OUT" }, /* B5 O REQUEST VOLUME ELEMENT ADDRESS */ { 0xB5, M, "REQUEST VOLUME ELEMENT ADDRESS" }, /* B6 O SEND VOLUME TAG */ { 0xB6, M, "SEND VOLUME TAG" }, /* B6 O SET STREAMING */ { 0xB6, R, "SET STREAMING" }, /* B7 O O READ DEFECT DATA(12) */ { 0xB7, D | O, "READ DEFECT DATA(12)" }, /* B8 O OZOM READ ELEMENT STATUS */ { 0xB8, T | W | R | O | M, "READ ELEMENT STATUS" }, /* B9 O READ CD MSF */ { 0xB9, R, "READ CD MSF" }, /* BA O O OOMO REDUNDANCY GROUP (IN) */ { 0xBA, D | W | O | M | A | E, "REDUNDANCY GROUP (IN)" }, /* BA O SCAN */ { 0xBA, R, "SCAN" }, /* BB O O OOOO REDUNDANCY GROUP (OUT) */ { 0xBB, D | W | O | M | A | E, "REDUNDANCY GROUP (OUT)" }, /* BB O SET CD SPEED */ { 0xBB, R, "SET CD SPEED" }, /* BC O O OOMO SPARE (IN) */ { 0xBC, D | W | O | M | A | E, "SPARE (IN)" }, /* BD O O OOOO SPARE (OUT) */ { 0xBD, D | W | O | M | A | E, "SPARE (OUT)" }, /* BD O MECHANISM STATUS */ { 0xBD, R, "MECHANISM STATUS" }, /* BE O O OOMO VOLUME SET (IN) */ { 0xBE, D | W | O | M | A | E, "VOLUME SET (IN)" }, /* BE O READ CD */ { 0xBE, R, "READ CD" }, /* BF O O OOOO VOLUME SET (OUT) */ { 0xBF, D | W | O | M | A | E, "VOLUME SET (OUT)" }, /* BF O SEND DVD STRUCTURE */ { 0xBF, R, "SEND DVD STRUCTURE" } }; const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data) { caddr_t match; int i, j; u_int32_t opmask; u_int16_t pd_type; int num_ops[2]; struct op_table_entry *table[2]; int num_tables; /* * If we've got inquiry data, use it to determine what type of * device we're dealing with here. Otherwise, assume direct * access. */ if (inq_data == NULL) { pd_type = T_DIRECT; match = NULL; } else { pd_type = SID_TYPE(inq_data); match = cam_quirkmatch((caddr_t)inq_data, (caddr_t)scsi_op_quirk_table, nitems(scsi_op_quirk_table), sizeof(*scsi_op_quirk_table), scsi_inquiry_match); } if (match != NULL) { table[0] = ((struct scsi_op_quirk_entry *)match)->op_table; num_ops[0] = ((struct scsi_op_quirk_entry *)match)->num_ops; table[1] = scsi_op_codes; num_ops[1] = nitems(scsi_op_codes); num_tables = 2; } else { /* * If this is true, we have a vendor specific opcode that * wasn't covered in the quirk table. */ if ((opcode > 0xBF) || ((opcode > 0x5F) && (opcode < 0x80))) return("Vendor Specific Command"); table[0] = scsi_op_codes; num_ops[0] = nitems(scsi_op_codes); num_tables = 1; } /* RBC is 'Simplified' Direct Access Device */ if (pd_type == T_RBC) pd_type = T_DIRECT; /* * Host managed drives are direct access for the most part. */ if (pd_type == T_ZBC_HM) pd_type = T_DIRECT; /* Map NODEVICE to Direct Access Device to handle REPORT LUNS, etc. */ if (pd_type == T_NODEVICE) pd_type = T_DIRECT; opmask = 1 << pd_type; for (j = 0; j < num_tables; j++) { for (i = 0; i < num_ops[j] && table[j][i].opcode <= opcode; i++) { if ((table[j][i].opcode == opcode) && ((table[j][i].opmask & opmask) != 0)) return(table[j][i].desc); } } /* * If we can't find a match for the command in the table, we just * assume it's a vendor specific command.
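* For example, an opcode of 0x56 issued by a CD-ROM device (type R) matches neither the RESERVE(10) entry, whose opmask excludes R, nor the RESERVE ELEMENT(10) entry (M only), so it falls through to here.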
*/ return("Vendor Specific Command"); } #else /* SCSI_NO_OP_STRINGS */ const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data) { return(""); } #endif #if !defined(SCSI_NO_SENSE_STRINGS) #define SST(asc, ascq, action, desc) \ asc, ascq, action, desc #else const char empty_string[] = ""; #define SST(asc, ascq, action, desc) \ asc, ascq, action, empty_string #endif const struct sense_key_table_entry sense_key_table[] = { { SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" }, { SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" }, { SSD_KEY_NOT_READY, SS_RDEF, "NOT READY" }, { SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" }, { SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" }, { SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" }, { SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" }, { SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" }, { SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" }, { SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" }, { SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" }, { SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" }, { SSD_KEY_EQUAL, SS_NOP, "EQUAL" }, { SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" }, { SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" }, { SSD_KEY_COMPLETED, SS_NOP, "COMPLETED" } }; static struct asc_table_entry quantum_fireball_entries[] = { { SST(0x04, 0x0b, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, initializing cmd. required") } }; static struct asc_table_entry sony_mo_entries[] = { { SST(0x04, 0x00, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, cause not reportable") } }; static struct asc_table_entry hgst_entries[] = { { SST(0x04, 0xF0, SS_RDEF, "Vendor Unique - Logical Unit Not Ready") }, { SST(0x0A, 0x01, SS_RDEF, "Unrecovered Super Certification Log Write Error") }, { SST(0x0A, 0x02, SS_RDEF, "Unrecovered Super Certification Log Read Error") }, { SST(0x15, 0x03, SS_RDEF, "Unrecovered Sector Error") }, { SST(0x3E, 0x04, SS_RDEF, "Unrecovered Self-Test Hard-Cache Test Fail") }, { SST(0x3E, 0x05, SS_RDEF, "Unrecovered Self-Test OTF-Cache Fail") }, { SST(0x40, 0x00, SS_RDEF, "Unrecovered SAT No Buffer Overflow Error") }, { SST(0x40, 0x01, SS_RDEF, "Unrecovered SAT Buffer Overflow Error") }, { SST(0x40, 0x02, SS_RDEF, "Unrecovered SAT No Buffer Overflow With ECS Fault") }, { SST(0x40, 0x03, SS_RDEF, "Unrecovered SAT Buffer Overflow With ECS Fault") }, { SST(0x40, 0x81, SS_RDEF, "DRAM Failure") }, { SST(0x44, 0x0B, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF2, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF6, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF9, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xFA, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x5D, 0x22, SS_RDEF, "Extreme Over-Temperature Warning") }, { SST(0x5D, 0x50, SS_RDEF, "Load/Unload cycle Count Warning") }, { SST(0x81, 0x00, SS_RDEF, "Vendor Unique - Internal Logic Error") }, { SST(0x85, 0x00, SS_RDEF, "Vendor Unique - Internal Key Seed Error") }, }; static struct asc_table_entry seagate_entries[] = { { SST(0x04, 0xF0, SS_RDEF, "Logical Unit Not Ready, super certify in Progress") }, { SST(0x08, 0x86, SS_RDEF, "Write Fault Data Corruption") }, { SST(0x09, 0x0D, SS_RDEF, "Tracking Failure") }, { SST(0x09, 0x0E, SS_RDEF, "ETF Failure") }, { SST(0x0B, 0x5D, SS_RDEF, "Pre-SMART Warning") }, { SST(0x0B, 0x85, SS_RDEF, "5V Voltage Warning") }, { SST(0x0B, 
0x8C, SS_RDEF, "12V Voltage Warning") }, { SST(0x0C, 0xFF, SS_RDEF, "Write Error - Too many error recovery revs") }, { SST(0x11, 0xFF, SS_RDEF, "Unrecovered Read Error - Too many error recovery revs") }, { SST(0x19, 0x0E, SS_RDEF, "Fewer than 1/2 defect list copies") }, { SST(0x20, 0xF3, SS_RDEF, "Illegal CDB linked to skip mask cmd") }, { SST(0x24, 0xF0, SS_RDEF, "Illegal byte in CDB, LBA not matching") }, { SST(0x24, 0xF1, SS_RDEF, "Illegal byte in CDB, LEN not matching") }, { SST(0x24, 0xF2, SS_RDEF, "Mask not matching transfer length") }, { SST(0x24, 0xF3, SS_RDEF, "Drive formatted without plist") }, { SST(0x26, 0x95, SS_RDEF, "Invalid Field Parameter - CAP File") }, { SST(0x26, 0x96, SS_RDEF, "Invalid Field Parameter - RAP File") }, { SST(0x26, 0x97, SS_RDEF, "Invalid Field Parameter - TMS Firmware Tag") }, { SST(0x26, 0x98, SS_RDEF, "Invalid Field Parameter - Check Sum") }, { SST(0x26, 0x99, SS_RDEF, "Invalid Field Parameter - Firmware Tag") }, { SST(0x29, 0x08, SS_RDEF, "Write Log Dump data") }, { SST(0x29, 0x09, SS_RDEF, "Write Log Dump data") }, { SST(0x29, 0x0A, SS_RDEF, "Reserved disk space") }, { SST(0x29, 0x0B, SS_RDEF, "SDBP") }, { SST(0x29, 0x0C, SS_RDEF, "SDBP") }, { SST(0x31, 0x91, SS_RDEF, "Format Corrupted World Wide Name (WWN) is Invalid") }, { SST(0x32, 0x03, SS_RDEF, "Defect List - Length exceeds Command Allocated Length") }, { SST(0x33, 0x00, SS_RDEF, "Flash not ready for access") }, { SST(0x3F, 0x70, SS_RDEF, "Invalid RAP block") }, { SST(0x3F, 0x71, SS_RDEF, "RAP/ETF mismatch") }, { SST(0x3F, 0x90, SS_RDEF, "Invalid CAP block") }, { SST(0x3F, 0x91, SS_RDEF, "World Wide Name (WWN) Mismatch") }, { SST(0x40, 0x01, SS_RDEF, "DRAM Parity Error") }, { SST(0x40, 0x02, SS_RDEF, "DRAM Parity Error") }, { SST(0x42, 0x0A, SS_RDEF, "Loopback Test") }, { SST(0x42, 0x0B, SS_RDEF, "Loopback Test") }, { SST(0x44, 0xF2, SS_RDEF, "Compare error during data integrity check") }, { SST(0x44, 0xF6, SS_RDEF, "Unrecoverable error during data integrity check") }, { SST(0x47, 0x80, SS_RDEF, "Fibre Channel Sequence Error") }, { SST(0x4E, 0x01, SS_RDEF, "Information Unit Too Short") }, { SST(0x80, 0x00, SS_RDEF, "General Firmware Error / Command Timeout") }, { SST(0x80, 0x01, SS_RDEF, "Command Timeout") }, { SST(0x80, 0x02, SS_RDEF, "Command Timeout") }, { SST(0x80, 0x80, SS_RDEF, "FC FIFO Error During Read Transfer") }, { SST(0x80, 0x81, SS_RDEF, "FC FIFO Error During Write Transfer") }, { SST(0x80, 0x82, SS_RDEF, "DISC FIFO Error During Read Transfer") }, { SST(0x80, 0x83, SS_RDEF, "DISC FIFO Error During Write Transfer") }, { SST(0x80, 0x84, SS_RDEF, "LBA Seeded LRC Error on Read") }, { SST(0x80, 0x85, SS_RDEF, "LBA Seeded LRC Error on Write") }, { SST(0x80, 0x86, SS_RDEF, "IOEDC Error on Read") }, { SST(0x80, 0x87, SS_RDEF, "IOEDC Error on Write") }, { SST(0x80, 0x88, SS_RDEF, "Host Parity Check Failed") }, { SST(0x80, 0x89, SS_RDEF, "IOEDC error on read detected by formatter") }, { SST(0x80, 0x8A, SS_RDEF, "Host Parity Errors / Host FIFO Initialization Failed") }, { SST(0x80, 0x8B, SS_RDEF, "Host Parity Errors") }, { SST(0x80, 0x8C, SS_RDEF, "Host Parity Errors") }, { SST(0x80, 0x8D, SS_RDEF, "Host Parity Errors") }, { SST(0x81, 0x00, SS_RDEF, "LA Check Failed") }, { SST(0x82, 0x00, SS_RDEF, "Internal client detected insufficient buffer") }, { SST(0x84, 0x00, SS_RDEF, "Scheduled Diagnostic And Repair") }, }; static struct scsi_sense_quirk_entry sense_quirk_table[] = { { /* * XXX The Quantum Fireball ST and SE like to return 0x04 0x0b * when they really should return 0x04 0x02. 
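* The quirk entry below therefore points at quantum_fireball_entries, which gives 0x04 0x0b the same start-unit action (SS_START | SSQ_DECREMENT_COUNT | ENXIO) that the generic table applies to 0x04 0x02.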
*/ {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"}, /*num_sense_keys*/0, nitems(quantum_fireball_entries), /*sense key entries*/NULL, quantum_fireball_entries }, { /* * This Sony MO drive likes to return 0x04, 0x00 when it * isn't spun up. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"}, /*num_sense_keys*/0, nitems(sony_mo_entries), /*sense key entries*/NULL, sony_mo_entries }, { /* * HGST vendor-specific error codes */ {T_DIRECT, SIP_MEDIA_FIXED, "HGST", "*", "*"}, /*num_sense_keys*/0, nitems(hgst_entries), /*sense key entries*/NULL, hgst_entries }, { /* * SEAGATE vendor-specific error codes */ {T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "*", "*"}, /*num_sense_keys*/0, nitems(seagate_entries), /*sense key entries*/NULL, seagate_entries } }; const u_int sense_quirk_table_size = nitems(sense_quirk_table); static struct asc_table_entry asc_table[] = { /* * From: http://www.t10.org/lists/asc-num.txt * Modifications by Jung-uk Kim (jkim@FreeBSD.org) */ /* * File: ASC-NUM.TXT * * SCSI ASC/ASCQ Assignments * Numeric Sorted Listing * as of 8/12/15 * * D - DIRECT ACCESS DEVICE (SBC-2) device column key * .T - SEQUENTIAL ACCESS DEVICE (SSC) ------------------- * . L - PRINTER DEVICE (SSC) blank = reserved * . P - PROCESSOR DEVICE (SPC) not blank = allowed * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) * . . R - CD DEVICE (MMC) * . . O - OPTICAL MEMORY DEVICE (SBC-2) * . . .M - MEDIA CHANGER DEVICE (SMC) * . . . A - STORAGE ARRAY DEVICE (SCC) * . . . E - ENCLOSURE SERVICES DEVICE (SES) * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC) * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW) * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC) * . . . . .F - OBJECT-BASED STORAGE (OSD) * DTLPWROMAEBKVF * ASC ASCQ Action * Description */ /* DTLPWROMAEBKVF */ { SST(0x00, 0x00, SS_NOP, "No additional sense information") }, /* T */ { SST(0x00, 0x01, SS_RDEF, "Filemark detected") }, /* T */ { SST(0x00, 0x02, SS_RDEF, "End-of-partition/medium detected") }, /* T */ { SST(0x00, 0x03, SS_RDEF, "Setmark detected") }, /* T */ { SST(0x00, 0x04, SS_RDEF, "Beginning-of-partition/medium detected") }, /* TL */ { SST(0x00, 0x05, SS_RDEF, "End-of-data detected") }, /* DTLPWROMAEBKVF */ { SST(0x00, 0x06, SS_RDEF, "I/O process terminated") }, /* T */ { SST(0x00, 0x07, SS_RDEF, /* XXX TBD */ "Programmable early warning detected") }, /* R */ { SST(0x00, 0x11, SS_FATAL | EBUSY, "Audio play operation in progress") }, /* R */ { SST(0x00, 0x12, SS_NOP, "Audio play operation paused") }, /* R */ { SST(0x00, 0x13, SS_NOP, "Audio play operation successfully completed") }, /* R */ { SST(0x00, 0x14, SS_RDEF, "Audio play operation stopped due to error") }, /* R */ { SST(0x00, 0x15, SS_NOP, "No current audio status to return") }, /* DTLPWROMAEBKVF */ { SST(0x00, 0x16, SS_FATAL | EBUSY, "Operation in progress") }, /* DTL WROMAEBKVF */ { SST(0x00, 0x17, SS_RDEF, "Cleaning requested") }, /* T */ { SST(0x00, 0x18, SS_RDEF, /* XXX TBD */ "Erase operation in progress") }, /* T */ { SST(0x00, 0x19, SS_RDEF, /* XXX TBD */ "Locate operation in progress") }, /* T */ { SST(0x00, 0x1A, SS_RDEF, /* XXX TBD */ "Rewind operation in progress") }, /* T */ { SST(0x00, 0x1B, SS_RDEF, /* XXX TBD */ "Set capacity operation in progress") }, /* T */ { SST(0x00, 0x1C, SS_RDEF, /* XXX TBD */ "Verify operation in progress") }, /* DT B */ { SST(0x00, 0x1D, SS_NOP, "ATA pass through information available") }, /* DT R MAEBKV */ { SST(0x00, 0x1E, SS_RDEF, /* XXX TBD */ "Conflicting SA creation request") }, /* DT B */ { SST(0x00, 0x1F, SS_RDEF, /* XXX TBD 
*/ "Logical unit transitioning to another power condition") }, /* DT P B */ { SST(0x00, 0x20, SS_NOP, "Extended copy information available") }, /* D */ { SST(0x00, 0x21, SS_RDEF, /* XXX TBD */ "Atomic command aborted due to ACA") }, /* D W O BK */ { SST(0x01, 0x00, SS_RDEF, "No index/sector signal") }, /* D WRO BK */ { SST(0x02, 0x00, SS_RDEF, "No seek complete") }, /* DTL W O BK */ { SST(0x03, 0x00, SS_RDEF, "Peripheral device write fault") }, /* T */ { SST(0x03, 0x01, SS_RDEF, "No write current") }, /* T */ { SST(0x03, 0x02, SS_RDEF, "Excessive write errors") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x00, SS_RDEF, "Logical unit not ready, cause not reportable") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x01, SS_WAIT | EBUSY, "Logical unit is in process of becoming ready") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x02, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, initializing command required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x03, SS_FATAL | ENXIO, "Logical unit not ready, manual intervention required") }, /* DTL RO B */ { SST(0x04, 0x04, SS_FATAL | EBUSY, "Logical unit not ready, format in progress") }, /* DT W O A BK F */ { SST(0x04, 0x05, SS_FATAL | EBUSY, "Logical unit not ready, rebuild in progress") }, /* DT W O A BK */ { SST(0x04, 0x06, SS_FATAL | EBUSY, "Logical unit not ready, recalculation in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x07, SS_FATAL | EBUSY, "Logical unit not ready, operation in progress") }, /* R */ { SST(0x04, 0x08, SS_FATAL | EBUSY, "Logical unit not ready, long write in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x09, SS_RDEF, /* XXX TBD */ "Logical unit not ready, self-test in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0A, SS_WAIT | ENXIO, "Logical unit not accessible, asymmetric access state transition")}, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0B, SS_FATAL | ENXIO, "Logical unit not accessible, target port in standby state") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0C, SS_FATAL | ENXIO, "Logical unit not accessible, target port in unavailable state") }, /* F */ { SST(0x04, 0x0D, SS_RDEF, /* XXX TBD */ "Logical unit not ready, structure check required") }, /* DTL WR MAEBKVF */ { SST(0x04, 0x0E, SS_RDEF, /* XXX TBD */ "Logical unit not ready, security session in progress") }, /* DT WROM B */ { SST(0x04, 0x10, SS_RDEF, /* XXX TBD */ "Logical unit not ready, auxiliary memory not accessible") }, /* DT WRO AEB VF */ { SST(0x04, 0x11, SS_WAIT | EBUSY, "Logical unit not ready, notify (enable spinup) required") }, /* M V */ { SST(0x04, 0x12, SS_RDEF, /* XXX TBD */ "Logical unit not ready, offline") }, /* DT R MAEBKV */ { SST(0x04, 0x13, SS_RDEF, /* XXX TBD */ "Logical unit not ready, SA creation in progress") }, /* D B */ { SST(0x04, 0x14, SS_RDEF, /* XXX TBD */ "Logical unit not ready, space allocation in progress") }, /* M */ { SST(0x04, 0x15, SS_RDEF, /* XXX TBD */ "Logical unit not ready, robotics disabled") }, /* M */ { SST(0x04, 0x16, SS_RDEF, /* XXX TBD */ "Logical unit not ready, configuration required") }, /* M */ { SST(0x04, 0x17, SS_RDEF, /* XXX TBD */ "Logical unit not ready, calibration required") }, /* M */ { SST(0x04, 0x18, SS_RDEF, /* XXX TBD */ "Logical unit not ready, a door is open") }, /* M */ { SST(0x04, 0x19, SS_RDEF, /* XXX TBD */ "Logical unit not ready, operating in sequential mode") }, /* DT B */ { SST(0x04, 0x1A, SS_RDEF, /* XXX TBD */ "Logical unit not ready, START/STOP UNIT command in progress") }, /* D B */ { SST(0x04, 0x1B, SS_RDEF, /* XXX TBD */ "Logical unit not ready, sanitize in progress") }, /* DT MAEB */ { 
SST(0x04, 0x1C, SS_RDEF, /* XXX TBD */ "Logical unit not ready, additional power use not yet granted") }, /* D */ { SST(0x04, 0x1D, SS_RDEF, /* XXX TBD */ "Logical unit not ready, configuration in progress") }, /* D */ { SST(0x04, 0x1E, SS_FATAL | ENXIO, "Logical unit not ready, microcode activation required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x1F, SS_FATAL | ENXIO, "Logical unit not ready, microcode download required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x20, SS_RDEF, /* XXX TBD */ "Logical unit not ready, logical unit reset required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x21, SS_RDEF, /* XXX TBD */ "Logical unit not ready, hard reset required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x22, SS_RDEF, /* XXX TBD */ "Logical unit not ready, power cycle required") }, /* DTL WROMAEBKVF */ { SST(0x05, 0x00, SS_RDEF, "Logical unit does not respond to selection") }, /* D WROM BK */ { SST(0x06, 0x00, SS_RDEF, "No reference position found") }, /* DTL WROM BK */ { SST(0x07, 0x00, SS_RDEF, "Multiple peripheral devices selected") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x00, SS_RDEF, "Logical unit communication failure") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x01, SS_RDEF, "Logical unit communication time-out") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x02, SS_RDEF, "Logical unit communication parity error") }, /* DT ROM BK */ { SST(0x08, 0x03, SS_RDEF, "Logical unit communication CRC error (Ultra-DMA/32)") }, /* DTLPWRO K */ { SST(0x08, 0x04, SS_RDEF, /* XXX TBD */ "Unreachable copy target") }, /* DT WRO B */ { SST(0x09, 0x00, SS_RDEF, "Track following error") }, /* WRO K */ { SST(0x09, 0x01, SS_RDEF, "Tracking servo failure") }, /* WRO K */ { SST(0x09, 0x02, SS_RDEF, "Focus servo failure") }, /* WRO */ { SST(0x09, 0x03, SS_RDEF, "Spindle servo failure") }, /* DT WRO B */ { SST(0x09, 0x04, SS_RDEF, "Head select fault") }, /* DT RO B */ { SST(0x09, 0x05, SS_RDEF, "Vibration induced tracking error") }, /* DTLPWROMAEBKVF */ { SST(0x0A, 0x00, SS_FATAL | ENOSPC, "Error log overflow") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Warning") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Warning - specified temperature exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Warning - enclosure degraded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Warning - background self-test failed") }, /* DTLPWRO AEBKVF */ { SST(0x0B, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Warning - background pre-scan detected medium error") }, /* DTLPWRO AEBKVF */ { SST(0x0B, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Warning - background medium scan detected medium error") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Warning - non-volatile cache now volatile") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Warning - degraded power to non-volatile cache") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x08, SS_NOP | SSQ_PRINT_SENSE, "Warning - power loss expected") }, /* D */ { SST(0x0B, 0x09, SS_NOP | SSQ_PRINT_SENSE, "Warning - device statistics notification available") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0A, SS_NOP | SSQ_PRINT_SENSE, "Warning - High critical temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0B, SS_NOP | SSQ_PRINT_SENSE, "Warning - Low critical temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0C, SS_NOP | SSQ_PRINT_SENSE, "Warning - High operating temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0D, SS_NOP | SSQ_PRINT_SENSE, "Warning - Low operating temperature 
limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0E, SS_NOP | SSQ_PRINT_SENSE, "Warning - High citical humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0F, SS_NOP | SSQ_PRINT_SENSE, "Warning - Low citical humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x10, SS_NOP | SSQ_PRINT_SENSE, "Warning - High operating humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x11, SS_NOP | SSQ_PRINT_SENSE, "Warning - Low operating humidity limit exceeded") }, /* T R */ { SST(0x0C, 0x00, SS_RDEF, "Write error") }, /* K */ { SST(0x0C, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Write error - recovered with auto reallocation") }, /* D W O BK */ { SST(0x0C, 0x02, SS_RDEF, "Write error - auto reallocation failed") }, /* D W O BK */ { SST(0x0C, 0x03, SS_RDEF, "Write error - recommend reassignment") }, /* DT W O B */ { SST(0x0C, 0x04, SS_RDEF, "Compression check miscompare error") }, /* DT W O B */ { SST(0x0C, 0x05, SS_RDEF, "Data expansion occurred during compression") }, /* DT W O B */ { SST(0x0C, 0x06, SS_RDEF, "Block not compressible") }, /* R */ { SST(0x0C, 0x07, SS_RDEF, "Write error - recovery needed") }, /* R */ { SST(0x0C, 0x08, SS_RDEF, "Write error - recovery failed") }, /* R */ { SST(0x0C, 0x09, SS_RDEF, "Write error - loss of streaming") }, /* R */ { SST(0x0C, 0x0A, SS_RDEF, "Write error - padding blocks added") }, /* DT WROM B */ { SST(0x0C, 0x0B, SS_RDEF, /* XXX TBD */ "Auxiliary memory write error") }, /* DTLPWRO AEBKVF */ { SST(0x0C, 0x0C, SS_RDEF, /* XXX TBD */ "Write error - unexpected unsolicited data") }, /* DTLPWRO AEBKVF */ { SST(0x0C, 0x0D, SS_RDEF, /* XXX TBD */ "Write error - not enough unsolicited data") }, /* DT W O BK */ { SST(0x0C, 0x0E, SS_RDEF, /* XXX TBD */ "Multiple write errors") }, /* R */ { SST(0x0C, 0x0F, SS_RDEF, /* XXX TBD */ "Defects in error window") }, /* D */ { SST(0x0C, 0x10, SS_RDEF, /* XXX TBD */ "Incomplete multiple atomic write operations") }, /* D */ { SST(0x0C, 0x11, SS_RDEF, /* XXX TBD */ "Write error - recovery scan needed") }, /* D */ { SST(0x0C, 0x12, SS_RDEF, /* XXX TBD */ "Write error - insufficient zone resources") }, /* DTLPWRO A K */ { SST(0x0D, 0x00, SS_RDEF, /* XXX TBD */ "Error detected by third party temporary initiator") }, /* DTLPWRO A K */ { SST(0x0D, 0x01, SS_RDEF, /* XXX TBD */ "Third party device failure") }, /* DTLPWRO A K */ { SST(0x0D, 0x02, SS_RDEF, /* XXX TBD */ "Copy target device not reachable") }, /* DTLPWRO A K */ { SST(0x0D, 0x03, SS_RDEF, /* XXX TBD */ "Incorrect copy target device type") }, /* DTLPWRO A K */ { SST(0x0D, 0x04, SS_RDEF, /* XXX TBD */ "Copy target device data underrun") }, /* DTLPWRO A K */ { SST(0x0D, 0x05, SS_RDEF, /* XXX TBD */ "Copy target device data overrun") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x00, SS_RDEF, /* XXX TBD */ "Invalid information unit") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x01, SS_RDEF, /* XXX TBD */ "Information unit too short") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */ "Information unit too long") }, /* DT P R MAEBK F */ { SST(0x0E, 0x03, SS_FATAL | EINVAL, "Invalid field in command information unit") }, /* D W O BK */ { SST(0x10, 0x00, SS_RDEF, "ID CRC or ECC error") }, /* DT W O */ { SST(0x10, 0x01, SS_RDEF, /* XXX TBD */ "Logical block guard check failed") }, /* DT W O */ { SST(0x10, 0x02, SS_RDEF, /* XXX TBD */ "Logical block application tag check failed") }, /* DT W O */ { SST(0x10, 0x03, SS_RDEF, /* XXX TBD */ "Logical block reference tag check failed") }, /* T */ { SST(0x10, 0x04, SS_RDEF, /* XXX TBD */ "Logical block protection 
error on recovered buffer data") }, /* T */ { SST(0x10, 0x05, SS_RDEF, /* XXX TBD */ "Logical block protection method error") }, /* DT WRO BK */ { SST(0x11, 0x00, SS_FATAL|EIO, "Unrecovered read error") }, /* DT WRO BK */ { SST(0x11, 0x01, SS_FATAL|EIO, "Read retries exhausted") }, /* DT WRO BK */ { SST(0x11, 0x02, SS_FATAL|EIO, "Error too long to correct") }, /* DT W O BK */ { SST(0x11, 0x03, SS_FATAL|EIO, "Multiple read errors") }, /* D W O BK */ { SST(0x11, 0x04, SS_FATAL|EIO, "Unrecovered read error - auto reallocate failed") }, /* WRO B */ { SST(0x11, 0x05, SS_FATAL|EIO, "L-EC uncorrectable error") }, /* WRO B */ { SST(0x11, 0x06, SS_FATAL|EIO, "CIRC unrecovered error") }, /* W O B */ { SST(0x11, 0x07, SS_RDEF, "Data re-synchronization error") }, /* T */ { SST(0x11, 0x08, SS_RDEF, "Incomplete block read") }, /* T */ { SST(0x11, 0x09, SS_RDEF, "No gap found") }, /* DT O BK */ { SST(0x11, 0x0A, SS_RDEF, "Miscorrected error") }, /* D W O BK */ { SST(0x11, 0x0B, SS_FATAL|EIO, "Unrecovered read error - recommend reassignment") }, /* D W O BK */ { SST(0x11, 0x0C, SS_FATAL|EIO, "Unrecovered read error - recommend rewrite the data") }, /* DT WRO B */ { SST(0x11, 0x0D, SS_RDEF, "De-compression CRC error") }, /* DT WRO B */ { SST(0x11, 0x0E, SS_RDEF, "Cannot decompress using declared algorithm") }, /* R */ { SST(0x11, 0x0F, SS_RDEF, "Error reading UPC/EAN number") }, /* R */ { SST(0x11, 0x10, SS_RDEF, "Error reading ISRC number") }, /* R */ { SST(0x11, 0x11, SS_RDEF, "Read error - loss of streaming") }, /* DT WROM B */ { SST(0x11, 0x12, SS_RDEF, /* XXX TBD */ "Auxiliary memory read error") }, /* DTLPWRO AEBKVF */ { SST(0x11, 0x13, SS_RDEF, /* XXX TBD */ "Read error - failed retransmission request") }, /* D */ { SST(0x11, 0x14, SS_RDEF, /* XXX TBD */ "Read error - LBA marked bad by application client") }, /* D */ { SST(0x11, 0x15, SS_RDEF, /* XXX TBD */ "Write after sanitize required") }, /* D W O BK */ { SST(0x12, 0x00, SS_RDEF, "Address mark not found for ID field") }, /* D W O BK */ { SST(0x13, 0x00, SS_RDEF, "Address mark not found for data field") }, /* DTL WRO BK */ { SST(0x14, 0x00, SS_RDEF, "Recorded entity not found") }, /* DT WRO BK */ { SST(0x14, 0x01, SS_RDEF, "Record not found") }, /* T */ { SST(0x14, 0x02, SS_RDEF, "Filemark or setmark not found") }, /* T */ { SST(0x14, 0x03, SS_RDEF, "End-of-data not found") }, /* T */ { SST(0x14, 0x04, SS_RDEF, "Block sequence error") }, /* DT W O BK */ { SST(0x14, 0x05, SS_RDEF, "Record not found - recommend reassignment") }, /* DT W O BK */ { SST(0x14, 0x06, SS_RDEF, "Record not found - data auto-reallocated") }, /* T */ { SST(0x14, 0x07, SS_RDEF, /* XXX TBD */ "Locate operation failure") }, /* DTL WROM BK */ { SST(0x15, 0x00, SS_RDEF, "Random positioning error") }, /* DTL WROM BK */ { SST(0x15, 0x01, SS_RDEF, "Mechanical positioning error") }, /* DT WRO BK */ { SST(0x15, 0x02, SS_RDEF, "Positioning error detected by read of medium") }, /* D W O BK */ { SST(0x16, 0x00, SS_RDEF, "Data synchronization mark error") }, /* D W O BK */ { SST(0x16, 0x01, SS_RDEF, "Data sync error - data rewritten") }, /* D W O BK */ { SST(0x16, 0x02, SS_RDEF, "Data sync error - recommend rewrite") }, /* D W O BK */ { SST(0x16, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Data sync error - data auto-reallocated") }, /* D W O BK */ { SST(0x16, 0x04, SS_RDEF, "Data sync error - recommend reassignment") }, /* DT WRO BK */ { SST(0x17, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with no error correction applied") }, /* DT WRO BK */ { SST(0x17, 0x01, SS_NOP | SSQ_PRINT_SENSE, 
"Recovered data with retries") }, /* DT WRO BK */ { SST(0x17, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with positive head offset") }, /* DT WRO BK */ { SST(0x17, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with negative head offset") }, /* WRO B */ { SST(0x17, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with retries and/or CIRC applied") }, /* D WRO BK */ { SST(0x17, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Recovered data using previous sector ID") }, /* D W O BK */ { SST(0x17, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - data auto-reallocated") }, /* D WRO BK */ { SST(0x17, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - recommend reassignment") }, /* D WRO BK */ { SST(0x17, 0x08, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - recommend rewrite") }, /* D WRO BK */ { SST(0x17, 0x09, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - data rewritten") }, /* DT WRO BK */ { SST(0x18, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with error correction applied") }, /* D WRO BK */ { SST(0x18, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with error corr. & retries applied") }, /* D WRO BK */ { SST(0x18, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - data auto-reallocated") }, /* R */ { SST(0x18, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with CIRC") }, /* R */ { SST(0x18, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with L-EC") }, /* D WRO BK */ { SST(0x18, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - recommend reassignment") }, /* D WRO BK */ { SST(0x18, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - recommend rewrite") }, /* D W O BK */ { SST(0x18, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with ECC - data rewritten") }, /* R */ { SST(0x18, 0x08, SS_RDEF, /* XXX TBD */ "Recovered data with linking") }, /* D O K */ { SST(0x19, 0x00, SS_RDEF, "Defect list error") }, /* D O K */ { SST(0x19, 0x01, SS_RDEF, "Defect list not available") }, /* D O K */ { SST(0x19, 0x02, SS_RDEF, "Defect list error in primary list") }, /* D O K */ { SST(0x19, 0x03, SS_RDEF, "Defect list error in grown list") }, /* DTLPWROMAEBKVF */ { SST(0x1A, 0x00, SS_RDEF, "Parameter list length error") }, /* DTLPWROMAEBKVF */ { SST(0x1B, 0x00, SS_RDEF, "Synchronous data transfer error") }, /* D O BK */ { SST(0x1C, 0x00, SS_RDEF, "Defect list not found") }, /* D O BK */ { SST(0x1C, 0x01, SS_RDEF, "Primary defect list not found") }, /* D O BK */ { SST(0x1C, 0x02, SS_RDEF, "Grown defect list not found") }, /* DT WRO BK */ { SST(0x1D, 0x00, SS_FATAL, "Miscompare during verify operation") }, /* D B */ { SST(0x1D, 0x01, SS_RDEF, /* XXX TBD */ "Miscomparable verify of unmapped LBA") }, /* D W O BK */ { SST(0x1E, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered ID with ECC correction") }, /* D O K */ { SST(0x1F, 0x00, SS_RDEF, "Partial defect list transfer") }, /* DTLPWROMAEBKVF */ { SST(0x20, 0x00, SS_FATAL | EINVAL, "Invalid command operation code") }, /* DT PWROMAEBK */ { SST(0x20, 0x01, SS_RDEF, /* XXX TBD */ "Access denied - initiator pending-enrolled") }, /* DT PWROMAEBK */ { SST(0x20, 0x02, SS_FATAL | EPERM, "Access denied - no access rights") }, /* DT PWROMAEBK */ { SST(0x20, 0x03, SS_RDEF, /* XXX TBD */ "Access denied - invalid mgmt ID key") }, /* T */ { SST(0x20, 0x04, SS_RDEF, /* XXX TBD */ "Illegal command while in write capable state") }, /* T */ { SST(0x20, 0x05, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* T */ { SST(0x20, 0x06, SS_RDEF, /* XXX TBD */ "Illegal command while in explicit address mode") }, /* T */ { SST(0x20, 0x07, SS_RDEF, /* XXX TBD */ 
"Illegal command while in implicit address mode") }, /* DT PWROMAEBK */ { SST(0x20, 0x08, SS_RDEF, /* XXX TBD */ "Access denied - enrollment conflict") }, /* DT PWROMAEBK */ { SST(0x20, 0x09, SS_RDEF, /* XXX TBD */ "Access denied - invalid LU identifier") }, /* DT PWROMAEBK */ { SST(0x20, 0x0A, SS_RDEF, /* XXX TBD */ "Access denied - invalid proxy token") }, /* DT PWROMAEBK */ { SST(0x20, 0x0B, SS_RDEF, /* XXX TBD */ "Access denied - ACL LUN conflict") }, /* T */ { SST(0x20, 0x0C, SS_FATAL | EINVAL, "Illegal command when not in append-only mode") }, /* DT WRO BK */ { SST(0x21, 0x00, SS_FATAL | EINVAL, "Logical block address out of range") }, /* DT WROM BK */ { SST(0x21, 0x01, SS_FATAL | EINVAL, "Invalid element address") }, /* R */ { SST(0x21, 0x02, SS_RDEF, /* XXX TBD */ "Invalid address for write") }, /* R */ { SST(0x21, 0x03, SS_RDEF, /* XXX TBD */ "Invalid write crossing layer jump") }, /* D */ { SST(0x21, 0x04, SS_RDEF, /* XXX TBD */ "Unaligned write command") }, /* D */ { SST(0x21, 0x05, SS_RDEF, /* XXX TBD */ "Write boundary violation") }, /* D */ { SST(0x21, 0x06, SS_RDEF, /* XXX TBD */ "Attempt to read invalid data") }, /* D */ { SST(0x21, 0x07, SS_RDEF, /* XXX TBD */ "Read boundary violation") }, /* D */ { SST(0x22, 0x00, SS_FATAL | EINVAL, "Illegal function (use 20 00, 24 00, or 26 00)") }, /* DT P B */ { SST(0x23, 0x00, SS_FATAL | EINVAL, "Invalid token operation, cause not reportable") }, /* DT P B */ { SST(0x23, 0x01, SS_FATAL | EINVAL, "Invalid token operation, unsupported token type") }, /* DT P B */ { SST(0x23, 0x02, SS_FATAL | EINVAL, "Invalid token operation, remote token usage not supported") }, /* DT P B */ { SST(0x23, 0x03, SS_FATAL | EINVAL, "Invalid token operation, remote ROD token creation not supported") }, /* DT P B */ { SST(0x23, 0x04, SS_FATAL | EINVAL, "Invalid token operation, token unknown") }, /* DT P B */ { SST(0x23, 0x05, SS_FATAL | EINVAL, "Invalid token operation, token corrupt") }, /* DT P B */ { SST(0x23, 0x06, SS_FATAL | EINVAL, "Invalid token operation, token revoked") }, /* DT P B */ { SST(0x23, 0x07, SS_FATAL | EINVAL, "Invalid token operation, token expired") }, /* DT P B */ { SST(0x23, 0x08, SS_FATAL | EINVAL, "Invalid token operation, token cancelled") }, /* DT P B */ { SST(0x23, 0x09, SS_FATAL | EINVAL, "Invalid token operation, token deleted") }, /* DT P B */ { SST(0x23, 0x0A, SS_FATAL | EINVAL, "Invalid token operation, invalid token length") }, /* DTLPWROMAEBKVF */ { SST(0x24, 0x00, SS_FATAL | EINVAL, "Invalid field in CDB") }, /* DTLPWRO AEBKVF */ { SST(0x24, 0x01, SS_RDEF, /* XXX TBD */ "CDB decryption error") }, /* T */ { SST(0x24, 0x02, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* T */ { SST(0x24, 0x03, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* F */ { SST(0x24, 0x04, SS_RDEF, /* XXX TBD */ "Security audit value frozen") }, /* F */ { SST(0x24, 0x05, SS_RDEF, /* XXX TBD */ "Security working key frozen") }, /* F */ { SST(0x24, 0x06, SS_RDEF, /* XXX TBD */ "NONCE not unique") }, /* F */ { SST(0x24, 0x07, SS_RDEF, /* XXX TBD */ "NONCE timestamp out of range") }, /* DT R MAEBKV */ { SST(0x24, 0x08, SS_RDEF, /* XXX TBD */ "Invalid XCDB") }, /* DTLPWROMAEBKVF */ { SST(0x25, 0x00, SS_FATAL | ENXIO | SSQ_LOST, "Logical unit not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x00, SS_FATAL | EINVAL, "Invalid field in parameter list") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x01, SS_FATAL | EINVAL, "Parameter not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x02, SS_FATAL | EINVAL, "Parameter value invalid") }, /* DTLPWROMAE K */ { SST(0x26, 0x03, 
SS_FATAL | EINVAL, "Threshold parameters not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x04, SS_FATAL | EINVAL, "Invalid release of persistent reservation") }, /* DTLPWRO A BK */ { SST(0x26, 0x05, SS_RDEF, /* XXX TBD */ "Data decryption error") }, /* DTLPWRO K */ { SST(0x26, 0x06, SS_FATAL | EINVAL, "Too many target descriptors") }, /* DTLPWRO K */ { SST(0x26, 0x07, SS_FATAL | EINVAL, "Unsupported target descriptor type code") }, /* DTLPWRO K */ { SST(0x26, 0x08, SS_FATAL | EINVAL, "Too many segment descriptors") }, /* DTLPWRO K */ { SST(0x26, 0x09, SS_FATAL | EINVAL, "Unsupported segment descriptor type code") }, /* DTLPWRO K */ { SST(0x26, 0x0A, SS_FATAL | EINVAL, "Unexpected inexact segment") }, /* DTLPWRO K */ { SST(0x26, 0x0B, SS_FATAL | EINVAL, "Inline data length exceeded") }, /* DTLPWRO K */ { SST(0x26, 0x0C, SS_FATAL | EINVAL, "Invalid operation for copy source or destination") }, /* DTLPWRO K */ { SST(0x26, 0x0D, SS_FATAL | EINVAL, "Copy segment granularity violation") }, /* DT PWROMAEBK */ { SST(0x26, 0x0E, SS_RDEF, /* XXX TBD */ "Invalid parameter while port is enabled") }, /* F */ { SST(0x26, 0x0F, SS_RDEF, /* XXX TBD */ "Invalid data-out buffer integrity check value") }, /* T */ { SST(0x26, 0x10, SS_RDEF, /* XXX TBD */ "Data decryption key fail limit reached") }, /* T */ { SST(0x26, 0x11, SS_RDEF, /* XXX TBD */ "Incomplete key-associated data set") }, /* T */ { SST(0x26, 0x12, SS_RDEF, /* XXX TBD */ "Vendor specific key reference not found") }, /* D */ { SST(0x26, 0x13, SS_RDEF, /* XXX TBD */ "Application tag mode page is invalid") }, /* DT WRO BK */ { SST(0x27, 0x00, SS_FATAL | EACCES, "Write protected") }, /* DT WRO BK */ { SST(0x27, 0x01, SS_FATAL | EACCES, "Hardware write protected") }, /* DT WRO BK */ { SST(0x27, 0x02, SS_FATAL | EACCES, "Logical unit software write protected") }, /* T R */ { SST(0x27, 0x03, SS_FATAL | EACCES, "Associated write protect") }, /* T R */ { SST(0x27, 0x04, SS_FATAL | EACCES, "Persistent write protect") }, /* T R */ { SST(0x27, 0x05, SS_FATAL | EACCES, "Permanent write protect") }, /* R F */ { SST(0x27, 0x06, SS_RDEF, /* XXX TBD */ "Conditional write protect") }, /* D B */ { SST(0x27, 0x07, SS_FATAL | ENOSPC, "Space allocation failed write protect") }, /* D */ { SST(0x27, 0x08, SS_FATAL | EACCES, "Zone is read only") }, /* DTLPWROMAEBKVF */ { SST(0x28, 0x00, SS_FATAL | ENXIO, "Not ready to ready change, medium may have changed") }, /* DT WROM B */ { SST(0x28, 0x01, SS_FATAL | ENXIO, "Import or export element accessed") }, /* R */ { SST(0x28, 0x02, SS_RDEF, /* XXX TBD */ "Format-layer may have changed") }, /* M */ { SST(0x28, 0x03, SS_RDEF, /* XXX TBD */ "Import/export element accessed, medium changed") }, /* * XXX JGibbs - All of these should use the same errno, but I don't * think ENXIO is the correct choice. Should we borrow from * the networking errnos? ECONNRESET anyone? 
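* For now only the generic 0x29 0x00 entry below is fatal (SS_FATAL | ENXIO); the more specific reset variants stay on the default handler (SS_RDEF).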
*/ /* DTLPWROMAEBKVF */ { SST(0x29, 0x00, SS_FATAL | ENXIO, "Power on, reset, or bus device reset occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x01, SS_RDEF, "Power on occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x02, SS_RDEF, "SCSI bus reset occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x03, SS_RDEF, "Bus device reset function occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x04, SS_RDEF, "Device internal reset") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x05, SS_RDEF, "Transceiver mode changed to single-ended") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x06, SS_RDEF, "Transceiver mode changed to LVD") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x07, SS_RDEF, /* XXX TBD */ "I_T nexus loss occurred") }, /* DTL WROMAEBKVF */ { SST(0x2A, 0x00, SS_RDEF, "Parameters changed") }, /* DTL WROMAEBKVF */ { SST(0x2A, 0x01, SS_RDEF, "Mode parameters changed") }, /* DTL WROMAE K */ { SST(0x2A, 0x02, SS_RDEF, "Log parameters changed") }, /* DTLPWROMAE K */ { SST(0x2A, 0x03, SS_RDEF, "Reservations preempted") }, /* DTLPWROMAE */ { SST(0x2A, 0x04, SS_RDEF, /* XXX TBD */ "Reservations released") }, /* DTLPWROMAE */ { SST(0x2A, 0x05, SS_RDEF, /* XXX TBD */ "Registrations preempted") }, /* DTLPWROMAEBKVF */ { SST(0x2A, 0x06, SS_RDEF, /* XXX TBD */ "Asymmetric access state changed") }, /* DTLPWROMAEBKVF */ { SST(0x2A, 0x07, SS_RDEF, /* XXX TBD */ "Implicit asymmetric access state transition failed") }, /* DT WROMAEBKVF */ { SST(0x2A, 0x08, SS_RDEF, /* XXX TBD */ "Priority changed") }, /* D */ { SST(0x2A, 0x09, SS_RDEF, /* XXX TBD */ "Capacity data has changed") }, /* DT */ { SST(0x2A, 0x0A, SS_RDEF, /* XXX TBD */ "Error history I_T nexus cleared") }, /* DT */ { SST(0x2A, 0x0B, SS_RDEF, /* XXX TBD */ "Error history snapshot released") }, /* F */ { SST(0x2A, 0x0C, SS_RDEF, /* XXX TBD */ "Error recovery attributes have changed") }, /* T */ { SST(0x2A, 0x0D, SS_RDEF, /* XXX TBD */ "Data encryption capabilities changed") }, /* DT M E V */ { SST(0x2A, 0x10, SS_RDEF, /* XXX TBD */ "Timestamp changed") }, /* T */ { SST(0x2A, 0x11, SS_RDEF, /* XXX TBD */ "Data encryption parameters changed by another I_T nexus") }, /* T */ { SST(0x2A, 0x12, SS_RDEF, /* XXX TBD */ "Data encryption parameters changed by vendor specific event") }, /* T */ { SST(0x2A, 0x13, SS_RDEF, /* XXX TBD */ "Data encryption key instance counter has changed") }, /* DT R MAEBKV */ { SST(0x2A, 0x14, SS_RDEF, /* XXX TBD */ "SA creation capabilities data has changed") }, /* T M V */ { SST(0x2A, 0x15, SS_RDEF, /* XXX TBD */ "Medium removal prevention preempted") }, /* DTLPWRO K */ { SST(0x2B, 0x00, SS_RDEF, "Copy cannot execute since host cannot disconnect") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x00, SS_RDEF, "Command sequence error") }, /* */ { SST(0x2C, 0x01, SS_RDEF, "Too many windows specified") }, /* */ { SST(0x2C, 0x02, SS_RDEF, "Invalid combination of windows specified") }, /* R */ { SST(0x2C, 0x03, SS_RDEF, "Current program area is not empty") }, /* R */ { SST(0x2C, 0x04, SS_RDEF, "Current program area is empty") }, /* B */ { SST(0x2C, 0x05, SS_RDEF, /* XXX TBD */ "Illegal power condition request") }, /* R */ { SST(0x2C, 0x06, SS_RDEF, /* XXX TBD */ "Persistent prevent conflict") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x07, SS_RDEF, /* XXX TBD */ "Previous busy status") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x08, SS_RDEF, /* XXX TBD */ "Previous task set full status") }, /* DTLPWROM EBKVF */ { SST(0x2C, 0x09, SS_RDEF, /* XXX TBD */ "Previous reservation conflict status") }, /* F */ { SST(0x2C, 0x0A, SS_RDEF, /* XXX TBD */ "Partition or collection contains user 
objects") }, /* T */ { SST(0x2C, 0x0B, SS_RDEF, /* XXX TBD */ "Not reserved") }, /* D */ { SST(0x2C, 0x0C, SS_RDEF, /* XXX TBD */ "ORWRITE generation does not match") }, /* D */ { SST(0x2C, 0x0D, SS_RDEF, /* XXX TBD */ "Reset write pointer not allowed") }, /* D */ { SST(0x2C, 0x0E, SS_RDEF, /* XXX TBD */ "Zone is offline") }, /* D */ { SST(0x2C, 0x0F, SS_RDEF, /* XXX TBD */ "Stream not open") }, /* D */ { SST(0x2C, 0x10, SS_RDEF, /* XXX TBD */ "Unwritten data in zone") }, /* T */ { SST(0x2D, 0x00, SS_RDEF, "Overwrite error on update in place") }, /* R */ { SST(0x2E, 0x00, SS_RDEF, /* XXX TBD */ "Insufficient time for operation") }, /* D */ { SST(0x2E, 0x01, SS_RDEF, /* XXX TBD */ "Command timeout before processing") }, /* D */ { SST(0x2E, 0x02, SS_RDEF, /* XXX TBD */ "Command timeout during processing") }, /* D */ { SST(0x2E, 0x03, SS_RDEF, /* XXX TBD */ "Command timeout during processing due to error recovery") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x00, SS_RDEF, "Commands cleared by another initiator") }, /* D */ { SST(0x2F, 0x01, SS_RDEF, /* XXX TBD */ "Commands cleared by power loss notification") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x02, SS_RDEF, /* XXX TBD */ "Commands cleared by device server") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x03, SS_RDEF, /* XXX TBD */ "Some commands cleared by queuing layer event") }, /* DT WROM BK */ { SST(0x30, 0x00, SS_RDEF, "Incompatible medium installed") }, /* DT WRO BK */ { SST(0x30, 0x01, SS_RDEF, "Cannot read medium - unknown format") }, /* DT WRO BK */ { SST(0x30, 0x02, SS_RDEF, "Cannot read medium - incompatible format") }, /* DT R K */ { SST(0x30, 0x03, SS_RDEF, "Cleaning cartridge installed") }, /* DT WRO BK */ { SST(0x30, 0x04, SS_RDEF, "Cannot write medium - unknown format") }, /* DT WRO BK */ { SST(0x30, 0x05, SS_RDEF, "Cannot write medium - incompatible format") }, /* DT WRO B */ { SST(0x30, 0x06, SS_RDEF, "Cannot format medium - incompatible medium") }, /* DTL WROMAEBKVF */ { SST(0x30, 0x07, SS_RDEF, "Cleaning failure") }, /* R */ { SST(0x30, 0x08, SS_RDEF, "Cannot write - application code mismatch") }, /* R */ { SST(0x30, 0x09, SS_RDEF, "Current session not fixated for append") }, /* DT WRO AEBK */ { SST(0x30, 0x0A, SS_RDEF, /* XXX TBD */ "Cleaning request rejected") }, /* T */ { SST(0x30, 0x0C, SS_RDEF, /* XXX TBD */ "WORM medium - overwrite attempted") }, /* T */ { SST(0x30, 0x0D, SS_RDEF, /* XXX TBD */ "WORM medium - integrity check") }, /* R */ { SST(0x30, 0x10, SS_RDEF, /* XXX TBD */ "Medium not formatted") }, /* M */ { SST(0x30, 0x11, SS_RDEF, /* XXX TBD */ "Incompatible volume type") }, /* M */ { SST(0x30, 0x12, SS_RDEF, /* XXX TBD */ "Incompatible volume qualifier") }, /* M */ { SST(0x30, 0x13, SS_RDEF, /* XXX TBD */ "Cleaning volume expired") }, /* DT WRO BK */ { SST(0x31, 0x00, SS_RDEF, "Medium format corrupted") }, /* D L RO B */ { SST(0x31, 0x01, SS_RDEF, "Format command failed") }, /* R */ { SST(0x31, 0x02, SS_RDEF, /* XXX TBD */ "Zoned formatting failed due to spare linking") }, /* D B */ { SST(0x31, 0x03, SS_RDEF, /* XXX TBD */ "SANITIZE command failed") }, /* D W O BK */ { SST(0x32, 0x00, SS_RDEF, "No defect spare location available") }, /* D W O BK */ { SST(0x32, 0x01, SS_RDEF, "Defect list update failure") }, /* T */ { SST(0x33, 0x00, SS_RDEF, "Tape length error") }, /* DTLPWROMAEBKVF */ { SST(0x34, 0x00, SS_RDEF, "Enclosure failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x00, SS_RDEF, "Enclosure services failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x01, SS_RDEF, "Unsupported enclosure function") }, /* DTLPWROMAEBKVF */ { 
SST(0x35, 0x02, SS_RDEF, "Enclosure services unavailable") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x03, SS_RDEF, "Enclosure services transfer failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x04, SS_RDEF, "Enclosure services transfer refused") }, /* DTL WROMAEBKVF */ { SST(0x35, 0x05, SS_RDEF, /* XXX TBD */ "Enclosure services checksum error") }, /* L */ { SST(0x36, 0x00, SS_RDEF, "Ribbon, ink, or toner failure") }, /* DTL WROMAEBKVF */ { SST(0x37, 0x00, SS_RDEF, "Rounded parameter") }, /* B */ { SST(0x38, 0x00, SS_RDEF, /* XXX TBD */ "Event status notification") }, /* B */ { SST(0x38, 0x02, SS_RDEF, /* XXX TBD */ "ESN - power management class event") }, /* B */ { SST(0x38, 0x04, SS_RDEF, /* XXX TBD */ "ESN - media class event") }, /* B */ { SST(0x38, 0x06, SS_RDEF, /* XXX TBD */ "ESN - device busy class event") }, /* D */ { SST(0x38, 0x07, SS_RDEF, /* XXX TBD */ "Thin provisioning soft threshold reached") }, /* DTL WROMAE K */ { SST(0x39, 0x00, SS_RDEF, "Saving parameters not supported") }, /* DTL WROM BK */ { SST(0x3A, 0x00, SS_FATAL | ENXIO, "Medium not present") }, /* DT WROM BK */ { SST(0x3A, 0x01, SS_FATAL | ENXIO, "Medium not present - tray closed") }, /* DT WROM BK */ { SST(0x3A, 0x02, SS_FATAL | ENXIO, "Medium not present - tray open") }, /* DT WROM B */ { SST(0x3A, 0x03, SS_RDEF, /* XXX TBD */ "Medium not present - loadable") }, /* DT WRO B */ { SST(0x3A, 0x04, SS_RDEF, /* XXX TBD */ "Medium not present - medium auxiliary memory accessible") }, /* TL */ { SST(0x3B, 0x00, SS_RDEF, "Sequential positioning error") }, /* T */ { SST(0x3B, 0x01, SS_RDEF, "Tape position error at beginning-of-medium") }, /* T */ { SST(0x3B, 0x02, SS_RDEF, "Tape position error at end-of-medium") }, /* L */ { SST(0x3B, 0x03, SS_RDEF, "Tape or electronic vertical forms unit not ready") }, /* L */ { SST(0x3B, 0x04, SS_RDEF, "Slew failure") }, /* L */ { SST(0x3B, 0x05, SS_RDEF, "Paper jam") }, /* L */ { SST(0x3B, 0x06, SS_RDEF, "Failed to sense top-of-form") }, /* L */ { SST(0x3B, 0x07, SS_RDEF, "Failed to sense bottom-of-form") }, /* T */ { SST(0x3B, 0x08, SS_RDEF, "Reposition error") }, /* */ { SST(0x3B, 0x09, SS_RDEF, "Read past end of medium") }, /* */ { SST(0x3B, 0x0A, SS_RDEF, "Read past beginning of medium") }, /* */ { SST(0x3B, 0x0B, SS_RDEF, "Position past end of medium") }, /* T */ { SST(0x3B, 0x0C, SS_RDEF, "Position past beginning of medium") }, /* DT WROM BK */ { SST(0x3B, 0x0D, SS_FATAL | ENOSPC, "Medium destination element full") }, /* DT WROM BK */ { SST(0x3B, 0x0E, SS_RDEF, "Medium source element empty") }, /* R */ { SST(0x3B, 0x0F, SS_RDEF, "End of medium reached") }, /* DT WROM BK */ { SST(0x3B, 0x11, SS_RDEF, "Medium magazine not accessible") }, /* DT WROM BK */ { SST(0x3B, 0x12, SS_RDEF, "Medium magazine removed") }, /* DT WROM BK */ { SST(0x3B, 0x13, SS_RDEF, "Medium magazine inserted") }, /* DT WROM BK */ { SST(0x3B, 0x14, SS_RDEF, "Medium magazine locked") }, /* DT WROM BK */ { SST(0x3B, 0x15, SS_RDEF, "Medium magazine unlocked") }, /* R */ { SST(0x3B, 0x16, SS_RDEF, /* XXX TBD */ "Mechanical positioning or changer error") }, /* F */ { SST(0x3B, 0x17, SS_RDEF, /* XXX TBD */ "Read past end of user object") }, /* M */ { SST(0x3B, 0x18, SS_RDEF, /* XXX TBD */ "Element disabled") }, /* M */ { SST(0x3B, 0x19, SS_RDEF, /* XXX TBD */ "Element enabled") }, /* M */ { SST(0x3B, 0x1A, SS_RDEF, /* XXX TBD */ "Data transfer device removed") }, /* M */ { SST(0x3B, 0x1B, SS_RDEF, /* XXX TBD */ "Data transfer device inserted") }, /* T */ { SST(0x3B, 0x1C, SS_RDEF, /* XXX TBD */ "Too many logical objects on 
partition to support operation") }, /* DTLPWROMAE K */ { SST(0x3D, 0x00, SS_RDEF, "Invalid bits in IDENTIFY message") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x00, SS_RDEF, "Logical unit has not self-configured yet") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x01, SS_RDEF, "Logical unit failure") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x02, SS_RDEF, "Timeout on logical unit") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x03, SS_RDEF, /* XXX TBD */ "Logical unit failed self-test") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x04, SS_RDEF, /* XXX TBD */ "Logical unit unable to update self-test log") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x00, SS_RDEF, "Target operating conditions have changed") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x01, SS_RDEF, "Microcode has been changed") }, /* DTLPWROM BK */ { SST(0x3F, 0x02, SS_RDEF, "Changed operating definition") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x03, SS_RDEF, "INQUIRY data has changed") }, /* DT WROMAEBK */ { SST(0x3F, 0x04, SS_RDEF, "Component device attached") }, /* DT WROMAEBK */ { SST(0x3F, 0x05, SS_RDEF, "Device identifier changed") }, /* DT WROMAEB */ { SST(0x3F, 0x06, SS_RDEF, "Redundancy group created or modified") }, /* DT WROMAEB */ { SST(0x3F, 0x07, SS_RDEF, "Redundancy group deleted") }, /* DT WROMAEB */ { SST(0x3F, 0x08, SS_RDEF, "Spare created or modified") }, /* DT WROMAEB */ { SST(0x3F, 0x09, SS_RDEF, "Spare deleted") }, /* DT WROMAEBK */ { SST(0x3F, 0x0A, SS_RDEF, "Volume set created or modified") }, /* DT WROMAEBK */ { SST(0x3F, 0x0B, SS_RDEF, "Volume set deleted") }, /* DT WROMAEBK */ { SST(0x3F, 0x0C, SS_RDEF, "Volume set deassigned") }, /* DT WROMAEBK */ { SST(0x3F, 0x0D, SS_RDEF, "Volume set reassigned") }, /* DTLPWROMAE */ { SST(0x3F, 0x0E, SS_RDEF | SSQ_RESCAN , "Reported LUNs data has changed") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x0F, SS_RDEF, /* XXX TBD */ "Echo buffer overwritten") }, /* DT WROM B */ { SST(0x3F, 0x10, SS_RDEF, /* XXX TBD */ "Medium loadable") }, /* DT WROM B */ { SST(0x3F, 0x11, SS_RDEF, /* XXX TBD */ "Medium auxiliary memory accessible") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x12, SS_RDEF, /* XXX TBD */ "iSCSI IP address added") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x13, SS_RDEF, /* XXX TBD */ "iSCSI IP address removed") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x14, SS_RDEF, /* XXX TBD */ "iSCSI IP address changed") }, /* DTLPWR MAEBK */ { SST(0x3F, 0x15, SS_RDEF, /* XXX TBD */ "Inspect referrals sense descriptors") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x16, SS_RDEF, /* XXX TBD */ "Microcode has been changed without reset") }, /* D */ { SST(0x3F, 0x17, SS_RDEF, /* XXX TBD */ "Zone transition to full") }, /* D */ { SST(0x40, 0x00, SS_RDEF, "RAM failure") }, /* deprecated - use 40 NN instead */ /* DTLPWROMAEBKVF */ { SST(0x40, 0x80, SS_RDEF, "Diagnostic failure: ASCQ = Component ID") }, /* DTLPWROMAEBKVF */ { SST(0x40, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x80->0xFF */ /* D */ { SST(0x41, 0x00, SS_RDEF, "Data path failure") }, /* deprecated - use 40 NN instead */ /* D */ { SST(0x42, 0x00, SS_RDEF, "Power-on or self-test failure") }, /* deprecated - use 40 NN instead */ /* DTLPWROMAEBKVF */ { SST(0x43, 0x00, SS_RDEF, "Message error") }, /* DTLPWROMAEBKVF */ { SST(0x44, 0x00, SS_FATAL | EIO, "Internal target failure") }, /* DT P MAEBKVF */ { SST(0x44, 0x01, SS_RDEF, /* XXX TBD */ "Persistent reservation information lost") }, /* DT B */ { SST(0x44, 0x71, SS_RDEF, /* XXX TBD */ "ATA device failed set features") }, /* DTLPWROMAEBKVF */ { SST(0x45, 0x00, SS_RDEF, "Select or reselect failure") }, /* DTLPWROM BK */ { SST(0x46, 0x00, SS_RDEF, 
"Unsuccessful soft reset") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x00, SS_RDEF, "SCSI parity error") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x01, SS_RDEF, /* XXX TBD */ "Data phase CRC error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x02, SS_RDEF, /* XXX TBD */ "SCSI parity error detected during ST data phase") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x03, SS_RDEF, /* XXX TBD */ "Information unit iuCRC error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x04, SS_RDEF, /* XXX TBD */ "Asynchronous information protection error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x05, SS_RDEF, /* XXX TBD */ "Protocol service CRC error") }, /* DT MAEBKVF */ { SST(0x47, 0x06, SS_RDEF, /* XXX TBD */ "PHY test function in progress") }, /* DT PWROMAEBK */ { SST(0x47, 0x7F, SS_RDEF, /* XXX TBD */ "Some commands cleared by iSCSI protocol event") }, /* DTLPWROMAEBKVF */ { SST(0x48, 0x00, SS_RDEF, "Initiator detected error message received") }, /* DTLPWROMAEBKVF */ { SST(0x49, 0x00, SS_RDEF, "Invalid message error") }, /* DTLPWROMAEBKVF */ { SST(0x4A, 0x00, SS_RDEF, "Command phase error") }, /* DTLPWROMAEBKVF */ { SST(0x4B, 0x00, SS_RDEF, "Data phase error") }, /* DT PWROMAEBK */ { SST(0x4B, 0x01, SS_RDEF, /* XXX TBD */ "Invalid target port transfer tag received") }, /* DT PWROMAEBK */ { SST(0x4B, 0x02, SS_RDEF, /* XXX TBD */ "Too much write data") }, /* DT PWROMAEBK */ { SST(0x4B, 0x03, SS_RDEF, /* XXX TBD */ "ACK/NAK timeout") }, /* DT PWROMAEBK */ { SST(0x4B, 0x04, SS_RDEF, /* XXX TBD */ "NAK received") }, /* DT PWROMAEBK */ { SST(0x4B, 0x05, SS_RDEF, /* XXX TBD */ "Data offset error") }, /* DT PWROMAEBK */ { SST(0x4B, 0x06, SS_RDEF, /* XXX TBD */ "Initiator response timeout") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x07, SS_RDEF, /* XXX TBD */ "Connection lost") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x08, SS_RDEF, /* XXX TBD */ "Data-in buffer overflow - data buffer size") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x09, SS_RDEF, /* XXX TBD */ "Data-in buffer overflow - data buffer descriptor area") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0A, SS_RDEF, /* XXX TBD */ "Data-in buffer error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0B, SS_RDEF, /* XXX TBD */ "Data-out buffer overflow - data buffer size") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0C, SS_RDEF, /* XXX TBD */ "Data-out buffer overflow - data buffer descriptor area") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0D, SS_RDEF, /* XXX TBD */ "Data-out buffer error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0E, SS_RDEF, /* XXX TBD */ "PCIe fabric error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0F, SS_RDEF, /* XXX TBD */ "PCIe completion timeout") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x10, SS_RDEF, /* XXX TBD */ "PCIe completer abort") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x11, SS_RDEF, /* XXX TBD */ "PCIe poisoned TLP received") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x12, SS_RDEF, /* XXX TBD */ "PCIe ECRC check failed") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x13, SS_RDEF, /* XXX TBD */ "PCIe unsupported request") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x14, SS_RDEF, /* XXX TBD */ "PCIe ACS violation") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x15, SS_RDEF, /* XXX TBD */ "PCIe TLP prefix blocket") }, /* DTLPWROMAEBKVF */ { SST(0x4C, 0x00, SS_RDEF, "Logical unit failed self-configuration") }, /* DTLPWROMAEBKVF */ { SST(0x4D, 0x00, SS_RDEF, "Tagged overlapped commands: ASCQ = Queue tag ID") }, /* DTLPWROMAEBKVF */ { SST(0x4D, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00->0xFF */ /* DTLPWROMAEBKVF */ { SST(0x4E, 0x00, SS_RDEF, "Overlapped commands attempted") }, /* T */ { SST(0x50, 0x00, SS_RDEF, 
"Write append error") }, /* T */ { SST(0x50, 0x01, SS_RDEF, "Write append position error") }, /* T */ { SST(0x50, 0x02, SS_RDEF, "Position error related to timing") }, /* T RO */ { SST(0x51, 0x00, SS_RDEF, "Erase failure") }, /* R */ { SST(0x51, 0x01, SS_RDEF, /* XXX TBD */ "Erase failure - incomplete erase operation detected") }, /* T */ { SST(0x52, 0x00, SS_RDEF, "Cartridge fault") }, /* DTL WROM BK */ { SST(0x53, 0x00, SS_RDEF, "Media load or eject failed") }, /* T */ { SST(0x53, 0x01, SS_RDEF, "Unload tape failure") }, /* DT WROM BK */ { SST(0x53, 0x02, SS_RDEF, "Medium removal prevented") }, /* M */ { SST(0x53, 0x03, SS_RDEF, /* XXX TBD */ "Medium removal prevented by data transfer element") }, /* T */ { SST(0x53, 0x04, SS_RDEF, /* XXX TBD */ "Medium thread or unthread failure") }, /* M */ { SST(0x53, 0x05, SS_RDEF, /* XXX TBD */ "Volume identifier invalid") }, /* T */ { SST(0x53, 0x06, SS_RDEF, /* XXX TBD */ "Volume identifier missing") }, /* M */ { SST(0x53, 0x07, SS_RDEF, /* XXX TBD */ "Duplicate volume identifier") }, /* M */ { SST(0x53, 0x08, SS_RDEF, /* XXX TBD */ "Element status unknown") }, /* M */ { SST(0x53, 0x09, SS_RDEF, /* XXX TBD */ "Data transfer device error - load failed") }, /* M */ { SST(0x53, 0x0A, SS_RDEF, /* XXX TBD */ "Data transfer device error - unload failed") }, /* M */ { SST(0x53, 0x0B, SS_RDEF, /* XXX TBD */ "Data transfer device error - unload missing") }, /* M */ { SST(0x53, 0x0C, SS_RDEF, /* XXX TBD */ "Data transfer device error - eject failed") }, /* M */ { SST(0x53, 0x0D, SS_RDEF, /* XXX TBD */ "Data transfer device error - library communication failed") }, /* P */ { SST(0x54, 0x00, SS_RDEF, "SCSI to host system interface failure") }, /* P */ { SST(0x55, 0x00, SS_RDEF, "System resource failure") }, /* D O BK */ { SST(0x55, 0x01, SS_FATAL | ENOSPC, "System buffer full") }, /* DTLPWROMAE K */ { SST(0x55, 0x02, SS_RDEF, /* XXX TBD */ "Insufficient reservation resources") }, /* DTLPWROMAE K */ { SST(0x55, 0x03, SS_RDEF, /* XXX TBD */ "Insufficient resources") }, /* DTLPWROMAE K */ { SST(0x55, 0x04, SS_RDEF, /* XXX TBD */ "Insufficient registration resources") }, /* DT PWROMAEBK */ { SST(0x55, 0x05, SS_RDEF, /* XXX TBD */ "Insufficient access control resources") }, /* DT WROM B */ { SST(0x55, 0x06, SS_RDEF, /* XXX TBD */ "Auxiliary memory out of space") }, /* F */ { SST(0x55, 0x07, SS_RDEF, /* XXX TBD */ "Quota error") }, /* T */ { SST(0x55, 0x08, SS_RDEF, /* XXX TBD */ "Maximum number of supplemental decryption keys exceeded") }, /* M */ { SST(0x55, 0x09, SS_RDEF, /* XXX TBD */ "Medium auxiliary memory not accessible") }, /* M */ { SST(0x55, 0x0A, SS_RDEF, /* XXX TBD */ "Data currently unavailable") }, /* DTLPWROMAEBKVF */ { SST(0x55, 0x0B, SS_RDEF, /* XXX TBD */ "Insufficient power for operation") }, /* DT P B */ { SST(0x55, 0x0C, SS_RDEF, /* XXX TBD */ "Insufficient resources to create ROD") }, /* DT P B */ { SST(0x55, 0x0D, SS_RDEF, /* XXX TBD */ "Insufficient resources to create ROD token") }, /* D */ { SST(0x55, 0x0E, SS_RDEF, /* XXX TBD */ "Insufficient zone resources") }, /* D */ { SST(0x55, 0x0F, SS_RDEF, /* XXX TBD */ "Insufficient zone resources to complete write") }, /* D */ { SST(0x55, 0x10, SS_RDEF, /* XXX TBD */ "Maximum number of streams open") }, /* R */ { SST(0x57, 0x00, SS_RDEF, "Unable to recover table-of-contents") }, /* O */ { SST(0x58, 0x00, SS_RDEF, "Generation does not exist") }, /* O */ { SST(0x59, 0x00, SS_RDEF, "Updated block read") }, /* DTLPWRO BK */ { SST(0x5A, 0x00, SS_RDEF, "Operator request or state change input") }, /* 
DT WROM BK */ { SST(0x5A, 0x01, SS_RDEF, "Operator medium removal request") }, /* DT WRO A BK */ { SST(0x5A, 0x02, SS_RDEF, "Operator selected write protect") }, /* DT WRO A BK */ { SST(0x5A, 0x03, SS_RDEF, "Operator selected write permit") }, /* DTLPWROM K */ { SST(0x5B, 0x00, SS_RDEF, "Log exception") }, /* DTLPWROM K */ { SST(0x5B, 0x01, SS_RDEF, "Threshold condition met") }, /* DTLPWROM K */ { SST(0x5B, 0x02, SS_RDEF, "Log counter at maximum") }, /* DTLPWROM K */ { SST(0x5B, 0x03, SS_RDEF, "Log list codes exhausted") }, /* D O */ { SST(0x5C, 0x00, SS_RDEF, "RPL status change") }, /* D O */ { SST(0x5C, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Spindles synchronized") }, /* D O */ { SST(0x5C, 0x02, SS_RDEF, "Spindles not synchronized") }, /* DTLPWROMAEBKVF */ { SST(0x5D, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Failure prediction threshold exceeded") }, /* R B */ { SST(0x5D, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Media failure prediction threshold exceeded") }, /* R */ { SST(0x5D, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Logical unit failure prediction threshold exceeded") }, /* R */ { SST(0x5D, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Spare area exhaustion prediction threshold exceeded") }, /* D B */ { SST(0x5D, 0x10, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x11, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x12, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x13, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x14, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x15, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure access times too high") }, /* D B */ { SST(0x5D, 0x16, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x17, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x18, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure controller detected") }, /* D B */ { SST(0x5D, 0x19, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x1A, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x1B, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x1C, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x1D, SS_NOP | SSQ_PRINT_SENSE, "Hardware impending failure power loss protection circuit") }, /* D B */ { SST(0x5D, 0x20, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x21, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x22, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x23, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x24, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x25, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure access times too high") }, /* D B */ { SST(0x5D, 0x26, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure start unit times too high") }, /* D B */ { SST(0x5D, 
0x27, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x28, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure controller detected") }, /* D B */ { SST(0x5D, 0x29, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x2A, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x2B, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x2C, SS_NOP | SSQ_PRINT_SENSE, "Controller impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x30, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x31, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x32, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x33, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x34, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x35, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure access times too high") }, /* D B */ { SST(0x5D, 0x36, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x37, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x38, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure controller detected") }, /* D B */ { SST(0x5D, 0x39, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x3A, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x3B, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x3C, SS_NOP | SSQ_PRINT_SENSE, "Data channel impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x40, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x41, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x42, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x43, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x44, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x45, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure access times too high") }, /* D B */ { SST(0x5D, 0x46, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x47, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x48, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure controller detected") }, /* D B */ { SST(0x5D, 0x49, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x4A, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x4B, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x4C, SS_NOP | SSQ_PRINT_SENSE, "Servo impending failure drive calibration retry count") }, /* D B */ 
{ SST(0x5D, 0x50, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x51, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x52, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x53, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x54, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x55, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure access times too high") }, /* D B */ { SST(0x5D, 0x56, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x57, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x58, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure controller detected") }, /* D B */ { SST(0x5D, 0x59, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x5A, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x5B, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x5C, SS_NOP | SSQ_PRINT_SENSE, "Spindle impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x60, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x61, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x62, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x63, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x64, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x65, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure access times too high") }, /* D B */ { SST(0x5D, 0x66, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x67, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x68, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure controller detected") }, /* D B */ { SST(0x5D, 0x69, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x6A, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x6B, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x6C, SS_NOP | SSQ_PRINT_SENSE, "Firmware impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x73, SS_NOP | SSQ_PRINT_SENSE, "Media impending failure endurance limit met") }, /* DTLPWROMAEBKVF */ { SST(0x5D, 0xFF, SS_NOP | SSQ_PRINT_SENSE, "Failure prediction threshold exceeded (false)") }, /* DTLPWRO A K */ { SST(0x5E, 0x00, SS_RDEF, "Low power condition on") }, /* DTLPWRO A K */ { SST(0x5E, 0x01, SS_RDEF, "Idle condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x02, SS_RDEF, "Standby condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x03, SS_RDEF, "Idle condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x04, SS_RDEF, "Standby condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x05, SS_RDEF, 
"Idle-B condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x06, SS_RDEF, "Idle-B condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x07, SS_RDEF, "Idle-C condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x08, SS_RDEF, "Idle-C condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x09, SS_RDEF, "Standby-Y condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x0A, SS_RDEF, "Standby-Y condition activated by command") }, /* B */ { SST(0x5E, 0x41, SS_RDEF, /* XXX TBD */ "Power state change to active") }, /* B */ { SST(0x5E, 0x42, SS_RDEF, /* XXX TBD */ "Power state change to idle") }, /* B */ { SST(0x5E, 0x43, SS_RDEF, /* XXX TBD */ "Power state change to standby") }, /* B */ { SST(0x5E, 0x45, SS_RDEF, /* XXX TBD */ "Power state change to sleep") }, /* BK */ { SST(0x5E, 0x47, SS_RDEF, /* XXX TBD */ "Power state change to device control") }, /* */ { SST(0x60, 0x00, SS_RDEF, "Lamp failure") }, /* */ { SST(0x61, 0x00, SS_RDEF, "Video acquisition error") }, /* */ { SST(0x61, 0x01, SS_RDEF, "Unable to acquire video") }, /* */ { SST(0x61, 0x02, SS_RDEF, "Out of focus") }, /* */ { SST(0x62, 0x00, SS_RDEF, "Scan head positioning error") }, /* R */ { SST(0x63, 0x00, SS_RDEF, "End of user area encountered on this track") }, /* R */ { SST(0x63, 0x01, SS_FATAL | ENOSPC, "Packet does not fit in available space") }, /* R */ { SST(0x64, 0x00, SS_FATAL | ENXIO, "Illegal mode for this track") }, /* R */ { SST(0x64, 0x01, SS_RDEF, "Invalid packet size") }, /* DTLPWROMAEBKVF */ { SST(0x65, 0x00, SS_RDEF, "Voltage fault") }, /* */ { SST(0x66, 0x00, SS_RDEF, "Automatic document feeder cover up") }, /* */ { SST(0x66, 0x01, SS_RDEF, "Automatic document feeder lift up") }, /* */ { SST(0x66, 0x02, SS_RDEF, "Document jam in automatic document feeder") }, /* */ { SST(0x66, 0x03, SS_RDEF, "Document miss feed automatic in document feeder") }, /* A */ { SST(0x67, 0x00, SS_RDEF, "Configuration failure") }, /* A */ { SST(0x67, 0x01, SS_RDEF, "Configuration of incapable logical units failed") }, /* A */ { SST(0x67, 0x02, SS_RDEF, "Add logical unit failed") }, /* A */ { SST(0x67, 0x03, SS_RDEF, "Modification of logical unit failed") }, /* A */ { SST(0x67, 0x04, SS_RDEF, "Exchange of logical unit failed") }, /* A */ { SST(0x67, 0x05, SS_RDEF, "Remove of logical unit failed") }, /* A */ { SST(0x67, 0x06, SS_RDEF, "Attachment of logical unit failed") }, /* A */ { SST(0x67, 0x07, SS_RDEF, "Creation of logical unit failed") }, /* A */ { SST(0x67, 0x08, SS_RDEF, /* XXX TBD */ "Assign failure occurred") }, /* A */ { SST(0x67, 0x09, SS_RDEF, /* XXX TBD */ "Multiply assigned logical unit") }, /* DTLPWROMAEBKVF */ { SST(0x67, 0x0A, SS_RDEF, /* XXX TBD */ "Set target port groups command failed") }, /* DT B */ { SST(0x67, 0x0B, SS_RDEF, /* XXX TBD */ "ATA device feature not enabled") }, /* A */ { SST(0x68, 0x00, SS_RDEF, "Logical unit not configured") }, /* D */ { SST(0x68, 0x01, SS_RDEF, "Subsidiary logical unit not configured") }, /* A */ { SST(0x69, 0x00, SS_RDEF, "Data loss on logical unit") }, /* A */ { SST(0x69, 0x01, SS_RDEF, "Multiple logical unit failures") }, /* A */ { SST(0x69, 0x02, SS_RDEF, "Parity/data mismatch") }, /* A */ { SST(0x6A, 0x00, SS_RDEF, "Informational, refer to log") }, /* A */ { SST(0x6B, 0x00, SS_RDEF, "State change has occurred") }, /* A */ { SST(0x6B, 0x01, SS_RDEF, "Redundancy level got better") }, /* A */ { SST(0x6B, 0x02, SS_RDEF, "Redundancy level got worse") }, /* A */ { SST(0x6C, 0x00, SS_RDEF, "Rebuild failure occurred") }, /* A */ { 
SST(0x6D, 0x00, SS_RDEF, "Recalculate failure occurred") }, /* A */ { SST(0x6E, 0x00, SS_RDEF, "Command to logical unit failed") }, /* R */ { SST(0x6F, 0x00, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - authentication failure") }, /* R */ { SST(0x6F, 0x01, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - key not present") }, /* R */ { SST(0x6F, 0x02, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - key not established") }, /* R */ { SST(0x6F, 0x03, SS_RDEF, /* XXX TBD */ "Read of scrambled sector without authentication") }, /* R */ { SST(0x6F, 0x04, SS_RDEF, /* XXX TBD */ "Media region code is mismatched to logical unit region") }, /* R */ { SST(0x6F, 0x05, SS_RDEF, /* XXX TBD */ "Drive region must be permanent/region reset count error") }, /* R */ { SST(0x6F, 0x06, SS_RDEF, /* XXX TBD */ "Insufficient block count for binding NONCE recording") }, /* R */ { SST(0x6F, 0x07, SS_RDEF, /* XXX TBD */ "Conflict in binding NONCE recording") }, /* T */ { SST(0x70, 0x00, SS_RDEF, "Decompression exception short: ASCQ = Algorithm ID") }, /* T */ { SST(0x70, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00 -> 0xFF */ /* T */ { SST(0x71, 0x00, SS_RDEF, "Decompression exception long: ASCQ = Algorithm ID") }, /* T */ { SST(0x71, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00 -> 0xFF */ /* R */ { SST(0x72, 0x00, SS_RDEF, "Session fixation error") }, /* R */ { SST(0x72, 0x01, SS_RDEF, "Session fixation error writing lead-in") }, /* R */ { SST(0x72, 0x02, SS_RDEF, "Session fixation error writing lead-out") }, /* R */ { SST(0x72, 0x03, SS_RDEF, "Session fixation error - incomplete track in session") }, /* R */ { SST(0x72, 0x04, SS_RDEF, "Empty or partially written reserved track") }, /* R */ { SST(0x72, 0x05, SS_RDEF, /* XXX TBD */ "No more track reservations allowed") }, /* R */ { SST(0x72, 0x06, SS_RDEF, /* XXX TBD */ "RMZ extension is not allowed") }, /* R */ { SST(0x72, 0x07, SS_RDEF, /* XXX TBD */ "No more test zone extensions are allowed") }, /* R */ { SST(0x73, 0x00, SS_RDEF, "CD control error") }, /* R */ { SST(0x73, 0x01, SS_RDEF, "Power calibration area almost full") }, /* R */ { SST(0x73, 0x02, SS_FATAL | ENOSPC, "Power calibration area is full") }, /* R */ { SST(0x73, 0x03, SS_RDEF, "Power calibration area error") }, /* R */ { SST(0x73, 0x04, SS_RDEF, "Program memory area update failure") }, /* R */ { SST(0x73, 0x05, SS_RDEF, "Program memory area is full") }, /* R */ { SST(0x73, 0x06, SS_RDEF, /* XXX TBD */ "RMA/PMA is almost full") }, /* R */ { SST(0x73, 0x10, SS_RDEF, /* XXX TBD */ "Current power calibration area almost full") }, /* R */ { SST(0x73, 0x11, SS_RDEF, /* XXX TBD */ "Current power calibration area is full") }, /* R */ { SST(0x73, 0x17, SS_RDEF, /* XXX TBD */ "RDZ is full") }, /* T */ { SST(0x74, 0x00, SS_RDEF, /* XXX TBD */ "Security error") }, /* T */ { SST(0x74, 0x01, SS_RDEF, /* XXX TBD */ "Unable to decrypt data") }, /* T */ { SST(0x74, 0x02, SS_RDEF, /* XXX TBD */ "Unencrypted data encountered while decrypting") }, /* T */ { SST(0x74, 0x03, SS_RDEF, /* XXX TBD */ "Incorrect data encryption key") }, /* T */ { SST(0x74, 0x04, SS_RDEF, /* XXX TBD */ "Cryptographic integrity validation failed") }, /* T */ { SST(0x74, 0x05, SS_RDEF, /* XXX TBD */ "Error decrypting data") }, /* T */ { SST(0x74, 0x06, SS_RDEF, /* XXX TBD */ "Unknown signature verification key") }, /* T */ { SST(0x74, 0x07, SS_RDEF, /* XXX TBD */ "Encryption parameters not useable") }, /* DT R M E VF */ { SST(0x74, 0x08, SS_RDEF, /* XXX TBD */ "Digital signature 
validation failure") }, /* T */ { SST(0x74, 0x09, SS_RDEF, /* XXX TBD */ "Encryption mode mismatch on read") }, /* T */ { SST(0x74, 0x0A, SS_RDEF, /* XXX TBD */ "Encrypted block not raw read enabled") }, /* T */ { SST(0x74, 0x0B, SS_RDEF, /* XXX TBD */ "Incorrect encryption parameters") }, /* DT R MAEBKV */ { SST(0x74, 0x0C, SS_RDEF, /* XXX TBD */ "Unable to decrypt parameter list") }, /* T */ { SST(0x74, 0x0D, SS_RDEF, /* XXX TBD */ "Encryption algorithm disabled") }, /* DT R MAEBKV */ { SST(0x74, 0x10, SS_RDEF, /* XXX TBD */ "SA creation parameter value invalid") }, /* DT R MAEBKV */ { SST(0x74, 0x11, SS_RDEF, /* XXX TBD */ "SA creation parameter value rejected") }, /* DT R MAEBKV */ { SST(0x74, 0x12, SS_RDEF, /* XXX TBD */ "Invalid SA usage") }, /* T */ { SST(0x74, 0x21, SS_RDEF, /* XXX TBD */ "Data encryption configuration prevented") }, /* DT R MAEBKV */ { SST(0x74, 0x30, SS_RDEF, /* XXX TBD */ "SA creation parameter not supported") }, /* DT R MAEBKV */ { SST(0x74, 0x40, SS_RDEF, /* XXX TBD */ "Authentication failed") }, /* V */ { SST(0x74, 0x61, SS_RDEF, /* XXX TBD */ "External data encryption key manager access error") }, /* V */ { SST(0x74, 0x62, SS_RDEF, /* XXX TBD */ "External data encryption key manager error") }, /* V */ { SST(0x74, 0x63, SS_RDEF, /* XXX TBD */ "External data encryption key not found") }, /* V */ { SST(0x74, 0x64, SS_RDEF, /* XXX TBD */ "External data encryption request not authorized") }, /* T */ { SST(0x74, 0x6E, SS_RDEF, /* XXX TBD */ "External data encryption control timeout") }, /* T */ { SST(0x74, 0x6F, SS_RDEF, /* XXX TBD */ "External data encryption control error") }, /* DT R M E V */ { SST(0x74, 0x71, SS_FATAL | EACCES, "Logical unit access not authorized") }, /* D */ { SST(0x74, 0x79, SS_FATAL | EACCES, "Security conflict in translated device") } }; const u_int asc_table_size = nitems(asc_table); struct asc_key { int asc; int ascq; }; static int ascentrycomp(const void *key, const void *member) { int asc; int ascq; const struct asc_table_entry *table_entry; asc = ((const struct asc_key *)key)->asc; ascq = ((const struct asc_key *)key)->ascq; table_entry = (const struct asc_table_entry *)member; if (asc >= table_entry->asc) { if (asc > table_entry->asc) return (1); if (ascq <= table_entry->ascq) { /* Check for ranges */ if (ascq == table_entry->ascq || ((table_entry->action & SSQ_RANGE) != 0 && ascq >= (table_entry - 1)->ascq)) return (0); return (-1); } return (1); } return (-1); } static int senseentrycomp(const void *key, const void *member) { int sense_key; const struct sense_key_table_entry *table_entry; sense_key = *((const int *)key); table_entry = (const struct sense_key_table_entry *)member; if (sense_key >= table_entry->sense_key) { if (sense_key == table_entry->sense_key) return (0); return (1); } return (-1); } static void fetchtableentries(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const struct sense_key_table_entry **sense_entry, const struct asc_table_entry **asc_entry) { caddr_t match; const struct asc_table_entry *asc_tables[2]; const struct sense_key_table_entry *sense_tables[2]; struct asc_key asc_ascq; size_t asc_tables_size[2]; size_t sense_tables_size[2]; int num_asc_tables; int num_sense_tables; int i; /* Default to failure */ *sense_entry = NULL; *asc_entry = NULL; match = NULL; if (inq_data != NULL) match = cam_quirkmatch((caddr_t)inq_data, (caddr_t)sense_quirk_table, sense_quirk_table_size, sizeof(*sense_quirk_table), scsi_inquiry_match); if (match != NULL) { struct scsi_sense_quirk_entry *quirk; 
quirk = (struct scsi_sense_quirk_entry *)match; asc_tables[0] = quirk->asc_info; asc_tables_size[0] = quirk->num_ascs; asc_tables[1] = asc_table; asc_tables_size[1] = asc_table_size; num_asc_tables = 2; sense_tables[0] = quirk->sense_key_info; sense_tables_size[0] = quirk->num_sense_keys; sense_tables[1] = sense_key_table; sense_tables_size[1] = nitems(sense_key_table); num_sense_tables = 2; } else { asc_tables[0] = asc_table; asc_tables_size[0] = asc_table_size; num_asc_tables = 1; sense_tables[0] = sense_key_table; sense_tables_size[0] = nitems(sense_key_table); num_sense_tables = 1; } asc_ascq.asc = asc; asc_ascq.ascq = ascq; for (i = 0; i < num_asc_tables; i++) { void *found_entry; found_entry = bsearch(&asc_ascq, asc_tables[i], asc_tables_size[i], sizeof(**asc_tables), ascentrycomp); if (found_entry) { *asc_entry = (struct asc_table_entry *)found_entry; break; } } for (i = 0; i < num_sense_tables; i++) { void *found_entry; found_entry = bsearch(&sense_key, sense_tables[i], sense_tables_size[i], sizeof(**sense_tables), senseentrycomp); if (found_entry) { *sense_entry = (struct sense_key_table_entry *)found_entry; break; } } } void scsi_sense_desc(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const char **sense_key_desc, const char **asc_desc) { const struct asc_table_entry *asc_entry; const struct sense_key_table_entry *sense_entry; fetchtableentries(sense_key, asc, ascq, inq_data, &sense_entry, &asc_entry); if (sense_entry != NULL) *sense_key_desc = sense_entry->desc; else *sense_key_desc = "Invalid Sense Key"; if (asc_entry != NULL) *asc_desc = asc_entry->desc; else if (asc >= 0x80 && asc <= 0xff) *asc_desc = "Vendor Specific ASC"; else if (ascq >= 0x80 && ascq <= 0xff) *asc_desc = "Vendor Specific ASCQ"; else *asc_desc = "Reserved ASC/ASCQ pair"; } /* * Given sense and device type information, return the appropriate action. * If we do not understand the specific error as identified by the ASC/ASCQ * pair, fall back on the more generic actions derived from the sense key. */ scsi_sense_action scsi_error_action(struct ccb_scsiio *csio, struct scsi_inquiry_data *inq_data, u_int32_t sense_flags) { const struct asc_table_entry *asc_entry; const struct sense_key_table_entry *sense_entry; int error_code, sense_key, asc, ascq; scsi_sense_action action; if (!scsi_extract_sense_ccb((union ccb *)csio, &error_code, &sense_key, &asc, &ascq)) { action = SS_RETRY | SSQ_DECREMENT_COUNT | SSQ_PRINT_SENSE | EIO; } else if ((error_code == SSD_DEFERRED_ERROR) || (error_code == SSD_DESC_DEFERRED_ERROR)) { /* * XXX dufault@FreeBSD.org * This error doesn't relate to the command associated * with this request sense. A deferred error is an error * for a command that has already returned GOOD status * (see SCSI2 8.2.14.2). * * By my reading of that section, it looks like the current * command has been cancelled, we should now clean things up * (hopefully recovering any lost data) and then retry the * current command. There are two easy choices, both wrong: * * 1. Drop through (like we had been doing), thus treating * this as if the error were for the current command and * return and stop the current command. * * 2. Issue a retry (like I made it do) thus hopefully * recovering the current transfer, and ignoring the * fact that we've dropped a command. 
* * These should probably be handled in a device specific * sense handler or punted back up to a user mode daemon */ action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE; } else { fetchtableentries(sense_key, asc, ascq, inq_data, &sense_entry, &asc_entry); /* * Override the 'No additional Sense' entry (0,0) * with the error action of the sense key. */ if (asc_entry != NULL && (asc != 0 || ascq != 0)) action = asc_entry->action; else if (sense_entry != NULL) action = sense_entry->action; else action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE; if (sense_key == SSD_KEY_RECOVERED_ERROR) { /* * The action succeeded but the device wants * the user to know that some recovery action * was required. */ action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK); action |= SS_NOP|SSQ_PRINT_SENSE; } else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) { if ((sense_flags & SF_QUIET_IR) != 0) action &= ~SSQ_PRINT_SENSE; } else if (sense_key == SSD_KEY_UNIT_ATTENTION) { if ((sense_flags & SF_RETRY_UA) != 0 && (action & SS_MASK) == SS_FAIL) { action &= ~(SS_MASK|SSQ_MASK); action |= SS_RETRY|SSQ_DECREMENT_COUNT| SSQ_PRINT_SENSE; } action |= SSQ_UA; } } if ((action & SS_MASK) >= SS_START && (sense_flags & SF_NO_RECOVERY)) { action &= ~SS_MASK; action |= SS_FAIL; } else if ((action & SS_MASK) == SS_RETRY && (sense_flags & SF_NO_RETRY)) { action &= ~SS_MASK; action |= SS_FAIL; } if ((sense_flags & SF_PRINT_ALWAYS) != 0) action |= SSQ_PRINT_SENSE; else if ((sense_flags & SF_NO_PRINT) != 0) action &= ~SSQ_PRINT_SENSE; return (action); } char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len) { struct sbuf sb; int error; if (len == 0) return (""); sbuf_new(&sb, cdb_string, len, SBUF_FIXEDLEN); scsi_cdb_sbuf(cdb_ptr, &sb); /* ENOMEM just means that the fixed buffer is full, OK to ignore */ error = sbuf_finish(&sb); if (error != 0 && error != ENOMEM) return (""); return(sbuf_data(&sb)); } void scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb) { u_int8_t cdb_len; int i; if (cdb_ptr == NULL) return; /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. The next 5 bits * are the command code. * Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((*cdb_ptr >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* in this case, just print out the opcode */ cdb_len = 1; break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } for (i = 0; i < cdb_len; i++) sbuf_printf(sb, "%02hhx ", cdb_ptr[i]); return; } const char * scsi_status_string(struct ccb_scsiio *csio) { switch(csio->scsi_status) { case SCSI_STATUS_OK: return("OK"); case SCSI_STATUS_CHECK_COND: return("Check Condition"); case SCSI_STATUS_BUSY: return("Busy"); case SCSI_STATUS_INTERMED: return("Intermediate"); case SCSI_STATUS_INTERMED_COND_MET: return("Intermediate-Condition Met"); case SCSI_STATUS_RESERV_CONFLICT: return("Reservation Conflict"); case SCSI_STATUS_CMD_TERMINATED: return("Command Terminated"); case SCSI_STATUS_QUEUE_FULL: return("Queue Full"); case SCSI_STATUS_ACA_ACTIVE: return("ACA Active"); case SCSI_STATUS_TASK_ABORTED: return("Task Aborted"); default: { static char unkstr[64]; snprintf(unkstr, sizeof(unkstr), "Unknown %#x", csio->scsi_status); return(unkstr); } } } /* * scsi_command_string() returns 0 for success and -1 for failure. 
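 *
 * A minimal usage sketch (hypothetical kernel-side caller; "csio" is
 * assumed to be a completed SCSI I/O CCB): render the opcode
 * description and CDB bytes into an sbuf, then finish and log it.
 *
 *	struct sbuf sb;
 *	char buf[128];
 *
 *	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
 *	if (scsi_command_string(csio, &sb) == 0) {
 *		sbuf_finish(&sb);
 *		printf("%s\n", sbuf_data(&sb));
 *	}
 *	sbuf_delete(&sb);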
*/ #ifdef _KERNEL int scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb) #else /* !_KERNEL */ int scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb) #endif /* _KERNEL/!_KERNEL */ { struct scsi_inquiry_data *inq_data; #ifdef _KERNEL struct ccb_getdev *cgd; #endif /* _KERNEL */ #ifdef _KERNEL if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL) return(-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, csio->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); /* * If the device is unconfigured, just pretend that it is a hard * drive. scsi_op_desc() needs this. */ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE) cgd->inq_data.device = T_DIRECT; inq_data = &cgd->inq_data; #else /* !_KERNEL */ inq_data = &device->inq_data; #endif /* _KERNEL/!_KERNEL */ sbuf_printf(sb, "%s. CDB: ", scsi_op_desc(scsiio_cdb_ptr(csio)[0], inq_data)); scsi_cdb_sbuf(scsiio_cdb_ptr(csio), sb); #ifdef _KERNEL xpt_free_ccb((union ccb *)cgd); #endif return(0); } /* * Iterate over sense descriptors. Each descriptor is passed into iter_func(). * If iter_func() returns 0, list traversal continues. If iter_func() * returns non-zero, list traversal is stopped. */ void scsi_desc_iterate(struct scsi_sense_data_desc *sense, u_int sense_len, int (*iter_func)(struct scsi_sense_data_desc *sense, u_int, struct scsi_sense_desc_header *, void *), void *arg) { int cur_pos; int desc_len; /* * First make sure the extra length field is present. */ if (SSD_DESC_IS_PRESENT(sense, sense_len, extra_len) == 0) return; /* * The length of data actually returned may be different than the * extra_len recorded in the structure. */ desc_len = sense_len -offsetof(struct scsi_sense_data_desc, sense_desc); /* * Limit this further by the extra length reported, and the maximum * allowed extra length. */ desc_len = MIN(desc_len, MIN(sense->extra_len, SSD_EXTRA_MAX)); /* * Subtract the size of the header from the descriptor length. * This is to ensure that we have at least the header left, so we * don't have to check that inside the loop. This can wind up * being a negative value. */ desc_len -= sizeof(struct scsi_sense_desc_header); for (cur_pos = 0; cur_pos < desc_len;) { struct scsi_sense_desc_header *header; header = (struct scsi_sense_desc_header *) &sense->sense_desc[cur_pos]; /* * Check to make sure we have the entire descriptor. We * don't call iter_func() unless we do. * * Note that although cur_pos is at the beginning of the * descriptor, desc_len already has the header length * subtracted. So the comparison of the length in the * header (which does not include the header itself) to * desc_len - cur_pos is correct. */ if (header->length > (desc_len - cur_pos)) break; if (iter_func(sense, sense_len, header, arg) != 0) break; cur_pos += sizeof(*header) + header->length; } } struct scsi_find_desc_info { uint8_t desc_type; struct scsi_sense_desc_header *header; }; static int scsi_find_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len, struct scsi_sense_desc_header *header, void *arg) { struct scsi_find_desc_info *desc_info; desc_info = (struct scsi_find_desc_info *)arg; if (header->desc_type == desc_info->desc_type) { desc_info->header = header; /* We found the descriptor, tell the iterator to stop. */ return (1); } else return (0); } /* * Given a descriptor type, return a pointer to it if it is in the sense * data and not truncated. Avoiding truncating sense data will simplify * things significantly for the caller. 
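 *
 * For example (a sketch; "sense" and "sense_len" are assumed to hold
 * descriptor-format sense data), fetching the ATA status return
 * descriptor might look like:
 *
 *	struct scsi_sense_ata_ret_desc *ata;
 *
 *	ata = (struct scsi_sense_ata_ret_desc *)scsi_find_desc(sense,
 *	    sense_len, SSD_DESC_ATA);
 *	if (ata != NULL)
 *		printf("ATA status: %#x\n", ata->status);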
*/ uint8_t * scsi_find_desc(struct scsi_sense_data_desc *sense, u_int sense_len, uint8_t desc_type) { struct scsi_find_desc_info desc_info; desc_info.desc_type = desc_type; desc_info.header = NULL; scsi_desc_iterate(sense, sense_len, scsi_find_desc_func, &desc_info); return ((uint8_t *)desc_info.header); } /* * Fill in SCSI descriptor sense data with the specified parameters. */ static void scsi_set_sense_data_desc_va(struct scsi_sense_data *sense_data, u_int *sense_len, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap) { struct scsi_sense_data_desc *sense; scsi_sense_elem_type elem_type; int space, len; uint8_t *desc, *data; memset(sense_data, 0, sizeof(*sense_data)); sense = (struct scsi_sense_data_desc *)sense_data; if (current_error != 0) sense->error_code = SSD_DESC_CURRENT_ERROR; else sense->error_code = SSD_DESC_DEFERRED_ERROR; sense->sense_key = sense_key; sense->add_sense_code = asc; sense->add_sense_code_qual = ascq; sense->flags = 0; desc = &sense->sense_desc[0]; space = *sense_len - offsetof(struct scsi_sense_data_desc, sense_desc); while ((elem_type = va_arg(ap, scsi_sense_elem_type)) != SSD_ELEM_NONE) { if (elem_type >= SSD_ELEM_MAX) { printf("%s: invalid sense type %d\n", __func__, elem_type); break; } len = va_arg(ap, int); data = va_arg(ap, uint8_t *); switch (elem_type) { case SSD_ELEM_SKIP: break; case SSD_ELEM_DESC: if (space < len) { sense->flags |= SSDD_SDAT_OVFL; break; } bcopy(data, desc, len); desc += len; space -= len; break; case SSD_ELEM_SKS: { struct scsi_sense_sks *sks = (void *)desc; if (len > sizeof(sks->sense_key_spec)) break; if (space < sizeof(*sks)) { sense->flags |= SSDD_SDAT_OVFL; break; } sks->desc_type = SSD_DESC_SKS; sks->length = sizeof(*sks) - (offsetof(struct scsi_sense_sks, length) + 1); bcopy(data, &sks->sense_key_spec, len); desc += sizeof(*sks); space -= sizeof(*sks); break; } case SSD_ELEM_COMMAND: { struct scsi_sense_command *cmd = (void *)desc; if (len > sizeof(cmd->command_info)) break; if (space < sizeof(*cmd)) { sense->flags |= SSDD_SDAT_OVFL; break; } cmd->desc_type = SSD_DESC_COMMAND; cmd->length = sizeof(*cmd) - (offsetof(struct scsi_sense_command, length) + 1); bcopy(data, &cmd->command_info[ sizeof(cmd->command_info) - len], len); desc += sizeof(*cmd); space -= sizeof(*cmd); break; } case SSD_ELEM_INFO: { struct scsi_sense_info *info = (void *)desc; if (len > sizeof(info->info)) break; if (space < sizeof(*info)) { sense->flags |= SSDD_SDAT_OVFL; break; } info->desc_type = SSD_DESC_INFO; info->length = sizeof(*info) - (offsetof(struct scsi_sense_info, length) + 1); info->byte2 = SSD_INFO_VALID; bcopy(data, &info->info[sizeof(info->info) - len], len); desc += sizeof(*info); space -= sizeof(*info); break; } case SSD_ELEM_FRU: { struct scsi_sense_fru *fru = (void *)desc; if (len > sizeof(fru->fru)) break; if (space < sizeof(*fru)) { sense->flags |= SSDD_SDAT_OVFL; break; } fru->desc_type = SSD_DESC_FRU; fru->length = sizeof(*fru) - (offsetof(struct scsi_sense_fru, length) + 1); fru->fru = *data; desc += sizeof(*fru); space -= sizeof(*fru); break; } case SSD_ELEM_STREAM: { struct scsi_sense_stream *stream = (void *)desc; if (len > sizeof(stream->byte3)) break; if (space < sizeof(*stream)) { sense->flags |= SSDD_SDAT_OVFL; break; } stream->desc_type = SSD_DESC_STREAM; stream->length = sizeof(*stream) - (offsetof(struct scsi_sense_stream, length) + 1); stream->byte3 = *data; desc += sizeof(*stream); space -= sizeof(*stream); break; } default: /* * We shouldn't get here, but if we do, do 
nothing. * We've already consumed the arguments above. */ break; } } sense->extra_len = desc - &sense->sense_desc[0]; *sense_len = offsetof(struct scsi_sense_data_desc, extra_len) + 1 + sense->extra_len; } /* * Fill in SCSI fixed sense data with the specified parameters. */ static void scsi_set_sense_data_fixed_va(struct scsi_sense_data *sense_data, u_int *sense_len, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap) { struct scsi_sense_data_fixed *sense; scsi_sense_elem_type elem_type; uint8_t *data; int len; memset(sense_data, 0, sizeof(*sense_data)); sense = (struct scsi_sense_data_fixed *)sense_data; if (current_error != 0) sense->error_code = SSD_CURRENT_ERROR; else sense->error_code = SSD_DEFERRED_ERROR; sense->flags = sense_key & SSD_KEY; sense->extra_len = 0; if (*sense_len >= 13) { sense->add_sense_code = asc; sense->extra_len = MAX(sense->extra_len, 5); } else sense->flags |= SSD_SDAT_OVFL; if (*sense_len >= 14) { sense->add_sense_code_qual = ascq; sense->extra_len = MAX(sense->extra_len, 6); } else sense->flags |= SSD_SDAT_OVFL; while ((elem_type = va_arg(ap, scsi_sense_elem_type)) != SSD_ELEM_NONE) { if (elem_type >= SSD_ELEM_MAX) { printf("%s: invalid sense type %d\n", __func__, elem_type); break; } len = va_arg(ap, int); data = va_arg(ap, uint8_t *); switch (elem_type) { case SSD_ELEM_SKIP: break; case SSD_ELEM_SKS: if (len > sizeof(sense->sense_key_spec)) break; if (*sense_len < 18) { sense->flags |= SSD_SDAT_OVFL; break; } bcopy(data, &sense->sense_key_spec[0], len); sense->extra_len = MAX(sense->extra_len, 10); break; case SSD_ELEM_COMMAND: if (*sense_len < 12) { sense->flags |= SSD_SDAT_OVFL; break; } if (len > sizeof(sense->cmd_spec_info)) { data += len - sizeof(sense->cmd_spec_info); len -= len - sizeof(sense->cmd_spec_info); } bcopy(data, &sense->cmd_spec_info[ sizeof(sense->cmd_spec_info) - len], len); sense->extra_len = MAX(sense->extra_len, 4); break; case SSD_ELEM_INFO: /* Set VALID bit only if no overflow. */ sense->error_code |= SSD_ERRCODE_VALID; while (len > sizeof(sense->info)) { if (data[0] != 0) sense->error_code &= ~SSD_ERRCODE_VALID; data ++; len --; } bcopy(data, &sense->info[sizeof(sense->info) - len], len); break; case SSD_ELEM_FRU: if (*sense_len < 15) { sense->flags |= SSD_SDAT_OVFL; break; } sense->fru = *data; sense->extra_len = MAX(sense->extra_len, 7); break; case SSD_ELEM_STREAM: sense->flags |= *data & (SSD_ILI | SSD_EOM | SSD_FILEMARK); break; default: /* * We can't handle that in fixed format. Skip it. */ break; } } *sense_len = offsetof(struct scsi_sense_data_fixed, extra_len) + 1 + sense->extra_len; } /* * Fill in SCSI sense data with the specified parameters. This routine can * fill in either fixed or descriptor type sense data. */ void scsi_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap) { if (*sense_len > SSD_FULL_SIZE) *sense_len = SSD_FULL_SIZE; if (sense_format == SSD_TYPE_DESC) scsi_set_sense_data_desc_va(sense_data, sense_len, sense_format, current_error, sense_key, asc, ascq, ap); else scsi_set_sense_data_fixed_va(sense_data, sense_len, sense_format, current_error, sense_key, asc, ascq, ap); } void scsi_set_sense_data(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, ...) 
{ va_list ap; u_int sense_len = SSD_FULL_SIZE; va_start(ap, ascq); scsi_set_sense_data_va(sense_data, &sense_len, sense_format, current_error, sense_key, asc, ascq, ap); va_end(ap); } void scsi_set_sense_data_len(struct scsi_sense_data *sense_data, u_int *sense_len, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, ...) { va_list ap; va_start(ap, ascq); scsi_set_sense_data_va(sense_data, sense_len, sense_format, current_error, sense_key, asc, ascq, ap); va_end(ap); } /* * Get sense information for three similar sense data types. */ int scsi_get_sense_info(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t info_type, uint64_t *info, int64_t *signed_info) { scsi_sense_data_type sense_type; if (sense_len == 0) goto bailout; sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; uint8_t *desc; sense = (struct scsi_sense_data_desc *)sense_data; desc = scsi_find_desc(sense, sense_len, info_type); if (desc == NULL) goto bailout; switch (info_type) { case SSD_DESC_INFO: { struct scsi_sense_info *info_desc; info_desc = (struct scsi_sense_info *)desc; *info = scsi_8btou64(info_desc->info); if (signed_info != NULL) *signed_info = *info; break; } case SSD_DESC_COMMAND: { struct scsi_sense_command *cmd_desc; cmd_desc = (struct scsi_sense_command *)desc; *info = scsi_8btou64(cmd_desc->command_info); if (signed_info != NULL) *signed_info = *info; break; } case SSD_DESC_FRU: { struct scsi_sense_fru *fru_desc; fru_desc = (struct scsi_sense_fru *)desc; *info = fru_desc->fru; if (signed_info != NULL) *signed_info = (int8_t)fru_desc->fru; break; } default: goto bailout; break; } break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; switch (info_type) { case SSD_DESC_INFO: { uint32_t info_val; if ((sense->error_code & SSD_ERRCODE_VALID) == 0) goto bailout; if (SSD_FIXED_IS_PRESENT(sense, sense_len, info) == 0) goto bailout; info_val = scsi_4btoul(sense->info); *info = info_val; if (signed_info != NULL) *signed_info = (int32_t)info_val; break; } case SSD_DESC_COMMAND: { uint32_t cmd_val; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, cmd_spec_info) == 0) || (SSD_FIXED_IS_FILLED(sense, cmd_spec_info) == 0)) goto bailout; cmd_val = scsi_4btoul(sense->cmd_spec_info); if (cmd_val == 0) goto bailout; *info = cmd_val; if (signed_info != NULL) *signed_info = (int32_t)cmd_val; break; } case SSD_DESC_FRU: if ((SSD_FIXED_IS_PRESENT(sense, sense_len, fru) == 0) || (SSD_FIXED_IS_FILLED(sense, fru) == 0)) goto bailout; if (sense->fru == 0) goto bailout; *info = sense->fru; if (signed_info != NULL) *signed_info = (int8_t)sense->fru; break; default: goto bailout; break; } break; } default: goto bailout; break; } return (0); bailout: return (1); } int scsi_get_sks(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t *sks) { scsi_sense_data_type sense_type; if (sense_len == 0) goto bailout; sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_sks *desc; sense = (struct scsi_sense_data_desc *)sense_data; desc = (struct scsi_sense_sks *)scsi_find_desc(sense, sense_len, SSD_DESC_SKS); if (desc == NULL) goto bailout; /* * No need to check the SKS valid bit for descriptor sense. * If the descriptor is present, it is valid. 
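 * Fixed-format sense, by contrast, carries an explicit SSD_SCS_VALID
 * bit, which the fixed-format case below does check before copying.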
*/ bcopy(desc->sense_key_spec, sks, sizeof(desc->sense_key_spec)); break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, sense_key_spec)== 0) || (SSD_FIXED_IS_FILLED(sense, sense_key_spec) == 0)) goto bailout; if ((sense->sense_key_spec[0] & SSD_SCS_VALID) == 0) goto bailout; bcopy(sense->sense_key_spec, sks,sizeof(sense->sense_key_spec)); break; } default: goto bailout; break; } return (0); bailout: return (1); } /* * Provide a common interface for fixed and descriptor sense to detect * whether we have block-specific sense information. It is clear by the * presence of the block descriptor in descriptor mode, but we have to * infer from the inquiry data and ILI bit in fixed mode. */ int scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *block_bits) { scsi_sense_data_type sense_type; if (inq_data != NULL) { switch (SID_TYPE(inq_data)) { case T_DIRECT: case T_RBC: case T_ZBC_HM: break; default: goto bailout; break; } } sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_block *block; sense = (struct scsi_sense_data_desc *)sense_data; block = (struct scsi_sense_block *)scsi_find_desc(sense, sense_len, SSD_DESC_BLOCK); if (block == NULL) goto bailout; *block_bits = block->byte3; break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags) == 0) goto bailout; if ((sense->flags & SSD_ILI) == 0) goto bailout; *block_bits = sense->flags & SSD_ILI; break; } default: goto bailout; break; } return (0); bailout: return (1); } int scsi_get_stream_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *stream_bits) { scsi_sense_data_type sense_type; if (inq_data != NULL) { switch (SID_TYPE(inq_data)) { case T_SEQUENTIAL: break; default: goto bailout; break; } } sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_stream *stream; sense = (struct scsi_sense_data_desc *)sense_data; stream = (struct scsi_sense_stream *)scsi_find_desc(sense, sense_len, SSD_DESC_STREAM); if (stream == NULL) goto bailout; *stream_bits = stream->byte3; break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags) == 0) goto bailout; if ((sense->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK)) == 0) goto bailout; *stream_bits = sense->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK); break; } default: goto bailout; break; } return (0); bailout: return (1); } void scsi_info_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t info) { sbuf_printf(sb, "Info: %#jx", info); } void scsi_command_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t csi) { sbuf_printf(sb, "Command Specific Info: %#jx", csi); } void scsi_progress_sbuf(struct sbuf *sb, uint16_t progress) { sbuf_printf(sb, "Progress: %d%% (%d/%d) complete", (progress * 100) / SSD_SKS_PROGRESS_DENOM, progress, SSD_SKS_PROGRESS_DENOM); } /* * Returns 1 for failure (i.e. SKS isn't valid) and 0 for success. 
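 *
 * Callers typically pair this routine with scsi_get_sks(); a sketch
 * (assuming "sense", "sense_len", and "sense_key" were extracted
 * earlier):
 *
 *	uint8_t sks[3];
 *
 *	if (scsi_get_sks(sense, sense_len, sks) == 0)
 *		scsi_sks_sbuf(sb, sense_key, sks);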
*/ int scsi_sks_sbuf(struct sbuf *sb, int sense_key, uint8_t *sks) { if ((sks[0] & SSD_SKS_VALID) == 0) return (1); switch (sense_key) { case SSD_KEY_ILLEGAL_REQUEST: { struct scsi_sense_sks_field *field; int bad_command; char tmpstr[40]; /*Field Pointer*/ field = (struct scsi_sense_sks_field *)sks; if (field->byte0 & SSD_SKS_FIELD_CMD) bad_command = 1; else bad_command = 0; tmpstr[0] = '\0'; /* Bit pointer is valid */ if (field->byte0 & SSD_SKS_BPV) snprintf(tmpstr, sizeof(tmpstr), "bit %d ", field->byte0 & SSD_SKS_BIT_VALUE); sbuf_printf(sb, "%s byte %d %sis invalid", bad_command ? "Command" : "Data", scsi_2btoul(field->field), tmpstr); break; } case SSD_KEY_UNIT_ATTENTION: { struct scsi_sense_sks_overflow *overflow; overflow = (struct scsi_sense_sks_overflow *)sks; /*UA Condition Queue Overflow*/ sbuf_printf(sb, "Unit Attention Condition Queue %s", (overflow->byte0 & SSD_SKS_OVERFLOW_SET) ? "Overflowed" : "Did Not Overflow??"); break; } case SSD_KEY_RECOVERED_ERROR: case SSD_KEY_HARDWARE_ERROR: case SSD_KEY_MEDIUM_ERROR: { struct scsi_sense_sks_retry *retry; /*Actual Retry Count*/ retry = (struct scsi_sense_sks_retry *)sks; sbuf_printf(sb, "Actual Retry Count: %d", scsi_2btoul(retry->actual_retry_count)); break; } case SSD_KEY_NO_SENSE: case SSD_KEY_NOT_READY: { struct scsi_sense_sks_progress *progress; int progress_val; /*Progress Indication*/ progress = (struct scsi_sense_sks_progress *)sks; progress_val = scsi_2btoul(progress->progress); scsi_progress_sbuf(sb, progress_val); break; } case SSD_KEY_COPY_ABORTED: { struct scsi_sense_sks_segment *segment; char tmpstr[40]; /*Segment Pointer*/ segment = (struct scsi_sense_sks_segment *)sks; tmpstr[0] = '\0'; if (segment->byte0 & SSD_SKS_SEGMENT_BPV) snprintf(tmpstr, sizeof(tmpstr), "bit %d ", segment->byte0 & SSD_SKS_SEGMENT_BITPTR); sbuf_printf(sb, "%s byte %d %sis invalid", (segment->byte0 & SSD_SKS_SEGMENT_SD) ? "Segment" : "Data", scsi_2btoul(segment->field), tmpstr); break; } default: sbuf_printf(sb, "Sense Key Specific: %#x,%#x", sks[0], scsi_2btoul(&sks[1])); break; } return (0); } void scsi_fru_sbuf(struct sbuf *sb, uint64_t fru) { sbuf_printf(sb, "Field Replaceable Unit: %d", (int)fru); } void scsi_stream_sbuf(struct sbuf *sb, uint8_t stream_bits, uint64_t info) { int need_comma; need_comma = 0; /* * XXX KDM this needs more descriptive decoding. */ if (stream_bits & SSD_DESC_STREAM_FM) { sbuf_printf(sb, "Filemark"); need_comma = 1; } if (stream_bits & SSD_DESC_STREAM_EOM) { sbuf_printf(sb, "%sEOM", (need_comma) ? "," : ""); need_comma = 1; } if (stream_bits & SSD_DESC_STREAM_ILI) sbuf_printf(sb, "%sILI", (need_comma) ? 
"," : ""); sbuf_printf(sb, ": Info: %#jx", (uintmax_t) info); } void scsi_block_sbuf(struct sbuf *sb, uint8_t block_bits, uint64_t info) { if (block_bits & SSD_DESC_BLOCK_ILI) sbuf_printf(sb, "ILI: residue %#jx", (uintmax_t) info); } void scsi_sense_info_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_info *info; info = (struct scsi_sense_info *)header; scsi_info_sbuf(sb, cdb, cdb_len, inq_data, scsi_8btou64(info->info)); } void scsi_sense_command_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_command *command; command = (struct scsi_sense_command *)header; scsi_command_sbuf(sb, cdb, cdb_len, inq_data, scsi_8btou64(command->command_info)); } void scsi_sense_sks_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_sks *sks; int error_code, sense_key, asc, ascq; sks = (struct scsi_sense_sks *)header; scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); scsi_sks_sbuf(sb, sense_key, sks->sense_key_spec); } void scsi_sense_fru_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_fru *fru; fru = (struct scsi_sense_fru *)header; scsi_fru_sbuf(sb, (uint64_t)fru->fru); } void scsi_sense_stream_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_stream *stream; uint64_t info; stream = (struct scsi_sense_stream *)header; info = 0; scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, NULL); scsi_stream_sbuf(sb, stream->byte3, info); } void scsi_sense_block_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_block *block; uint64_t info; block = (struct scsi_sense_block *)header; info = 0; scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, NULL); scsi_block_sbuf(sb, block->byte3, info); } void scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_progress *progress; const char *sense_key_desc; const char *asc_desc; int progress_val; progress = (struct scsi_sense_progress *)header; /* * Get descriptions for the sense key, ASC, and ASCQ in the * progress descriptor. These could be different than the values * in the overall sense data. */ scsi_sense_desc(progress->sense_key, progress->add_sense_code, progress->add_sense_code_qual, inq_data, &sense_key_desc, &asc_desc); progress_val = scsi_2btoul(progress->progress); /* * The progress indicator is for the operation described by the * sense key, ASC, and ASCQ in the descriptor. 
*/ sbuf_cat(sb, sense_key_desc); sbuf_printf(sb, " asc:%x,%x (%s): ", progress->add_sense_code, progress->add_sense_code_qual, asc_desc); scsi_progress_sbuf(sb, progress_val); } void scsi_sense_ata_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_ata_ret_desc *res; res = (struct scsi_sense_ata_ret_desc *)header; sbuf_printf(sb, "ATA status: %02x (%s%s%s%s%s%s%s%s), ", res->status, (res->status & 0x80) ? "BSY " : "", (res->status & 0x40) ? "DRDY " : "", (res->status & 0x20) ? "DF " : "", (res->status & 0x10) ? "SERV " : "", (res->status & 0x08) ? "DRQ " : "", (res->status & 0x04) ? "CORR " : "", (res->status & 0x02) ? "IDX " : "", (res->status & 0x01) ? "ERR" : ""); if (res->status & 1) { sbuf_printf(sb, "error: %02x (%s%s%s%s%s%s%s%s), ", res->error, (res->error & 0x80) ? "ICRC " : "", (res->error & 0x40) ? "UNC " : "", (res->error & 0x20) ? "MC " : "", (res->error & 0x10) ? "IDNF " : "", (res->error & 0x08) ? "MCR " : "", (res->error & 0x04) ? "ABRT " : "", (res->error & 0x02) ? "NM " : "", (res->error & 0x01) ? "ILI" : ""); } if (res->flags & SSD_DESC_ATA_FLAG_EXTEND) { sbuf_printf(sb, "count: %02x%02x, ", res->count_15_8, res->count_7_0); sbuf_printf(sb, "LBA: %02x%02x%02x%02x%02x%02x, ", res->lba_47_40, res->lba_39_32, res->lba_31_24, res->lba_23_16, res->lba_15_8, res->lba_7_0); } else { sbuf_printf(sb, "count: %02x, ", res->count_7_0); sbuf_printf(sb, "LBA: %02x%02x%02x, ", res->lba_23_16, res->lba_15_8, res->lba_7_0); } sbuf_printf(sb, "device: %02x, ", res->device); } void scsi_sense_forwarded_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_forwarded *forwarded; const char *sense_key_desc; const char *asc_desc; int error_code, sense_key, asc, ascq; forwarded = (struct scsi_sense_forwarded *)header; scsi_extract_sense_len((struct scsi_sense_data *)forwarded->sense_data, forwarded->length - 2, &error_code, &sense_key, &asc, &ascq, 1); scsi_sense_desc(sense_key, asc, ascq, NULL, &sense_key_desc, &asc_desc); sbuf_printf(sb, "Forwarded sense: %s asc:%x,%x (%s): ", sense_key_desc, asc, ascq, asc_desc); } /* * Generic sense descriptor printing routine. This is used when we have * not yet implemented a specific printing routine for this descriptor. */ void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { int i; uint8_t *buf_ptr; sbuf_printf(sb, "Descriptor %#x:", header->desc_type); buf_ptr = (uint8_t *)&header[1]; for (i = 0; i < header->length; i++, buf_ptr++) sbuf_printf(sb, " %02x", *buf_ptr); } /* * Keep this list in numeric order. This speeds the array traversal. */ struct scsi_sense_desc_printer { uint8_t desc_type; /* * The function arguments here are the superset of what is needed * to print out various different descriptors. Command and * information descriptors need inquiry data and command type. * Sense key specific descriptors need the sense key. * * The sense, cdb, and inquiry data arguments may be NULL, but the * information printed may not be fully decoded as a result. 
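 *
 * To add a printer for a new descriptor type, insert an entry in
 * scsi_sense_printers[] below, keeping the array sorted by desc_type
 * so the early-exit comparison in scsi_sense_desc_sbuf() stays valid.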
*/ void (*print_func)(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); } scsi_sense_printers[] = { {SSD_DESC_INFO, scsi_sense_info_sbuf}, {SSD_DESC_COMMAND, scsi_sense_command_sbuf}, {SSD_DESC_SKS, scsi_sense_sks_sbuf}, {SSD_DESC_FRU, scsi_sense_fru_sbuf}, {SSD_DESC_STREAM, scsi_sense_stream_sbuf}, {SSD_DESC_BLOCK, scsi_sense_block_sbuf}, {SSD_DESC_ATA, scsi_sense_ata_sbuf}, {SSD_DESC_PROGRESS, scsi_sense_progress_sbuf}, {SSD_DESC_FORWARDED, scsi_sense_forwarded_sbuf} }; void scsi_sense_desc_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { u_int i; for (i = 0; i < nitems(scsi_sense_printers); i++) { struct scsi_sense_desc_printer *printer; printer = &scsi_sense_printers[i]; /* * The list is sorted, so quit if we've passed our * descriptor number. */ if (printer->desc_type > header->desc_type) break; if (printer->desc_type != header->desc_type) continue; printer->print_func(sb, sense, sense_len, cdb, cdb_len, inq_data, header); return; } /* * No specific printing routine, so use the generic routine. */ scsi_sense_generic_sbuf(sb, sense, sense_len, cdb, cdb_len, inq_data, header); } scsi_sense_data_type scsi_sense_type(struct scsi_sense_data *sense_data) { switch (sense_data->error_code & SSD_ERRCODE) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: return (SSD_TYPE_DESC); break; case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: return (SSD_TYPE_FIXED); break; default: break; } return (SSD_TYPE_NONE); } struct scsi_print_sense_info { struct sbuf *sb; char *path_str; uint8_t *cdb; int cdb_len; struct scsi_inquiry_data *inq_data; }; static int scsi_print_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len, struct scsi_sense_desc_header *header, void *arg) { struct scsi_print_sense_info *print_info; print_info = (struct scsi_print_sense_info *)arg; switch (header->desc_type) { case SSD_DESC_INFO: case SSD_DESC_FRU: case SSD_DESC_COMMAND: case SSD_DESC_SKS: case SSD_DESC_BLOCK: case SSD_DESC_STREAM: /* * We have already printed these descriptors, if they are * present. */ break; default: { sbuf_printf(print_info->sb, "%s", print_info->path_str); scsi_sense_desc_sbuf(print_info->sb, (struct scsi_sense_data *)sense, sense_len, print_info->cdb, print_info->cdb_len, print_info->inq_data, header); sbuf_printf(print_info->sb, "\n"); break; } } /* * Tell the iterator that we want to see more descriptors if they * are present. */ return (0); } void scsi_sense_only_sbuf(struct scsi_sense_data *sense, u_int sense_len, struct sbuf *sb, char *path_str, struct scsi_inquiry_data *inq_data, uint8_t *cdb, int cdb_len) { int error_code, sense_key, asc, ascq; sbuf_cat(sb, path_str); scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); sbuf_printf(sb, "SCSI sense: "); switch (error_code) { case SSD_DEFERRED_ERROR: case SSD_DESC_DEFERRED_ERROR: sbuf_printf(sb, "Deferred error: "); /* FALLTHROUGH */ case SSD_CURRENT_ERROR: case SSD_DESC_CURRENT_ERROR: { struct scsi_sense_data_desc *desc_sense; struct scsi_print_sense_info print_info; const char *sense_key_desc; const char *asc_desc; uint8_t sks[3]; uint64_t val; int info_valid; /* * Get descriptions for the sense key, ASC, and ASCQ. If * these aren't present in the sense data (i.e. 
the sense * data isn't long enough), the -1 values that * scsi_extract_sense_len() returns will yield default * or error descriptions. */ scsi_sense_desc(sense_key, asc, ascq, inq_data, &sense_key_desc, &asc_desc); /* * We first print the sense key and ASC/ASCQ. */ sbuf_cat(sb, sense_key_desc); sbuf_printf(sb, " asc:%x,%x (%s)\n", asc, ascq, asc_desc); /* * Get the info field if it is valid. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &val, NULL) == 0) info_valid = 1; else info_valid = 0; if (info_valid != 0) { uint8_t bits; /* * Determine whether we have any block or stream * device-specific information. */ if (scsi_get_block_info(sense, sense_len, inq_data, &bits) == 0) { sbuf_cat(sb, path_str); scsi_block_sbuf(sb, bits, val); sbuf_printf(sb, "\n"); } else if (scsi_get_stream_info(sense, sense_len, inq_data, &bits) == 0) { sbuf_cat(sb, path_str); scsi_stream_sbuf(sb, bits, val); sbuf_printf(sb, "\n"); } else if (val != 0) { /* * The information field can be valid but 0. * If the block or stream bits aren't set, * and this is 0, it isn't terribly useful * to print it out. */ sbuf_cat(sb, path_str); scsi_info_sbuf(sb, cdb, cdb_len, inq_data, val); sbuf_printf(sb, "\n"); } } /* * Print the FRU. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &val, NULL) == 0) { sbuf_cat(sb, path_str); scsi_fru_sbuf(sb, val); sbuf_printf(sb, "\n"); } /* * Print any command-specific information. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND, &val, NULL) == 0) { sbuf_cat(sb, path_str); scsi_command_sbuf(sb, cdb, cdb_len, inq_data, val); sbuf_printf(sb, "\n"); } /* * Print out any sense-key-specific information. */ if (scsi_get_sks(sense, sense_len, sks) == 0) { sbuf_cat(sb, path_str); scsi_sks_sbuf(sb, sense_key, sks); sbuf_printf(sb, "\n"); } /* * If this is fixed sense, we're done. If we have * descriptor sense, we might have more information * available. */ if (scsi_sense_type(sense) != SSD_TYPE_DESC) break; desc_sense = (struct scsi_sense_data_desc *)sense; print_info.sb = sb; print_info.path_str = path_str; print_info.cdb = cdb; print_info.cdb_len = cdb_len; print_info.inq_data = inq_data; /* * Print any sense descriptors that we have not already printed. */ scsi_desc_iterate(desc_sense, sense_len, scsi_print_desc_func, &print_info); break; } case -1: /* * scsi_extract_sense_len() sets values to -1 if the * show_errors flag is set and they aren't present in the * sense data. This means that sense_len is 0. */ sbuf_printf(sb, "No sense data present\n"); break; default: { sbuf_printf(sb, "Error code 0x%x", error_code); if (sense->error_code & SSD_ERRCODE_VALID) { struct scsi_sense_data_fixed *fixed_sense; fixed_sense = (struct scsi_sense_data_fixed *)sense; if (SSD_FIXED_IS_PRESENT(fixed_sense, sense_len, info)){ uint32_t info; info = scsi_4btoul(fixed_sense->info); sbuf_printf(sb, " at block no. %d (decimal)", info); } } sbuf_printf(sb, "\n"); break; } } } /* * scsi_sense_sbuf() returns 0 for success and -1 for failure. 
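 *
 * A minimal userland usage sketch (assumes "device" came from
 * cam_open_device() and "csio" describes a completed command with
 * valid autosense; this mirrors scsi_sense_print() below):
 *
 *	struct sbuf sb;
 *	char str[512];
 *
 *	sbuf_new(&sb, str, sizeof(str), 0);
 *	if (scsi_sense_sbuf(device, csio, &sb,
 *	    SSS_FLAG_PRINT_COMMAND) == 0) {
 *		sbuf_finish(&sb);
 *		printf("%s", sbuf_data(&sb));
 *	}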
*/ #ifdef _KERNEL int scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags) #else /* !_KERNEL */ int scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags) #endif /* _KERNEL/!_KERNEL */ { struct scsi_sense_data *sense; struct scsi_inquiry_data *inq_data; #ifdef _KERNEL struct ccb_getdev *cgd; #endif /* _KERNEL */ char path_str[64]; #ifndef _KERNEL if (device == NULL) return(-1); #endif /* !_KERNEL */ if ((csio == NULL) || (sb == NULL)) return(-1); /* * If the CDB is a physical address, we can't deal with it. */ if ((csio->ccb_h.flags & CAM_CDB_PHYS) != 0) flags &= ~SSS_FLAG_PRINT_COMMAND; #ifdef _KERNEL xpt_path_string(csio->ccb_h.path, path_str, sizeof(path_str)); #else /* !_KERNEL */ cam_path_string(device, path_str, sizeof(path_str)); #endif /* _KERNEL/!_KERNEL */ #ifdef _KERNEL if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL) return(-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, csio->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); /* * If the device is unconfigured, just pretend that it is a hard * drive. scsi_op_desc() needs this. */ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE) cgd->inq_data.device = T_DIRECT; inq_data = &cgd->inq_data; #else /* !_KERNEL */ inq_data = &device->inq_data; #endif /* _KERNEL/!_KERNEL */ sense = NULL; if (flags & SSS_FLAG_PRINT_COMMAND) { sbuf_cat(sb, path_str); #ifdef _KERNEL scsi_command_string(csio, sb); #else /* !_KERNEL */ scsi_command_string(device, csio, sb); #endif /* _KERNEL/!_KERNEL */ sbuf_printf(sb, "\n"); } /* * If the sense data is a physical pointer, forget it. */ if (csio->ccb_h.flags & CAM_SENSE_PTR) { if (csio->ccb_h.flags & CAM_SENSE_PHYS) { #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(-1); } else { /* * bcopy the pointer to avoid unaligned access * errors on finicky architectures. We don't * ensure that the sense data is pointer aligned. */ bcopy((struct scsi_sense_data **)&csio->sense_data, &sense, sizeof(struct scsi_sense_data *)); } } else { /* * If the physical sense flag is set, but the sense pointer * is not also set, we assume that the user is an idiot and * return. (Well, okay, it could be that somehow, the * entire csio is physical, but we would probably have core * dumped on one of the bogus pointer dereferences above * already.)
*/ if (csio->ccb_h.flags & CAM_SENSE_PHYS) { #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(-1); } else sense = &csio->sense_data; } scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb, path_str, inq_data, scsiio_cdb_ptr(csio), csio->cdb_len); #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(0); } #ifdef _KERNEL char * scsi_sense_string(struct ccb_scsiio *csio, char *str, int str_len) #else /* !_KERNEL */ char * scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio, char *str, int str_len) #endif /* _KERNEL/!_KERNEL */ { struct sbuf sb; sbuf_new(&sb, str, str_len, 0); #ifdef _KERNEL scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND); #else /* !_KERNEL */ scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND); #endif /* _KERNEL/!_KERNEL */ sbuf_finish(&sb); return(sbuf_data(&sb)); } #ifdef _KERNEL void scsi_sense_print(struct ccb_scsiio *csio) { struct sbuf sb; char str[512]; sbuf_new(&sb, str, sizeof(str), 0); scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); sbuf_putbuf(&sb); } #else /* !_KERNEL */ void scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio, FILE *ofile) { struct sbuf sb; char str[512]; if ((device == NULL) || (csio == NULL) || (ofile == NULL)) return; sbuf_new(&sb, str, sizeof(str), 0); scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); fprintf(ofile, "%s", sbuf_data(&sb)); } #endif /* _KERNEL/!_KERNEL */ /* * Extract basic sense information. This is backward-compatible with the * previous implementation. For new implementations, * scsi_extract_sense_len() is recommended. */ void scsi_extract_sense(struct scsi_sense_data *sense_data, int *error_code, int *sense_key, int *asc, int *ascq) { scsi_extract_sense_len(sense_data, sizeof(*sense_data), error_code, sense_key, asc, ascq, /*show_errors*/ 0); } /* * Extract basic sense information from SCSI I/O CCB structure. */ int scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq) { struct scsi_sense_data *sense_data; /* Make sure there are some sense data we can access. */ if (ccb->ccb_h.func_code != XPT_SCSI_IO || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SCSI_STATUS_ERROR || (ccb->csio.scsi_status != SCSI_STATUS_CHECK_COND) || (ccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0 || (ccb->ccb_h.flags & CAM_SENSE_PHYS)) return (0); if (ccb->ccb_h.flags & CAM_SENSE_PTR) bcopy((struct scsi_sense_data **)&ccb->csio.sense_data, &sense_data, sizeof(struct scsi_sense_data *)); else sense_data = &ccb->csio.sense_data; scsi_extract_sense_len(sense_data, ccb->csio.sense_len - ccb->csio.sense_resid, error_code, sense_key, asc, ascq, 1); if (*error_code == -1) return (0); return (1); } /* * Extract basic sense information. If show_errors is set, sense values * will be set to -1 if they are not present. */ void scsi_extract_sense_len(struct scsi_sense_data *sense_data, u_int sense_len, int *error_code, int *sense_key, int *asc, int *ascq, int show_errors) { /* * If we have no length, we have no sense. 
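 * With show_errors set, the -1 values let callers distinguish "no
 * sense data at all" from a real sense key of 0 (NO SENSE);
 * scsi_sense_only_sbuf() above relies on this to print
 * "No sense data present".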
*/ if (sense_len == 0) { if (show_errors == 0) { *error_code = 0; *sense_key = 0; *asc = 0; *ascq = 0; } else { *error_code = -1; *sense_key = -1; *asc = -1; *ascq = -1; } return; } *error_code = sense_data->error_code & SSD_ERRCODE; switch (*error_code) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: { struct scsi_sense_data_desc *sense; sense = (struct scsi_sense_data_desc *)sense_data; if (SSD_DESC_IS_PRESENT(sense, sense_len, sense_key)) *sense_key = sense->sense_key & SSD_KEY; else *sense_key = (show_errors) ? -1 : 0; if (SSD_DESC_IS_PRESENT(sense, sense_len, add_sense_code)) *asc = sense->add_sense_code; else *asc = (show_errors) ? -1 : 0; if (SSD_DESC_IS_PRESENT(sense, sense_len, add_sense_code_qual)) *ascq = sense->add_sense_code_qual; else *ascq = (show_errors) ? -1 : 0; break; } case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: default: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags)) *sense_key = sense->flags & SSD_KEY; else *sense_key = (show_errors) ? -1 : 0; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, add_sense_code)) && (SSD_FIXED_IS_FILLED(sense, add_sense_code))) *asc = sense->add_sense_code; else *asc = (show_errors) ? -1 : 0; if ((SSD_FIXED_IS_PRESENT(sense, sense_len,add_sense_code_qual)) && (SSD_FIXED_IS_FILLED(sense, add_sense_code_qual))) *ascq = sense->add_sense_code_qual; else *ascq = (show_errors) ? -1 : 0; break; } } } int scsi_get_sense_key(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (sense_key); } int scsi_get_asc(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (asc); } int scsi_get_ascq(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (ascq); } /* * This function currently requires at least 36 bytes, or * SHORT_INQUIRY_LENGTH, worth of data to function properly. If this * function needs more or less data in the future, another length should be * defined in scsi_all.h to indicate the minimum amount of data necessary * for this routine to function properly. */ void scsi_print_inquiry_sbuf(struct sbuf *sb, struct scsi_inquiry_data *inq_data) { u_int8_t type; char *dtype, *qtype; type = SID_TYPE(inq_data); /* * Figure out basic device type and qualifier. 
*/ if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) { qtype = " (vendor-unique qualifier)"; } else { switch (SID_QUAL(inq_data)) { case SID_QUAL_LU_CONNECTED: qtype = ""; break; case SID_QUAL_LU_OFFLINE: qtype = " (offline)"; break; case SID_QUAL_RSVD: qtype = " (reserved qualifier)"; break; default: case SID_QUAL_BAD_LU: qtype = " (LUN not supported)"; break; } } switch (type) { case T_DIRECT: dtype = "Direct Access"; break; case T_SEQUENTIAL: dtype = "Sequential Access"; break; case T_PRINTER: dtype = "Printer"; break; case T_PROCESSOR: dtype = "Processor"; break; case T_WORM: dtype = "WORM"; break; case T_CDROM: dtype = "CD-ROM"; break; case T_SCANNER: dtype = "Scanner"; break; case T_OPTICAL: dtype = "Optical"; break; case T_CHANGER: dtype = "Changer"; break; case T_COMM: dtype = "Communication"; break; case T_STORARRAY: dtype = "Storage Array"; break; case T_ENCLOSURE: dtype = "Enclosure Services"; break; case T_RBC: dtype = "Simplified Direct Access"; break; case T_OCRW: dtype = "Optical Card Read/Write"; break; case T_OSD: dtype = "Object-Based Storage"; break; case T_ADC: dtype = "Automation/Drive Interface"; break; case T_ZBC_HM: dtype = "Host Managed Zoned Block"; break; case T_NODEVICE: dtype = "Uninstalled"; break; default: dtype = "unknown"; break; } scsi_print_inquiry_short_sbuf(sb, inq_data); sbuf_printf(sb, "%s %s ", SID_IS_REMOVABLE(inq_data) ? "Removable" : "Fixed", dtype); if (SID_ANSI_REV(inq_data) == SCSI_REV_0) sbuf_printf(sb, "SCSI "); else if (SID_ANSI_REV(inq_data) <= SCSI_REV_SPC) { sbuf_printf(sb, "SCSI-%d ", SID_ANSI_REV(inq_data)); } else { sbuf_printf(sb, "SPC-%d SCSI ", SID_ANSI_REV(inq_data) - 2); } sbuf_printf(sb, "device%s\n", qtype); } void scsi_print_inquiry(struct scsi_inquiry_data *inq_data) { struct sbuf sb; char buffer[120]; sbuf_new(&sb, buffer, 120, SBUF_FIXEDLEN); scsi_print_inquiry_sbuf(&sb, inq_data); sbuf_finish(&sb); sbuf_putbuf(&sb); } void scsi_print_inquiry_short_sbuf(struct sbuf *sb, struct scsi_inquiry_data *inq_data) { sbuf_printf(sb, "<"); cam_strvis_sbuf(sb, inq_data->vendor, sizeof(inq_data->vendor), 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, inq_data->product, sizeof(inq_data->product), 0); sbuf_printf(sb, " "); cam_strvis_sbuf(sb, inq_data->revision, sizeof(inq_data->revision), 0); sbuf_printf(sb, "> "); } void scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data) { struct sbuf sb; char buffer[84]; sbuf_new(&sb, buffer, 84, SBUF_FIXEDLEN); scsi_print_inquiry_short_sbuf(&sb, inq_data); sbuf_finish(&sb); sbuf_putbuf(&sb); } /* * Table of syncrates that don't follow the "divisible by 4" * rule. This table will be expanded in future SCSI specs. */ static struct { u_int period_factor; u_int period; /* in 100ths of ns */ } scsi_syncrates[] = { { 0x08, 625 }, /* FAST-160 */ { 0x09, 1250 }, /* FAST-80 */ { 0x0a, 2500 }, /* FAST-40 40MHz */ { 0x0b, 3030 }, /* FAST-40 33MHz */ { 0x0c, 5000 } /* FAST-20 */ }; /* * Return the frequency in kHz corresponding to the given * sync period factor. 
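 *
 * For example, a factor of 0x0a is in the exception table above
 * (period 2500, i.e. 25ns), giving 100000000 / 2500 = 40000 kHz
 * (FAST-40).  A factor of 25 (0x19) is not, so the standard
 * conversion below applies: 10000000 / (25 * 4 * 10) = 10000 kHz
 * (FAST-10).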
*/ u_int scsi_calc_syncsrate(u_int period_factor) { u_int i; u_int num_syncrates; /* * It's a bug if the period factor is zero, but if it is anyway, don't * die with a divide fault; instead, return something which * 'approximates' async. */ if (period_factor == 0) { return (3300); } num_syncrates = nitems(scsi_syncrates); /* See if the period is in the "exception" table */ for (i = 0; i < num_syncrates; i++) { if (period_factor == scsi_syncrates[i].period_factor) { /* Period in kHz */ return (100000000 / scsi_syncrates[i].period); } } /* * Wasn't in the table, so use the standard * 4 times conversion. */ return (10000000 / (period_factor * 4 * 10)); } /* * Return the SCSI sync parameter that corresponds to * the passed-in period in 10ths of ns. */ u_int scsi_calc_syncparam(u_int period) { u_int i; u_int num_syncrates; if (period == 0) return (~0); /* Async */ /* Adjust for exception table being in 100ths. */ period *= 10; num_syncrates = nitems(scsi_syncrates); /* See if the period is in the "exception" table */ for (i = 0; i < num_syncrates; i++) { if (period <= scsi_syncrates[i].period) { /* Period in 100ths of ns */ return (scsi_syncrates[i].period_factor); } } /* * Wasn't in the table, so use the standard * 1/4 period in ns conversion. */ return (period/400); } int scsi_devid_is_naa_ieee_reg(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; struct scsi_vpd_id_naa_basic *naa; descr = (struct scsi_vpd_id_descriptor *)bufp; naa = (struct scsi_vpd_id_naa_basic *)descr->identifier; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; if (descr->length < sizeof(struct scsi_vpd_id_naa_ieee_reg)) return 0; if ((naa->naa >> SVPD_ID_NAA_NAA_SHIFT) != SVPD_ID_NAA_IEEE_REG) return 0; return 1; } int scsi_devid_is_sas_target(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if (!scsi_devid_is_naa_ieee_reg(bufp)) return 0; if ((descr->id_type & SVPD_ID_PIV) == 0) /* proto field reserved */ return 0; if ((descr->proto_codeset >> SVPD_ID_PROTO_SHIFT) != SCSI_PROTO_SAS) return 0; return 1; } int scsi_devid_is_lun_eui64(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_EUI64) return 0; return 1; } int scsi_devid_is_lun_naa(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; return 1; } int scsi_devid_is_lun_t10(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_T10) return 0; return 1; } int scsi_devid_is_lun_name(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_SCSI_NAME) return 0; return 1; } int scsi_devid_is_lun_md5(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_MD5_LUN_ID) return 0; return 1; } int
scsi_devid_is_lun_uuid(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_UUID) return 0; return 1; } int scsi_devid_is_port_naa(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_PORT) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; return 1; } struct scsi_vpd_id_descriptor * scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len, scsi_devid_checkfn_t ck_fn) { uint8_t *desc_buf_end; desc_buf_end = (uint8_t *)desc + len; for (; desc->identifier <= desc_buf_end && desc->identifier + desc->length <= desc_buf_end; desc = (struct scsi_vpd_id_descriptor *)(desc->identifier + desc->length)) { if (ck_fn == NULL || ck_fn((uint8_t *)desc) != 0) return (desc); } return (NULL); } struct scsi_vpd_id_descriptor * scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len, scsi_devid_checkfn_t ck_fn) { uint32_t len; if (page_len < sizeof(*id)) return (NULL); len = MIN(scsi_2btoul(id->length), page_len - sizeof(*id)); return (scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) id->desc_list, len, ck_fn)); } int scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr, uint32_t valid_len) { switch (hdr->format_protocol & SCSI_TRN_PROTO_MASK) { case SCSI_PROTO_FC: { struct scsi_transportid_fcp *fcp; uint64_t n_port_name; fcp = (struct scsi_transportid_fcp *)hdr; n_port_name = scsi_8btou64(fcp->n_port_name); sbuf_printf(sb, "FCP address: 0x%.16jx",(uintmax_t)n_port_name); break; } case SCSI_PROTO_SPI: { struct scsi_transportid_spi *spi; spi = (struct scsi_transportid_spi *)hdr; sbuf_printf(sb, "SPI address: %u,%u", scsi_2btoul(spi->scsi_addr), scsi_2btoul(spi->rel_trgt_port_id)); break; } case SCSI_PROTO_SSA: /* * XXX KDM there is no transport ID defined in SPC-4 for * SSA. */ break; case SCSI_PROTO_1394: { struct scsi_transportid_1394 *sbp; uint64_t eui64; sbp = (struct scsi_transportid_1394 *)hdr; eui64 = scsi_8btou64(sbp->eui64); sbuf_printf(sb, "SBP address: 0x%.16jx", (uintmax_t)eui64); break; } case SCSI_PROTO_RDMA: { struct scsi_transportid_rdma *rdma; unsigned int i; rdma = (struct scsi_transportid_rdma *)hdr; sbuf_printf(sb, "RDMA address: 0x"); for (i = 0; i < sizeof(rdma->initiator_port_id); i++) sbuf_printf(sb, "%02x", rdma->initiator_port_id[i]); break; } case SCSI_PROTO_ISCSI: { uint32_t add_len, i; uint8_t *iscsi_name = NULL; int nul_found = 0; sbuf_printf(sb, "iSCSI address: "); if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == SCSI_TRN_ISCSI_FORMAT_DEVICE) { struct scsi_transportid_iscsi_device *dev; dev = (struct scsi_transportid_iscsi_device *)hdr; /* * Verify how much additional data we really have. 
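 * The descriptor's additional_length field claims how long the
 * iSCSI name is, but the buffer we were handed may not actually
 * contain that many valid bytes, so clamp it to what valid_len
 * says is really there.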
*/ add_len = scsi_2btoul(dev->additional_length); add_len = MIN(add_len, valid_len - __offsetof(struct scsi_transportid_iscsi_device, iscsi_name)); iscsi_name = &dev->iscsi_name[0]; } else if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == SCSI_TRN_ISCSI_FORMAT_PORT) { struct scsi_transportid_iscsi_port *port; port = (struct scsi_transportid_iscsi_port *)hdr; add_len = scsi_2btoul(port->additional_length); add_len = MIN(add_len, valid_len - __offsetof(struct scsi_transportid_iscsi_port, iscsi_name)); iscsi_name = &port->iscsi_name[0]; } else { sbuf_printf(sb, "unknown format %x", (hdr->format_protocol & SCSI_TRN_FORMAT_MASK) >> SCSI_TRN_FORMAT_SHIFT); break; } if (add_len == 0) { sbuf_printf(sb, "not enough data"); break; } /* * This is supposed to be a NUL-terminated ASCII * string, but you never know. So we're going to * check. We need to do this because there is no * sbuf equivalent of strncat(). */ for (i = 0; i < add_len; i++) { if (iscsi_name[i] == '\0') { nul_found = 1; break; } } /* * If there is a NUL in the name, we can just use * sbuf_cat(). Otherwise we need to use sbuf_bcat(). */ if (nul_found != 0) sbuf_cat(sb, iscsi_name); else sbuf_bcat(sb, iscsi_name, add_len); break; } case SCSI_PROTO_SAS: { struct scsi_transportid_sas *sas; uint64_t sas_addr; sas = (struct scsi_transportid_sas *)hdr; sas_addr = scsi_8btou64(sas->sas_address); sbuf_printf(sb, "SAS address: 0x%.16jx", (uintmax_t)sas_addr); break; } case SCSI_PROTO_ADITP: case SCSI_PROTO_ATA: case SCSI_PROTO_UAS: /* * No Transport ID format for ADI, ATA or USB is defined in * SPC-4. */ sbuf_printf(sb, "No known Transport ID format for protocol " "%#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK); break; case SCSI_PROTO_SOP: { struct scsi_transportid_sop *sop; struct scsi_sop_routing_id_norm *rid; sop = (struct scsi_transportid_sop *)hdr; rid = (struct scsi_sop_routing_id_norm *)sop->routing_id; /* * Note that there is no alternate format specified in SPC-4 * for the PCIe routing ID, so we don't really have a way * to know whether the second byte of the routing ID is * a device and function or just a function. So we just * assume bus,device,function. */ sbuf_printf(sb, "SOP Routing ID: %u,%u,%u", rid->bus, rid->devfunc >> SCSI_TRN_SOP_DEV_SHIFT, rid->devfunc & SCSI_TRN_SOP_FUNC_NORM_MAX); break; } case SCSI_PROTO_NONE: default: sbuf_printf(sb, "Unknown protocol %#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK); break; } return (0); } struct scsi_nv scsi_proto_map[] = { { "fcp", SCSI_PROTO_FC }, { "spi", SCSI_PROTO_SPI }, { "ssa", SCSI_PROTO_SSA }, { "sbp", SCSI_PROTO_1394 }, { "1394", SCSI_PROTO_1394 }, { "srp", SCSI_PROTO_RDMA }, { "rdma", SCSI_PROTO_RDMA }, { "iscsi", SCSI_PROTO_ISCSI }, { "iqn", SCSI_PROTO_ISCSI }, { "sas", SCSI_PROTO_SAS }, { "aditp", SCSI_PROTO_ADITP }, { "ata", SCSI_PROTO_ATA }, { "uas", SCSI_PROTO_UAS }, { "usb", SCSI_PROTO_UAS }, { "sop", SCSI_PROTO_SOP } }; const char * scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value) { int i; for (i = 0; i < num_table_entries; i++) { if (table[i].value == value) return (table[i].name); } return (NULL); } /* * Given a name/value table, find a value matching the given name. 
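 *
 * Matching is done by prefix, so an abbreviated name is accepted
 * when it is unambiguous.  For example, against scsi_proto_map
 * above, "fc" matches only "fcp" and is found, while "s" matches
 * "spi", "ssa", "sbp", "srp", "sas" and "sop" and is therefore
 * ambiguous.
 *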
* Return values: * SCSI_NV_FOUND - match found * SCSI_NV_AMBIGUOUS - more than one match, none of them exact * SCSI_NV_NOT_FOUND - no match found */ scsi_nv_status scsi_get_nv(struct scsi_nv *table, int num_table_entries, char *name, int *table_entry, scsi_nv_flags flags) { int i, num_matches = 0; for (i = 0; i < num_table_entries; i++) { size_t table_len, name_len; table_len = strlen(table[i].name); name_len = strlen(name); if ((((flags & SCSI_NV_FLAG_IG_CASE) != 0) && (strncasecmp(table[i].name, name, name_len) == 0)) || (((flags & SCSI_NV_FLAG_IG_CASE) == 0) && (strncmp(table[i].name, name, name_len) == 0))) { *table_entry = i; /* * Check for an exact match. If we have the same * number of characters in the table as the argument, * and we already know they're the same, we have * an exact match. */ if (table_len == name_len) return (SCSI_NV_FOUND); /* * Otherwise, bump up the number of matches. We'll * see later how many we have. */ num_matches++; } } if (num_matches > 1) return (SCSI_NV_AMBIGUOUS); else if (num_matches == 1) return (SCSI_NV_FOUND); else return (SCSI_NV_NOT_FOUND); } /* * Parse transport IDs for Fibre Channel, 1394 and SAS. Since these are * all 64-bit numbers, the code is similar. */ int scsi_parse_transportid_64bit(int proto_id, char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { uint64_t value; char *endptr; int retval; size_t alloc_size; retval = 0; value = strtouq(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing ID %s, 64-bit number required", __func__, id_str); } retval = 1; goto bailout; } switch (proto_id) { case SCSI_PROTO_FC: alloc_size = sizeof(struct scsi_transportid_fcp); break; case SCSI_PROTO_1394: alloc_size = sizeof(struct scsi_transportid_1394); break; case SCSI_PROTO_SAS: alloc_size = sizeof(struct scsi_transportid_sas); break; default: if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unsupported " "protocol %d", __func__, proto_id); } retval = 1; goto bailout; break; /* NOTREACHED */ } #ifdef _KERNEL *hdr = malloc(alloc_size, type, flags); #else /* _KERNEL */ *hdr = malloc(alloc_size); #endif /*_KERNEL */ if (*hdr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, alloc_size); } retval = 1; goto bailout; } *alloc_len = alloc_size; bzero(*hdr, alloc_size); switch (proto_id) { case SCSI_PROTO_FC: { struct scsi_transportid_fcp *fcp; fcp = (struct scsi_transportid_fcp *)(*hdr); fcp->format_protocol = SCSI_PROTO_FC | SCSI_TRN_FCP_FORMAT_DEFAULT; scsi_u64to8b(value, fcp->n_port_name); break; } case SCSI_PROTO_1394: { struct scsi_transportid_1394 *sbp; sbp = (struct scsi_transportid_1394 *)(*hdr); sbp->format_protocol = SCSI_PROTO_1394 | SCSI_TRN_1394_FORMAT_DEFAULT; scsi_u64to8b(value, sbp->eui64); break; } case SCSI_PROTO_SAS: { struct scsi_transportid_sas *sas; sas = (struct scsi_transportid_sas *)(*hdr); sas->format_protocol = SCSI_PROTO_SAS | SCSI_TRN_SAS_FORMAT_DEFAULT; scsi_u64to8b(value, sas->sas_address); break; } default: break; } bailout: return (retval); } /* * Parse a SPI (Parallel SCSI) address of the form: id,rel_tgt_port */ int scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { unsigned long scsi_addr, target_port; 
struct scsi_transportid_spi *spi; char *tmpstr, *endptr; int retval; retval = 0; tmpstr = strsep(&id_str, ","); if (tmpstr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no ID found", __func__); } retval = 1; goto bailout; } scsi_addr = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing SCSI ID %s, number required", __func__, tmpstr); } retval = 1; goto bailout; } if (id_str == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no relative " "target port found", __func__); } retval = 1; goto bailout; } target_port = strtoul(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing relative target port %s, number " "required", __func__, id_str); } retval = 1; goto bailout; } #ifdef _KERNEL spi = malloc(sizeof(*spi), type, flags); #else spi = malloc(sizeof(*spi)); #endif if (spi == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*spi)); } retval = 1; goto bailout; } *alloc_len = sizeof(*spi); bzero(spi, sizeof(*spi)); spi->format_protocol = SCSI_PROTO_SPI | SCSI_TRN_SPI_FORMAT_DEFAULT; scsi_ulto2b(scsi_addr, spi->scsi_addr); scsi_ulto2b(target_port, spi->rel_trgt_port_id); *hdr = (struct scsi_transportid_header *)spi; bailout: return (retval); } /* * Parse an RDMA/SRP Initiator Port ID string. This is 32 hexadecimal digits, * optionally prefixed by "0x" or "0X". */ int scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { struct scsi_transportid_rdma *rdma; int retval; size_t id_len, rdma_id_size; uint8_t rdma_id[SCSI_TRN_RDMA_PORT_LEN]; char *tmpstr; unsigned int i, j; retval = 0; id_len = strlen(id_str); rdma_id_size = SCSI_TRN_RDMA_PORT_LEN; /* * Check the size. It needs to be either 32 or 34 characters long. */ if ((id_len != (rdma_id_size * 2)) && (id_len != ((rdma_id_size * 2) + 2))) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: RDMA ID " "must be 32 hex digits (0x prefix " "optional), only %zu seen", __func__, id_len); } retval = 1; goto bailout; } tmpstr = id_str; /* * If the user gave us 34 characters, the string needs to start * with '0x'. */ if (id_len == ((rdma_id_size * 2) + 2)) { if ((tmpstr[0] == '0') && ((tmpstr[1] == 'x') || (tmpstr[1] == 'X'))) { tmpstr += 2; } else { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: RDMA " "ID prefix, if used, must be \"0x\", " "got %s", __func__, tmpstr); } retval = 1; goto bailout; } } bzero(rdma_id, sizeof(rdma_id)); /* * Convert ASCII hex into binary bytes. There is no standard * 128-bit integer type, and so no strtou128t() routine to convert * from hex into a large integer. In the end, we're not converting to * an integer, but rather to a byte array; that, plus the fact * that we require the user to give us 32 hex digits, simplifies the * logic. */ for (i = 0; i < (rdma_id_size * 2); i++) { int cur_shift; unsigned char c; /* Increment the byte array one for every 2 hex digits */ j = i >> 1; /* * The first digit in every pair is the most significant * 4 bits. The second is the least significant 4 bits. */ if ((i % 2) == 0) cur_shift = 4; else cur_shift = 0; c = tmpstr[i]; /* Convert the ASCII hex character into a number */ if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ?
'A' - 10 : 'a' - 10; else { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "RDMA ID must be hex digits, got " "invalid character %c", __func__, tmpstr[i]); } retval = 1; goto bailout; } /* * The converted number can't be less than 0; the type is * unsigned, and the subtraction logic will not give us * a negative number. So we only need to make sure that * the value is not greater than 0xf. (i.e. make sure the * user didn't give us a value like "0x12jklmno"). */ if (c > 0xf) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "RDMA ID must be hex digits, got " "invalid character %c", __func__, tmpstr[i]); } retval = 1; goto bailout; } rdma_id[j] |= c << cur_shift; } #ifdef _KERNEL rdma = malloc(sizeof(*rdma), type, flags); #else rdma = malloc(sizeof(*rdma)); #endif if (rdma == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*rdma)); } retval = 1; goto bailout; } *alloc_len = sizeof(*rdma); bzero(rdma, *alloc_len); rdma->format_protocol = SCSI_PROTO_RDMA | SCSI_TRN_RDMA_FORMAT_DEFAULT; bcopy(rdma_id, rdma->initiator_port_id, SCSI_TRN_RDMA_PORT_LEN); *hdr = (struct scsi_transportid_header *)rdma; bailout: return (retval); } /* * Parse an iSCSI name. The format is either just the name: * * iqn.2012-06.com.example:target0 * or the name, separator and initiator session ID: * * iqn.2012-06.com.example:target0,i,0x123 * * The separator format is exact. */ int scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { size_t id_len, sep_len, id_size, name_len; int retval; unsigned int i, sep_pos, sep_found; const char *sep_template = ",i,0x"; const char *iqn_prefix = "iqn."; struct scsi_transportid_iscsi_device *iscsi; retval = 0; sep_found = 0; id_len = strlen(id_str); sep_len = strlen(sep_template); /* * The separator is defined as exactly ',i,0x'. Any other commas, * or any other form, is an error. So look for a comma, and once * we find that, the next few characters must match the separator * exactly. Once we get through the separator, there should be at * least one character. */ for (i = 0, sep_pos = 0; i < id_len; i++) { if (sep_pos == 0) { if (id_str[i] == sep_template[sep_pos]) sep_pos++; continue; } if (sep_pos < sep_len) { if (id_str[i] == sep_template[sep_pos]) { sep_pos++; continue; } if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "invalid separator in iSCSI name " "\"%s\"", __func__, id_str); } retval = 1; goto bailout; } else { sep_found = 1; break; } } /* * Check to see whether we have a separator but no digits after it. */ if ((sep_pos != 0) && (sep_found == 0)) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no digits " "found after separator in iSCSI name \"%s\"", __func__, id_str); } retval = 1; goto bailout; } /* * The incoming ID string has the "iqn." prefix stripped off. We * need enough space for the base structure (the structures are the * same for the two iSCSI forms), the prefix, the ID string and a * terminating NUL. 
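 * For example, for a (hypothetical) id_str of
 * "2012-06.com.example:target0" (27 bytes), id_size works out to
 * sizeof(*iscsi) + 4 + 27 + 1.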
*/ id_size = sizeof(*iscsi) + strlen(iqn_prefix) + id_len + 1; #ifdef _KERNEL iscsi = malloc(id_size, type, flags); #else iscsi = malloc(id_size); #endif if (iscsi == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, id_size); } retval = 1; goto bailout; } *alloc_len = id_size; bzero(iscsi, id_size); iscsi->format_protocol = SCSI_PROTO_ISCSI; if (sep_found == 0) iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_DEVICE; else iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_PORT; name_len = id_size - sizeof(*iscsi); scsi_ulto2b(name_len, iscsi->additional_length); snprintf(iscsi->iscsi_name, name_len, "%s%s", iqn_prefix, id_str); *hdr = (struct scsi_transportid_header *)iscsi; bailout: return (retval); } /* * Parse a SCSI over PCIe (SOP) identifier. The Routing ID can either be * of the form 'bus,device,function' or 'bus,function'. */ int scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { struct scsi_transportid_sop *sop; unsigned long bus, device, function; char *tmpstr, *endptr; int retval, device_spec; retval = 0; device_spec = 0; device = 0; tmpstr = strsep(&id_str, ","); if ((tmpstr == NULL) || (*tmpstr == '\0')) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no ID found", __func__); } retval = 1; goto bailout; } bus = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing PCIe bus %s, number required", __func__, tmpstr); } retval = 1; goto bailout; } if ((id_str == NULL) || (*id_str == '\0')) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no PCIe " "device or function found", __func__); } retval = 1; goto bailout; } tmpstr = strsep(&id_str, ","); function = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing PCIe device/function %s, number " "required", __func__, tmpstr); } retval = 1; goto bailout; } /* * Check to see whether the user specified a third value. If so, * the second is the device. */ if (id_str != NULL) { if (*id_str == '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "no PCIe function found", __func__); } retval = 1; goto bailout; } device = function; device_spec = 1; function = strtoul(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "error parsing PCIe function %s, " "number required", __func__, id_str); } retval = 1; goto bailout; } } if (bus > SCSI_TRN_SOP_BUS_MAX) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: bus value " "%lu greater than maximum %u", __func__, bus, SCSI_TRN_SOP_BUS_MAX); } retval = 1; goto bailout; } if ((device_spec != 0) && (device > SCSI_TRN_SOP_DEV_MAX)) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: device value " "%lu greater than maximum %u", __func__, device, SCSI_TRN_SOP_DEV_MAX); } retval = 1; goto bailout; } if (((device_spec != 0) && (function > SCSI_TRN_SOP_FUNC_NORM_MAX)) || ((device_spec == 0) && (function > SCSI_TRN_SOP_FUNC_ALT_MAX))) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: function value " "%lu greater than maximum %u", __func__, function, (device_spec == 0) ?
SCSI_TRN_SOP_FUNC_ALT_MAX : SCSI_TRN_SOP_FUNC_NORM_MAX); } retval = 1; goto bailout; } #ifdef _KERNEL sop = malloc(sizeof(*sop), type, flags); #else sop = malloc(sizeof(*sop)); #endif if (sop == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*sop)); } retval = 1; goto bailout; } *alloc_len = sizeof(*sop); bzero(sop, sizeof(*sop)); sop->format_protocol = SCSI_PROTO_SOP | SCSI_TRN_SOP_FORMAT_DEFAULT; if (device_spec != 0) { struct scsi_sop_routing_id_norm rid; rid.bus = bus; rid.devfunc = (device << SCSI_TRN_SOP_DEV_SHIFT) | function; bcopy(&rid, sop->routing_id, MIN(sizeof(rid), sizeof(sop->routing_id))); } else { struct scsi_sop_routing_id_alt rid; rid.bus = bus; rid.function = function; bcopy(&rid, sop->routing_id, MIN(sizeof(rid), sizeof(sop->routing_id))); } *hdr = (struct scsi_transportid_header *)sop; bailout: return (retval); } /* * transportid_str: NUL-terminated string with format: protocol,id * The ID is protocol-specific. * hdr: Storage will be allocated for the transport ID. * alloc_len: The amount of memory allocated is returned here. * type: Malloc bucket (kernel only). * flags: Malloc flags (kernel only). * error_str: If non-NULL, it will contain error information (without * a terminating newline) if an error is returned. * error_str_len: Allocated length of the error string. * * Returns 0 for success, non-zero for failure. */ int scsi_parse_transportid(char *transportid_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { char *tmpstr; scsi_nv_status status; u_int num_proto_entries; int retval, table_entry; retval = 0; table_entry = 0; /* * We do allow a period as well as a comma to separate the protocol * from the ID string. This is to accommodate iSCSI names, which * start with "iqn.". */ tmpstr = strsep(&transportid_str, ",."); if (tmpstr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: transportid_str is NULL", __func__); } retval = 1; goto bailout; } num_proto_entries = nitems(scsi_proto_map); status = scsi_get_nv(scsi_proto_map, num_proto_entries, tmpstr, &table_entry, SCSI_NV_FLAG_IG_CASE); if (status != SCSI_NV_FOUND) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: %s protocol " "name %s", __func__, (status == SCSI_NV_AMBIGUOUS) ?
"ambiguous" : "invalid", tmpstr); } retval = 1; goto bailout; } switch (scsi_proto_map[table_entry].value) { case SCSI_PROTO_FC: case SCSI_PROTO_1394: case SCSI_PROTO_SAS: retval = scsi_parse_transportid_64bit( scsi_proto_map[table_entry].value, transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SPI: retval = scsi_parse_transportid_spi(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_RDMA: retval = scsi_parse_transportid_rdma(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_ISCSI: retval = scsi_parse_transportid_iscsi(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SOP: retval = scsi_parse_transportid_sop(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SSA: case SCSI_PROTO_ADITP: case SCSI_PROTO_ATA: case SCSI_PROTO_UAS: case SCSI_PROTO_NONE: default: /* * There is no format defined for a Transport ID for these * protocols. So even if the user gives us something, we * have no way to turn it into a standard SCSI Transport ID. */ retval = 1; if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no Transport " "ID format exists for protocol %s", __func__, tmpstr); } goto bailout; break; /* NOTREACHED */ } bailout: return (retval); } struct scsi_attrib_table_entry scsi_mam_attr_table[] = { { SMA_ATTR_REM_CAP_PARTITION, SCSI_ATTR_FLAG_NONE, "Remaining Capacity in Partition", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,/*parse_str*/ NULL }, { SMA_ATTR_MAX_CAP_PARTITION, SCSI_ATTR_FLAG_NONE, "Maximum Capacity in Partition", /*suffix*/"MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TAPEALERT_FLAGS, SCSI_ATTR_FLAG_HEX, "TapeAlert Flags", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LOAD_COUNT, SCSI_ATTR_FLAG_NONE, "Load Count", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MAM_SPACE_REMAINING, SCSI_ATTR_FLAG_NONE, "MAM Space Remaining", /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE, "Assigning Organization", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_FORMAT_DENSITY_CODE, SCSI_ATTR_FLAG_HEX, "Format Density Code", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_INITIALIZATION_COUNT, SCSI_ATTR_FLAG_NONE, "Initialization Count", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOLUME_ID, SCSI_ATTR_FLAG_NONE, "Volume Identifier", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOLUME_CHANGE_REF, SCSI_ATTR_FLAG_HEX, "Volume Change Reference", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD_1, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 1", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD_2, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 2", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { 
SMA_ATTR_DEV_SERIAL_LAST_LOAD_3, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 3", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_WRITTEN_LT, SCSI_ATTR_FLAG_NONE, "Total MB Written in Medium Life", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_READ_LT, SCSI_ATTR_FLAG_NONE, "Total MB Read in Medium Life", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_WRITTEN_CUR, SCSI_ATTR_FLAG_NONE, "Total MB Written in Current/Last Load", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_READ_CUR, SCSI_ATTR_FLAG_NONE, "Total MB Read in Current/Last Load", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_FIRST_ENC_BLOCK, SCSI_ATTR_FLAG_NONE, "Logical Position of First Encrypted Block", /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_NEXT_UNENC_BLOCK, SCSI_ATTR_FLAG_NONE, "Logical Position of First Unencrypted Block after First " "Encrypted Block", /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MEDIUM_USAGE_HIST, SCSI_ATTR_FLAG_NONE, "Medium Usage History", /*suffix*/ NULL, /*to_str*/ NULL, /*parse_str*/ NULL }, { SMA_ATTR_PART_USAGE_HIST, SCSI_ATTR_FLAG_NONE, "Partition Usage History", /*suffix*/ NULL, /*to_str*/ NULL, /*parse_str*/ NULL }, { SMA_ATTR_MED_MANUF, SCSI_ATTR_FLAG_NONE, "Medium Manufacturer", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_SERIAL, SCSI_ATTR_FLAG_NONE, "Medium Serial Number", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_LENGTH, SCSI_ATTR_FLAG_NONE, "Medium Length", /*suffix*/"m", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_WIDTH, SCSI_ATTR_FLAG_FP | SCSI_ATTR_FLAG_DIV_10 | SCSI_ATTR_FLAG_FP_1DIGIT, "Medium Width", /*suffix*/"mm", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE, "Assigning Organization", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_DENSITY_CODE, SCSI_ATTR_FLAG_HEX, "Medium Density Code", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_MANUF_DATE, SCSI_ATTR_FLAG_NONE, "Medium Manufacture Date", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MAM_CAPACITY, SCSI_ATTR_FLAG_NONE, "MAM Capacity", /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_TYPE, SCSI_ATTR_FLAG_HEX, "Medium Type", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_TYPE_INFO, SCSI_ATTR_FLAG_HEX, "Medium Type Information", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_SERIAL_NUM, SCSI_ATTR_FLAG_NONE, "Medium Serial Number", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_VENDOR, SCSI_ATTR_FLAG_NONE, "Application Vendor", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_NAME, SCSI_ATTR_FLAG_NONE, "Application Name", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_VERSION, SCSI_ATTR_FLAG_NONE, "Application Version", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_USER_MED_TEXT_LABEL, SCSI_ATTR_FLAG_NONE, "User Medium Text Label", /*suffix*/NULL, /*to_str*/ 
scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LAST_WRITTEN_TIME, SCSI_ATTR_FLAG_NONE, "Date and Time Last Written", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TEXT_LOCAL_ID, SCSI_ATTR_FLAG_HEX, "Text Localization Identifier", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_BARCODE, SCSI_ATTR_FLAG_NONE, "Barcode", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_HOST_OWNER_NAME, SCSI_ATTR_FLAG_NONE, "Owning Host Textual Name", /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MEDIA_POOL, SCSI_ATTR_FLAG_NONE, "Media Pool", /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_PART_USER_LABEL, SCSI_ATTR_FLAG_NONE, "Partition User Text Label", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LOAD_UNLOAD_AT_PART, SCSI_ATTR_FLAG_NONE, "Load/Unload at Partition", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_FORMAT_VERSION, SCSI_ATTR_FLAG_NONE, "Application Format Version", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOL_COHERENCY_INFO, SCSI_ATTR_FLAG_NONE, "Volume Coherency Information", /*suffix*/NULL, /*to_str*/ scsi_attrib_volcoh_sbuf, /*parse_str*/ NULL }, { 0x0ff1, SCSI_ATTR_FLAG_NONE, "Spectra MLM Creation", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff2, SCSI_ATTR_FLAG_NONE, "Spectra MLM C3", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff3, SCSI_ATTR_FLAG_NONE, "Spectra MLM RW", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff4, SCSI_ATTR_FLAG_NONE, "Spectra MLM SDC List", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff7, SCSI_ATTR_FLAG_NONE, "Spectra MLM Post Scan", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ffe, SCSI_ATTR_FLAG_NONE, "Spectra MLM Checksum", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f1, SCSI_ATTR_FLAG_NONE, "Spectra MLM Creation", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f2, SCSI_ATTR_FLAG_NONE, "Spectra MLM C3", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f3, SCSI_ATTR_FLAG_NONE, "Spectra MLM RW", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f4, SCSI_ATTR_FLAG_NONE, "Spectra MLM SDC List", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f7, SCSI_ATTR_FLAG_NONE, "Spectra MLM Post Scan", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17ff, SCSI_ATTR_FLAG_NONE, "Spectra MLM Checksum", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, }; /* * Print out Volume Coherency Information (Attribute 0x080c). * This field has two variable length members, including one at the * beginning, so it isn't practical to have a fixed structure definition. * This is current as of SSC4r03 (see section 4.2.21.3), dated March 25, * 2013. 
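 *
 * The layout parsed below is, in order (a summary of the code that
 * follows, not a normative definition):
 *
 *	1 byte	Volume Change Reference Value length (n)
 *	n bytes	Volume Change Reference Value
 *	8 bytes	Volume Coherency Count
 *	8 bytes	Volume Coherency Set Identifier
 *	2 bytes	Application Client Specific Information length (m)
 *	m bytes	Application Client Specific Information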
*/ int scsi_attrib_volcoh_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size; uint64_t tmp_val; uint8_t *cur_ptr; int retval; int vcr_len, as_len; retval = 0; tmp_val = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (field_size > avail_len) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; goto bailout; } else if (field_size == 0) { /* * It isn't clear from the spec whether a field length of * 0 is invalid here. It probably is, but be lenient here * to avoid inconveniencing the user. */ goto bailout; } cur_ptr = hdr->attribute; vcr_len = *cur_ptr; cur_ptr++; sbuf_printf(sb, "\n\tVolume Change Reference Value:"); switch (vcr_len) { case 0: if (error_str != NULL) { snprintf(error_str, error_str_len, "Volume Change " "Reference value has length of 0"); } retval = 1; goto bailout; break; /*NOTREACHED*/ case 1: tmp_val = *cur_ptr; break; case 2: tmp_val = scsi_2btoul(cur_ptr); break; case 3: tmp_val = scsi_3btoul(cur_ptr); break; case 4: tmp_val = scsi_4btoul(cur_ptr); break; case 8: tmp_val = scsi_8btou64(cur_ptr); break; default: sbuf_printf(sb, "\n"); sbuf_hexdump(sb, cur_ptr, vcr_len, NULL, 0); break; } if (vcr_len <= 8) sbuf_printf(sb, " 0x%jx\n", (uintmax_t)tmp_val); cur_ptr += vcr_len; tmp_val = scsi_8btou64(cur_ptr); sbuf_printf(sb, "\tVolume Coherency Count: %ju\n", (uintmax_t)tmp_val); cur_ptr += sizeof(tmp_val); tmp_val = scsi_8btou64(cur_ptr); sbuf_printf(sb, "\tVolume Coherency Set Identifier: 0x%jx\n", (uintmax_t)tmp_val); /* * Figure out how long the Application Client Specific Information * is and produce a hexdump. */ cur_ptr += sizeof(tmp_val); as_len = scsi_2btoul(cur_ptr); cur_ptr += sizeof(uint16_t); sbuf_printf(sb, "\tApplication Client Specific Information: "); if (((as_len == SCSI_LTFS_VER0_LEN) || (as_len == SCSI_LTFS_VER1_LEN)) && (strncmp(cur_ptr, SCSI_LTFS_STR_NAME, SCSI_LTFS_STR_LEN) == 0)) { sbuf_printf(sb, "LTFS\n"); cur_ptr += SCSI_LTFS_STR_LEN + 1; if (cur_ptr[SCSI_LTFS_UUID_LEN] != '\0') cur_ptr[SCSI_LTFS_UUID_LEN] = '\0'; sbuf_printf(sb, "\tLTFS UUID: %s\n", cur_ptr); cur_ptr += SCSI_LTFS_UUID_LEN + 1; /* XXX KDM check the length */ sbuf_printf(sb, "\tLTFS Version: %d\n", *cur_ptr); } else { sbuf_printf(sb, "Unknown\n"); sbuf_hexdump(sb, cur_ptr, as_len, NULL, 0); } bailout: return (retval); } int scsi_attrib_vendser_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size; struct scsi_attrib_vendser *vendser; cam_strvis_flags strvis_flags; int retval = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (field_size > avail_len) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; goto bailout; } else if (field_size == 0) { /* * A field size of 0 doesn't make sense here. The device * can at least give you the vendor ID, even if it can't * give you the serial number. 
*/ if (error_str != NULL) { snprintf(error_str, error_str_len, "The length of " "attribute ID 0x%.4x is 0", scsi_2btoul(hdr->id)); } retval = 1; goto bailout; } vendser = (struct scsi_attrib_vendser *)hdr->attribute; switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) { case SCSI_ATTR_OUTPUT_NONASCII_TRIM: strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM; break; case SCSI_ATTR_OUTPUT_NONASCII_RAW: strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW; break; case SCSI_ATTR_OUTPUT_NONASCII_ESC: default: strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC; break; } cam_strvis_sbuf(sb, vendser->vendor, sizeof(vendser->vendor), strvis_flags); sbuf_putc(sb, ' '); cam_strvis_sbuf(sb, vendser->serial_num, sizeof(vendser->serial_num), strvis_flags); bailout: return (retval); } int scsi_attrib_hexdump_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { uint32_t field_size; ssize_t avail_len; uint32_t print_len; uint8_t *num_ptr; int retval = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); print_len = MIN(avail_len, field_size); num_ptr = hdr->attribute; if (print_len > 0) { sbuf_printf(sb, "\n"); sbuf_hexdump(sb, num_ptr, print_len, NULL, 0); } return (retval); } int scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { uint64_t print_number; size_t avail_len; uint32_t number_size; int retval = 0; number_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (avail_len < number_size) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, number_size); } retval = 1; goto bailout; } switch (number_size) { case 0: /* * We don't treat this as an error, since there may be * scenarios where a device reports a field but then gives * a length of 0. See the note in scsi_attrib_ascii_sbuf(). */ goto bailout; break; /*NOTREACHED*/ case 1: print_number = hdr->attribute[0]; break; case 2: print_number = scsi_2btoul(hdr->attribute); break; case 3: print_number = scsi_3btoul(hdr->attribute); break; case 4: print_number = scsi_4btoul(hdr->attribute); break; case 8: print_number = scsi_8btou64(hdr->attribute); break; default: /* * If we wind up here, the number is too big to print * normally, so just do a hexdump. */ retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len, flags, output_flags, error_str, error_str_len); goto bailout; break; } if (flags & SCSI_ATTR_FLAG_FP) { #ifndef _KERNEL long double num_float; num_float = (long double)print_number; if (flags & SCSI_ATTR_FLAG_DIV_10) num_float /= 10; sbuf_printf(sb, "%.*Lf", (flags & SCSI_ATTR_FLAG_FP_1DIGIT) ? 1 : 0, num_float); #else /* _KERNEL */ sbuf_printf(sb, "%ju", (flags & SCSI_ATTR_FLAG_DIV_10) ?
(print_number / 10) : print_number); #endif /* _KERNEL */ } else if (flags & SCSI_ATTR_FLAG_HEX) { sbuf_printf(sb, "0x%jx", (uintmax_t)print_number); } else sbuf_printf(sb, "%ju", (uintmax_t)print_number); bailout: return (retval); } int scsi_attrib_ascii_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size, print_size; int retval = 0; avail_len = valid_len - sizeof(*hdr); field_size = scsi_2btoul(hdr->length); print_size = MIN(avail_len, field_size); if (print_size > 0) { cam_strvis_flags strvis_flags; switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) { case SCSI_ATTR_OUTPUT_NONASCII_TRIM: strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM; break; case SCSI_ATTR_OUTPUT_NONASCII_RAW: strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW; break; case SCSI_ATTR_OUTPUT_NONASCII_ESC: default: strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC; break; } cam_strvis_sbuf(sb, hdr->attribute, print_size, strvis_flags); } else if (avail_len < field_size) { /* * We only report an error if the user didn't allocate * enough space to hold the full value of this field. If * the field length is 0, that is allowed by the spec. * e.g. in SPC-4r37, section 7.4.2.2.5, VOLUME IDENTIFIER * "This attribute indicates the current volume identifier * (see SMC-3) of the medium. If the device server supports * this attribute but does not have access to the volume * identifier, the device server shall report this attribute * with an attribute length value of zero." */ if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; } return (retval); } int scsi_attrib_text_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size, print_size; int retval = 0; int esc_text = 1; avail_len = valid_len - sizeof(*hdr); field_size = scsi_2btoul(hdr->length); print_size = MIN(avail_len, field_size); if ((output_flags & SCSI_ATTR_OUTPUT_TEXT_MASK) == SCSI_ATTR_OUTPUT_TEXT_RAW) esc_text = 0; if (print_size > 0) { uint32_t i; for (i = 0; i < print_size; i++) { if (hdr->attribute[i] == '\0') continue; else if (((unsigned char)hdr->attribute[i] < 0x80) || (esc_text == 0)) sbuf_putc(sb, hdr->attribute[i]); else sbuf_printf(sb, "%%%02x", (unsigned char)hdr->attribute[i]); } } else if (avail_len < field_size) { /* * We only report an error if the user didn't allocate * enough space to hold the full value of this field. 
*/ if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; } return (retval); } struct scsi_attrib_table_entry * scsi_find_attrib_entry(struct scsi_attrib_table_entry *table, size_t num_table_entries, uint32_t id) { uint32_t i; for (i = 0; i < num_table_entries; i++) { if (table[i].id == id) return (&table[i]); } return (NULL); } struct scsi_attrib_table_entry * scsi_get_attrib_entry(uint32_t id) { return (scsi_find_attrib_entry(scsi_mam_attr_table, nitems(scsi_mam_attr_table), id)); } int scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len, struct scsi_mam_attribute_header *hdr, uint32_t output_flags, char *error_str, size_t error_str_len) { int retval; switch (hdr->byte2 & SMA_FORMAT_MASK) { case SMA_FORMAT_ASCII: retval = scsi_attrib_ascii_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str,error_str_len); break; case SMA_FORMAT_BINARY: if (scsi_2btoul(hdr->length) <= 8) retval = scsi_attrib_int_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); else retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); break; case SMA_FORMAT_TEXT: retval = scsi_attrib_text_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); break; default: if (error_str != NULL) { snprintf(error_str, error_str_len, "Unknown attribute " "format 0x%x", hdr->byte2 & SMA_FORMAT_MASK); } retval = 1; goto bailout; break; /*NOTREACHED*/ } sbuf_trim(sb); bailout: return (retval); } void scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, const char *desc) { int need_space = 0; uint32_t len; uint32_t id; /* * We can't do anything if we don't have enough valid data for the * header. */ if (valid_len < sizeof(*hdr)) return; id = scsi_2btoul(hdr->id); /* * Note that we print out the value of the attribute listed in the * header, regardless of whether we actually got that many bytes * back from the device through the controller. A truncated result * could be the result of a failure to ask for enough data; the * header indicates how many bytes are allocated for this attribute * in the MAM. */ len = scsi_2btoul(hdr->length); if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_MASK) == SCSI_ATTR_OUTPUT_FIELD_NONE) return; if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_DESC) && (desc != NULL)) { sbuf_printf(sb, "%s", desc); need_space = 1; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_NUM) { sbuf_printf(sb, "%s(0x%.4x)", (need_space) ? " " : "", id); need_space = 0; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_SIZE) { sbuf_printf(sb, "%s[%d]", (need_space) ? " " : "", len); need_space = 0; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_RW) { sbuf_printf(sb, "%s(%s)", (need_space) ? " " : "", (hdr->byte2 & SMA_READ_ONLY) ? 
"RO" : "RW"); } sbuf_printf(sb, ": "); } int scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, struct scsi_attrib_table_entry *user_table, size_t num_user_entries, int prefer_user_table, uint32_t output_flags, char *error_str, int error_str_len) { int retval; struct scsi_attrib_table_entry *table1 = NULL, *table2 = NULL; struct scsi_attrib_table_entry *entry = NULL; size_t table1_size = 0, table2_size = 0; uint32_t id; retval = 0; if (valid_len < sizeof(*hdr)) { retval = 1; goto bailout; } id = scsi_2btoul(hdr->id); if (user_table != NULL) { if (prefer_user_table != 0) { table1 = user_table; table1_size = num_user_entries; table2 = scsi_mam_attr_table; table2_size = nitems(scsi_mam_attr_table); } else { table1 = scsi_mam_attr_table; table1_size = nitems(scsi_mam_attr_table); table2 = user_table; table2_size = num_user_entries; } } else { table1 = scsi_mam_attr_table; table1_size = nitems(scsi_mam_attr_table); } entry = scsi_find_attrib_entry(table1, table1_size, id); if (entry != NULL) { scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, entry->desc); if (entry->to_str == NULL) goto print_default; retval = entry->to_str(sb, hdr, valid_len, entry->flags, output_flags, error_str, error_str_len); goto bailout; } if (table2 != NULL) { entry = scsi_find_attrib_entry(table2, table2_size, id); if (entry != NULL) { if (entry->to_str == NULL) goto print_default; scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, entry->desc); retval = entry->to_str(sb, hdr, valid_len, entry->flags, output_flags, error_str, error_str_len); goto bailout; } } scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, NULL); print_default: retval = scsi_attrib_value_sbuf(sb, valid_len, hdr, output_flags, error_str, error_str_len); bailout: if (retval == 0) { if ((entry != NULL) && (entry->suffix != NULL)) sbuf_printf(sb, " %s", entry->suffix); sbuf_trim(sb); sbuf_printf(sb, "\n"); } return (retval); } void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_test_unit_ready *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_test_unit_ready *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = TEST_UNIT_READY; } void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_request_sense *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_request_sense *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REQUEST_SENSE; scsi_cmd->length = dxfer_len; } void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len, int evpd, u_int8_t page_code, u_int8_t sense_len, u_int32_t timeout) { struct scsi_inquiry *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/inq_buf, /*dxfer_len*/inq_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_inquiry *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = INQUIRY; if 
(evpd) { scsi_cmd->byte2 |= SI_EVPD; scsi_cmd->page_code = page_code; } scsi_ulto2b(inq_len, scsi_cmd->length); } void scsi_mode_sense(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len, uint8_t sense_len, uint32_t timeout) { scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd, pc, page, 0, param_buf, param_len, 0, sense_len, timeout); } void scsi_mode_sense_len(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { scsi_mode_sense_subpage(csio, retries, cbfcnp, tag_action, dbd, pc, page, 0, param_buf, param_len, minimum_cmd_size, sense_len, timeout); } void scsi_mode_sense_subpage(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int dbd, uint8_t pc, uint8_t page, uint8_t subpage, uint8_t *param_buf, uint32_t param_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { u_int8_t cdb_len; /* * Use the smallest possible command to perform the operation. */ if ((param_len < 256) && (minimum_cmd_size < 10)) { /* * We can fit in a 6 byte cdb. */ struct scsi_mode_sense_6 *scsi_cmd; scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SENSE_6; if (dbd != 0) scsi_cmd->byte2 |= SMS_DBD; scsi_cmd->page = pc | page; scsi_cmd->subpage = subpage; scsi_cmd->length = param_len; cdb_len = sizeof(*scsi_cmd); } else { /* * Need a 10 byte cdb. */ struct scsi_mode_sense_10 *scsi_cmd; scsi_cmd = (struct scsi_mode_sense_10 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SENSE_10; if (dbd != 0) scsi_cmd->byte2 |= SMS_DBD; scsi_cmd->page = pc | page; scsi_cmd->subpage = subpage; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, param_buf, param_len, sense_len, cdb_len, timeout); } void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { scsi_mode_select_len(csio, retries, cbfcnp, tag_action, scsi_page_fmt, save_pages, param_buf, param_len, 0, sense_len, timeout); } void scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; /* * Use the smallest possible command to perform the operation. */ if ((param_len < 256) && (minimum_cmd_size < 10)) { /* * We can fit in a 6 byte cdb. */ struct scsi_mode_select_6 *scsi_cmd; scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT_6; if (scsi_page_fmt != 0) scsi_cmd->byte2 |= SMS_PF; if (save_pages != 0) scsi_cmd->byte2 |= SMS_SP; scsi_cmd->length = param_len; cdb_len = sizeof(*scsi_cmd); } else { /* * Need a 10 byte cdb. 
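		 * (Note: MODE SENSE(6) carries only a one-byte allocation
		 * length, which is why the param_len < 256 test above can
		 * select the 6 byte CDB; a larger param_len, or a
		 * caller-supplied minimum_cmd_size of 10 or more, forces
		 * MODE SENSE(10).)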
*/ struct scsi_mode_select_10 *scsi_cmd; scsi_cmd = (struct scsi_mode_select_10 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT_10; if (scsi_page_fmt != 0) scsi_cmd->byte2 |= SMS_PF; if (save_pages != 0) scsi_cmd->byte2 |= SMS_SP; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_OUT, tag_action, param_buf, param_len, sense_len, cdb_len, timeout); } void scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, u_int8_t page, int save_pages, int ppc, u_int32_t paramptr, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_log_sense *scsi_cmd; u_int8_t cdb_len; scsi_cmd = (struct scsi_log_sense *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOG_SENSE; scsi_cmd->page = page_code | page; if (save_pages != 0) scsi_cmd->byte2 |= SLS_SP; if (ppc != 0) scsi_cmd->byte2 |= SLS_PPC; scsi_ulto2b(paramptr, scsi_cmd->paramptr); scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/param_buf, /*dxfer_len*/param_len, sense_len, cdb_len, timeout); } void scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, int save_pages, int pc_reset, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_log_select *scsi_cmd; u_int8_t cdb_len; scsi_cmd = (struct scsi_log_select *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOG_SELECT; scsi_cmd->page = page_code & SLS_PAGE_CODE; if (save_pages != 0) scsi_cmd->byte2 |= SLS_SP; if (pc_reset != 0) scsi_cmd->byte2 |= SLS_PCR; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/param_buf, /*dxfer_len*/param_len, sense_len, cdb_len, timeout); } /* * Prevent or allow the user to remove the media */ void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_prevent *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_prevent *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = PREVENT_ALLOW; scsi_cmd->how = action; } /* XXX allow specification of address and PMI bit and LBA */ void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_capacity_data *rcap_buf, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_capacity *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rcap_buf, /*dxfer_len*/sizeof(*rcap_buf), sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_capacity *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_CAPACITY; } void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint64_t lba, int reladr, int pmi, uint8_t *rcap_buf, int 
rcap_buf_len, uint8_t sense_len, uint32_t timeout)
{
	struct scsi_read_capacity_16 *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      /*data_ptr*/(u_int8_t *)rcap_buf,
		      /*dxfer_len*/rcap_buf_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = SERVICE_ACTION_IN;
	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
	scsi_u64to8b(lba, scsi_cmd->addr);
	scsi_ulto4b(rcap_buf_len, scsi_cmd->alloc_len);
	if (pmi)
		scsi_cmd->reladr |= SRC16_PMI;
	if (reladr)
		scsi_cmd->reladr |= SRC16_RELADR;
}

void
scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t select_report,
		 struct scsi_report_luns_data *rpl_buf, u_int32_t alloc_len,
		 u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_report_luns *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      /*data_ptr*/(u_int8_t *)rpl_buf,
		      /*dxfer_len*/alloc_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_report_luns *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = REPORT_LUNS;
	scsi_cmd->select_report = select_report;
	scsi_ulto4b(alloc_len, scsi_cmd->length);
}

void
scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries,
			 void (*cbfcnp)(struct cam_periph *, union ccb *),
			 u_int8_t tag_action, u_int8_t pdf, void *buf,
			 u_int32_t alloc_len, u_int8_t sense_len,
			 u_int32_t timeout)
{
	struct scsi_target_group *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      /*data_ptr*/(u_int8_t *)buf,
		      /*dxfer_len*/alloc_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = MAINTENANCE_IN;
	scsi_cmd->service_action = REPORT_TARGET_PORT_GROUPS | pdf;
	scsi_ulto4b(alloc_len, scsi_cmd->length);
}

void
scsi_report_timestamp(struct ccb_scsiio *csio, u_int32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      u_int8_t tag_action, u_int8_t pdf, void *buf,
		      u_int32_t alloc_len, u_int8_t sense_len,
		      u_int32_t timeout)
{
	struct scsi_timestamp *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      /*data_ptr*/(u_int8_t *)buf,
		      /*dxfer_len*/alloc_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_timestamp *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = MAINTENANCE_IN;
	scsi_cmd->service_action = REPORT_TIMESTAMP | pdf;
	scsi_ulto4b(alloc_len, scsi_cmd->length);
}

void
scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      u_int8_t tag_action, void *buf, u_int32_t alloc_len,
		      u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_target_group *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_OUT,
		      tag_action,
		      /*data_ptr*/(u_int8_t *)buf,
		      /*dxfer_len*/alloc_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = MAINTENANCE_OUT;
	scsi_cmd->service_action = SET_TARGET_PORT_GROUPS;
	scsi_ulto4b(alloc_len, scsi_cmd->length);
}

void
scsi_create_timestamp(uint8_t *timestamp_6b_buf, uint64_t timestamp)
{
	uint8_t buf[8];

	scsi_u64to8b(timestamp, buf);
	/*
	 * Using memcpy starting at buf[2] because the SET TIMESTAMP
	 * parameter list only has six bytes for the timestamp to fit into,
	 * and we don't have a scsi_u64to6b function.
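	 *
	 * (Worked example, illustrative only: for timestamp
	 * 0x00000123456789AB, scsi_u64to8b() fills buf[] with
	 * 00 00 01 23 45 67 89 ab, so the six bytes copied from &buf[2]
	 * are 01 23 45 67 89 ab -- the low-order 48 bits in big-endian
	 * order, which is what the SET TIMESTAMP parameter expects.)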
	 */
	memcpy(timestamp_6b_buf, &buf[2], 6);
}

void
scsi_set_timestamp(struct ccb_scsiio *csio, u_int32_t retries,
		   void (*cbfcnp)(struct cam_periph *, union ccb *),
		   u_int8_t tag_action, void *buf, u_int32_t alloc_len,
		   u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_timestamp *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_OUT,
		      tag_action,
		      /*data_ptr*/(u_int8_t *) buf,
		      /*dxfer_len*/alloc_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_timestamp *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = MAINTENANCE_OUT;
	scsi_cmd->service_action = SET_TIMESTAMP;
	scsi_ulto4b(alloc_len, scsi_cmd->length);
}

/*
 * Synchronize the media to the contents of the cache for
 * the given lba/count pair.  Specifying 0/0 means sync
 * the whole cache.
 */
void
scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries,
		       void (*cbfcnp)(struct cam_periph *, union ccb *),
		       u_int8_t tag_action, u_int32_t begin_lba,
		       u_int16_t lb_count, u_int8_t sense_len,
		       u_int32_t timeout)
{
	struct scsi_sync_cache *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_NONE,
		      tag_action,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
	scsi_cmd = (struct scsi_sync_cache *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = SYNCHRONIZE_CACHE;
	scsi_ulto4b(begin_lba, scsi_cmd->begin_lba);
	scsi_ulto2b(lb_count, scsi_cmd->lb_count);
}

void
scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
		void (*cbfcnp)(struct cam_periph *, union ccb *),
		u_int8_t tag_action, int readop, u_int8_t byte2,
		int minimum_cmd_size, u_int64_t lba, u_int32_t block_count,
		u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		u_int32_t timeout)
{
	int read;
	u_int8_t cdb_len;

	read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ;

	/*
	 * Use the smallest possible command to perform the operation
	 * as some legacy hardware does not support the 10 byte commands.
	 * If any of the bits in byte2 are set, we have to go with a larger
	 * command.
	 */
	if ((minimum_cmd_size < 10)
	 && ((lba & 0x1fffff) == lba)
	 && ((block_count & 0xff) == block_count)
	 && (byte2 == 0)) {
		/*
		 * We can fit in a 6 byte cdb.
		 */
		struct scsi_rw_6 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = read ? READ_6 : WRITE_6;
		scsi_ulto3b(lba, scsi_cmd->addr);
		scsi_cmd->length = block_count & 0xff;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("6byte: %x%x%x:%d:%d\n", scsi_cmd->addr[0],
			   scsi_cmd->addr[1], scsi_cmd->addr[2],
			   scsi_cmd->length, dxfer_len));
	} else if ((minimum_cmd_size < 12)
		&& ((block_count & 0xffff) == block_count)
		&& ((lba & 0xffffffff) == lba)) {
		/*
		 * Need a 10 byte cdb.
		 */
		struct scsi_rw_10 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = read ? READ_10 : WRITE_10;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto2b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0],
			   scsi_cmd->addr[1], scsi_cmd->addr[2],
			   scsi_cmd->addr[3], scsi_cmd->length[0],
			   scsi_cmd->length[1], dxfer_len));
	} else if ((minimum_cmd_size < 16)
		&& ((block_count & 0xffffffff) == block_count)
		&& ((lba & 0xffffffff) == lba)) {
		/*
		 * The block count is too big for a 10 byte CDB, use a 12
		 * byte CDB.
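		 *
		 * (Illustrative summary of the selection logic: an LBA that
		 * fits in 0x1fffff with a block count up to 0xff takes
		 * READ_6/WRITE_6; a 32-bit LBA with a 16-bit block count
		 * takes the 10 byte CDB; a 32-bit LBA with a 32-bit block
		 * count takes this 12 byte CDB; anything larger falls
		 * through to the 16 byte CDB below.)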
*/ struct scsi_rw_12 *scsi_cmd; scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_12 : WRITE_12; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("12byte: %x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], scsi_cmd->length[2], scsi_cmd->length[3], dxfer_len)); } else { /* * 16 byte CDB. We'll only get here if the LBA is larger * than 2^32, or if the user asks for a 16 byte command. */ struct scsi_rw_16 *scsi_cmd; scsi_cmd = (struct scsi_rw_16 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_16 : WRITE_16; scsi_cmd->byte2 = byte2; scsi_u64to8b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, (read ? CAM_DIR_IN : CAM_DIR_OUT) | ((readop & SCSI_RW_BIO) != 0 ? CAM_DATA_BIO : 0), tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_write_same(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; if ((minimum_cmd_size < 16) && ((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* * Need a 10 byte cdb. */ struct scsi_write_same_10 *scsi_cmd; scsi_cmd = (struct scsi_write_same_10 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = WRITE_SAME_10; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->group = 0; scsi_ulto2b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], dxfer_len)); } else { /* * 16 byte CDB. We'll only get here if the LBA is larger * than 2^32, or if the user asks for a 16 byte command. 
*/ struct scsi_write_same_16 *scsi_cmd; scsi_cmd = (struct scsi_write_same_16 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = WRITE_SAME_16; scsi_cmd->byte2 = byte2; scsi_u64to8b(lba, scsi_cmd->addr); scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->group = 0; scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("16byte: %x%x%x%x%x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->addr[4], scsi_cmd->addr[5], scsi_cmd->addr[6], scsi_cmd->addr[7], scsi_cmd->length[0], scsi_cmd->length[1], scsi_cmd->length[2], scsi_cmd->length[3], dxfer_len)); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { scsi_ata_pass(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*protocol*/AP_PROTO_PIO_IN, /*ata_flags*/AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT, /*features*/0, /*sector_count*/dxfer_len, /*lba*/0, /*command*/ATA_ATA_IDENTIFY, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ 0, /*control*/0, data_ptr, dxfer_len, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*minimum_cmd_size*/ 0, sense_len, timeout); } void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t block_count, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { scsi_ata_pass_16(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*protocol*/AP_EXTEND|AP_PROTO_DMA, /*ata_flags*/AP_FLAG_TLEN_SECT_CNT|AP_FLAG_BYT_BLOK_BLOCKS, /*features*/ATA_DSM_TRIM, /*sector_count*/block_count, /*lba*/0, /*command*/ATA_DATA_SET_MANAGEMENT, /*control*/0, data_ptr, dxfer_len, sense_len, timeout); } int scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint32_t log_address, uint32_t page_number, uint16_t block_count, uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { uint8_t command, protocol_out; uint16_t count_out; uint64_t lba; int retval; retval = 0; switch (protocol) { case AP_PROTO_DMA: count_out = block_count; command = ATA_READ_LOG_DMA_EXT; protocol_out = AP_PROTO_DMA; break; case AP_PROTO_PIO_IN: default: count_out = block_count; command = ATA_READ_LOG_EXT; protocol_out = AP_PROTO_PIO_IN; break; } lba = (((uint64_t)page_number & 0xff00) << 32) | ((page_number & 0x00ff) << 8) | (log_address & 0xff); protocol_out |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*protocol*/ protocol_out, /*ata_flags*/AP_FLAG_TLEN_SECT_CNT | AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TDIR_FROM_DEV, /*feature*/ 0, /*sector_count*/ count_out, /*lba*/ lba, /*command*/ command, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ 0, /*control*/0, data_ptr, dxfer_len, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*minimum_cmd_size*/ 0, sense_len, timeout); return (retval); } /* * Note! This is an unusual CDB building function because it can return * an error in the event that the command in question requires a variable * length CDB, but the caller has not given storage space for one or has not * given enough storage space. 
If there is enough space available in the
 * standard SCSI CCB CDB bytes, we'll prefer that over passed in storage.
 */
int
scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      uint32_t flags, uint8_t tag_action,
	      uint8_t protocol, uint8_t ata_flags, uint16_t features,
	      uint16_t sector_count, uint64_t lba, uint8_t command,
	      uint8_t device, uint8_t icc, uint32_t auxiliary,
	      uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len,
	      uint8_t *cdb_storage, size_t cdb_storage_len,
	      int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
{
	uint32_t cam_flags;
	uint8_t *cdb_ptr;
	int cmd_size;
	int retval;
	uint8_t cdb_len;

	retval = 0;
	cam_flags = flags;

	/*
	 * Round the user's request to the nearest command size that is at
	 * least as big as what he requested.
	 */
	if (minimum_cmd_size <= 12)
		cmd_size = 12;
	else if (minimum_cmd_size > 16)
		cmd_size = 32;
	else
		cmd_size = 16;

	/*
	 * If we have parameters that require a 48-bit ATA command, we have to
	 * use the 16 byte ATA PASS-THROUGH command at least.
	 */
	if (((lba > ATA_MAX_28BIT_LBA)
	  || (sector_count > 255)
	  || (features > 255)
	  || (protocol & AP_EXTEND))
	 && ((cmd_size < 16)
	  || ((protocol & AP_EXTEND) == 0))) {
		if (cmd_size < 16)
			cmd_size = 16;
		protocol |= AP_EXTEND;
	}

	/*
	 * The icc and auxiliary ATA registers are only supported in the
	 * 32-byte version of the ATA PASS-THROUGH command.
	 */
	if ((icc != 0)
	 || (auxiliary != 0)) {
		cmd_size = 32;
		protocol |= AP_EXTEND;
	}

	if ((cmd_size > sizeof(csio->cdb_io.cdb_bytes))
	 && ((cdb_storage == NULL)
	  || (cdb_storage_len < cmd_size))) {
		retval = 1;
		goto bailout;
	}

	/*
	 * At this point we know we have enough space to store the command
	 * in one place or another.  We prefer the built-in array, but use
	 * the passed-in storage if necessary.
	 */
	if (cmd_size <= sizeof(csio->cdb_io.cdb_bytes))
		cdb_ptr = csio->cdb_io.cdb_bytes;
	else {
		cdb_ptr = cdb_storage;
		cam_flags |= CAM_CDB_POINTER;
	}

	if (cmd_size <= 12) {
		struct ata_pass_12 *cdb;

		cdb = (struct ata_pass_12 *)cdb_ptr;
		cdb_len = sizeof(*cdb);
		bzero(cdb, cdb_len);

		cdb->opcode = ATA_PASS_12;
		cdb->protocol = protocol;
		cdb->flags = ata_flags;
		cdb->features = features;
		cdb->sector_count = sector_count;
		cdb->lba_low = lba & 0xff;
		cdb->lba_mid = (lba >> 8) & 0xff;
		cdb->lba_high = (lba >> 16) & 0xff;
		cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA;
		cdb->command = command;
		cdb->control = control;
	} else if (cmd_size <= 16) {
		struct ata_pass_16 *cdb;

		cdb = (struct ata_pass_16 *)cdb_ptr;
		cdb_len = sizeof(*cdb);
		bzero(cdb, cdb_len);

		cdb->opcode = ATA_PASS_16;
		cdb->protocol = protocol;
		cdb->flags = ata_flags;
		cdb->features = features & 0xff;
		cdb->sector_count = sector_count & 0xff;
		cdb->lba_low = lba & 0xff;
		cdb->lba_mid = (lba >> 8) & 0xff;
		cdb->lba_high = (lba >> 16) & 0xff;
		/*
		 * If AP_EXTEND is set, we're sending a 48-bit command.
		 * Otherwise it's a 28-bit command.
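		 *
		 * (Illustrative: with AP_EXTEND set and lba =
		 * 0x123456789abc, lba_low/lba_mid/lba_high above carry
		 * 0xbc/0x9a/0x78, and lba_low_ext/lba_mid_ext/lba_high_ext
		 * below carry 0x56/0x34/0x12.)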
*/ if (protocol & AP_EXTEND) { cdb->lba_low_ext = (lba >> 24) & 0xff; cdb->lba_mid_ext = (lba >> 32) & 0xff; cdb->lba_high_ext = (lba >> 40) & 0xff; cdb->features_ext = (features >> 8) & 0xff; cdb->sector_count_ext = (sector_count >> 8) & 0xff; cdb->device = device | ATA_DEV_LBA; } else { cdb->lba_low_ext = (lba >> 24) & 0xf; cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA; } cdb->command = command; cdb->control = control; } else { struct ata_pass_32 *cdb; uint8_t tmp_lba[8]; cdb = (struct ata_pass_32 *)cdb_ptr; cdb_len = sizeof(*cdb); bzero(cdb, cdb_len); cdb->opcode = VARIABLE_LEN_CDB; cdb->control = control; cdb->length = sizeof(*cdb) - __offsetof(struct ata_pass_32, service_action); scsi_ulto2b(ATA_PASS_32_SA, cdb->service_action); cdb->protocol = protocol; cdb->flags = ata_flags; if ((protocol & AP_EXTEND) == 0) { lba &= 0x0fffffff; cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA; features &= 0xff; sector_count &= 0xff; } else { cdb->device = device | ATA_DEV_LBA; } scsi_u64to8b(lba, tmp_lba); bcopy(&tmp_lba[2], cdb->lba, sizeof(cdb->lba)); scsi_ulto2b(features, cdb->features); scsi_ulto2b(sector_count, cdb->count); cdb->command = command; cdb->icc = icc; scsi_ulto4b(auxiliary, cdb->auxiliary); } cam_fill_csio(csio, retries, cbfcnp, cam_flags, tag_action, data_ptr, dxfer_len, sense_len, cmd_size, timeout); bailout: return (retval); } void scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t protocol, u_int8_t ata_flags, u_int16_t features, u_int16_t sector_count, uint64_t lba, u_int8_t command, u_int8_t control, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct ata_pass_16 *ata_cmd; ata_cmd = (struct ata_pass_16 *)&csio->cdb_io.cdb_bytes; ata_cmd->opcode = ATA_PASS_16; ata_cmd->protocol = protocol; ata_cmd->flags = ata_flags; ata_cmd->features_ext = features >> 8; ata_cmd->features = features; ata_cmd->sector_count_ext = sector_count >> 8; ata_cmd->sector_count = sector_count; ata_cmd->lba_low = lba; ata_cmd->lba_mid = lba >> 8; ata_cmd->lba_high = lba >> 16; ata_cmd->device = ATA_DEV_LBA; if (protocol & AP_EXTEND) { ata_cmd->lba_low_ext = lba >> 24; ata_cmd->lba_mid_ext = lba >> 32; ata_cmd->lba_high_ext = lba >> 40; } else ata_cmd->device |= (lba >> 24) & 0x0f; ata_cmd->command = command; ata_cmd->control = control; cam_fill_csio(csio, retries, cbfcnp, flags, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*ata_cmd), timeout); } void scsi_unmap(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_unmap *scsi_cmd; scsi_cmd = (struct scsi_unmap *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = UNMAP; scsi_cmd->byte2 = byte2; scsi_ulto4b(0, scsi_cmd->reserved); scsi_cmd->group = 0; scsi_ulto2b(dxfer_len, scsi_cmd->length); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t allocation_length, uint8_t sense_len, uint32_t timeout) { struct scsi_receive_diag *scsi_cmd; scsi_cmd = (struct scsi_receive_diag *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); 
scsi_cmd->opcode = RECEIVE_DIAGNOSTIC; if (pcv) { scsi_cmd->byte2 |= SRD_PCV; scsi_cmd->page_code = page_code; } scsi_ulto2b(allocation_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, allocation_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int unit_offline, int device_offline, int self_test, int page_format, int self_test_code, uint8_t *data_ptr, uint16_t param_list_length, uint8_t sense_len, uint32_t timeout) { struct scsi_send_diag *scsi_cmd; scsi_cmd = (struct scsi_send_diag *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_DIAGNOSTIC; /* * The default self-test mode control and specific test * control are mutually exclusive. */ if (self_test) self_test_code = SSD_SELF_TEST_CODE_NONE; scsi_cmd->byte2 = ((self_test_code << SSD_SELF_TEST_CODE_SHIFT) & SSD_SELF_TEST_CODE_MASK) | (unit_offline ? SSD_UNITOFFL : 0) | (device_offline ? SSD_DEVOFFL : 0) | (self_test ? SSD_SELFTEST : 0) | (page_format ? SSD_PF : 0); scsi_ulto2b(param_list_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/param_list_length ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, param_list_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t allocation_length, uint8_t sense_len, uint32_t timeout) { struct scsi_read_buffer *scsi_cmd; scsi_cmd = (struct scsi_read_buffer *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_BUFFER; scsi_cmd->byte2 = mode; scsi_cmd->buffer_id = buffer_id; scsi_ulto3b(offset, scsi_cmd->offset); scsi_ulto3b(allocation_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, allocation_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_write_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t param_list_length, uint8_t sense_len, uint32_t timeout) { struct scsi_write_buffer *scsi_cmd; scsi_cmd = (struct scsi_write_buffer *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = WRITE_BUFFER; scsi_cmd->byte2 = mode; scsi_cmd->buffer_id = buffer_id; scsi_ulto3b(offset, scsi_cmd->offset); scsi_ulto3b(param_list_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/param_list_length ? 
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, param_list_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int start, int load_eject, int immediate, u_int8_t sense_len, u_int32_t timeout) { struct scsi_start_stop_unit *scsi_cmd; int extra_flags = 0; scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = START_STOP_UNIT; if (start != 0) { scsi_cmd->how |= SSS_START; /* it takes a lot of power to start a drive */ extra_flags |= CAM_HIGH_POWER; } if (load_eject != 0) scsi_cmd->how |= SSS_LOEJ; if (immediate != 0) scsi_cmd->byte2 |= SSS_IMMED; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE | extra_flags, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t service_action, uint32_t element, u_int8_t elem_type, int logical_volume, int partition, u_int32_t first_attribute, int cache, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout) { struct scsi_read_attribute *scsi_cmd; scsi_cmd = (struct scsi_read_attribute *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_ATTRIBUTE; scsi_cmd->service_action = service_action; scsi_ulto2b(element, scsi_cmd->element); scsi_cmd->elem_type = elem_type; scsi_cmd->logical_volume = logical_volume; scsi_cmd->partition = partition; scsi_ulto2b(first_attribute, scsi_cmd->first_attribute); scsi_ulto4b(length, scsi_cmd->length); if (cache != 0) scsi_cmd->cache |= SRA_CACHE; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, uint32_t element, int logical_volume, int partition, int wtc, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout) { struct scsi_write_attribute *scsi_cmd; scsi_cmd = (struct scsi_write_attribute *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = WRITE_ATTRIBUTE; if (wtc != 0) scsi_cmd->byte2 = SWA_WTC; scsi_ulto3b(element, scsi_cmd->element); scsi_cmd->logical_volume = logical_volume; scsi_cmd->partition = partition; scsi_ulto4b(length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_per_res_in *scsi_cmd; scsi_cmd = (struct scsi_per_res_in *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = PERSISTENT_RES_IN; scsi_cmd->action = service_action; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int service_action, int scope, int res_type, uint8_t 
*data_ptr, uint32_t dxfer_len, int sense_len, int timeout)
{
	struct scsi_per_res_out *scsi_cmd;

	scsi_cmd = (struct scsi_per_res_out *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = PERSISTENT_RES_OUT;
	scsi_cmd->action = service_action;
	scsi_cmd->scope_type = scope | res_type;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_OUT,
		      tag_action,
		      /*data_ptr*/data_ptr,
		      /*dxfer_len*/dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

void
scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries,
			  void (*cbfcnp)(struct cam_periph *, union ccb *),
			  uint8_t tag_action, uint32_t security_protocol,
			  uint32_t security_protocol_specific, int byte4,
			  uint8_t *data_ptr, uint32_t dxfer_len, int sense_len,
			  int timeout)
{
	struct scsi_security_protocol_in *scsi_cmd;

	scsi_cmd = (struct scsi_security_protocol_in *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = SECURITY_PROTOCOL_IN;

	scsi_cmd->security_protocol = security_protocol;
	scsi_ulto2b(security_protocol_specific,
		    scsi_cmd->security_protocol_specific);
	scsi_cmd->byte4 = byte4;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

void
scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries,
			   void (*cbfcnp)(struct cam_periph *, union ccb *),
			   uint8_t tag_action, uint32_t security_protocol,
			   uint32_t security_protocol_specific, int byte4,
			   uint8_t *data_ptr, uint32_t dxfer_len, int sense_len,
			   int timeout)
{
	struct scsi_security_protocol_out *scsi_cmd;

	scsi_cmd = (struct scsi_security_protocol_out *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = SECURITY_PROTOCOL_OUT;

	scsi_cmd->security_protocol = security_protocol;
	scsi_ulto2b(security_protocol_specific,
		    scsi_cmd->security_protocol_specific);
	scsi_cmd->byte4 = byte4;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_OUT,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

void
scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries,
			      void (*cbfcnp)(struct cam_periph *, union ccb *),
			      uint8_t tag_action, int options, int req_opcode,
			      int req_service_action, uint8_t *data_ptr,
			      uint32_t dxfer_len, int sense_len, int timeout)
{
	struct scsi_report_supported_opcodes *scsi_cmd;

	scsi_cmd = (struct scsi_report_supported_opcodes *)
	    &csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = MAINTENANCE_IN;
	scsi_cmd->service_action = REPORT_SUPPORTED_OPERATION_CODES;
	scsi_cmd->options = options;
	scsi_cmd->requested_opcode = req_opcode;
	scsi_ulto2b(req_service_action, scsi_cmd->requested_service_action);
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Try to make as good a match as possible with
 * available sub drivers
 */
int
scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
{
	struct scsi_inquiry_pattern *entry;
	struct scsi_inquiry_data *inq;

	entry = (struct scsi_inquiry_pattern *)table_entry;
	inq = (struct scsi_inquiry_data *)inqbuffer;

	if (((SID_TYPE(inq) == entry->type)
	  || (entry->type == T_ANY))
	 && (SID_IS_REMOVABLE(inq) ?
	     entry->media_type & SIP_MEDIA_REMOVABLE :
	     entry->media_type & SIP_MEDIA_FIXED)
	 && (cam_strmatch(inq->vendor, entry->vendor,
			  sizeof(inq->vendor)) == 0)
	 && (cam_strmatch(inq->product, entry->product,
			  sizeof(inq->product)) == 0)
	 && (cam_strmatch(inq->revision, entry->revision,
			  sizeof(inq->revision)) == 0)) {
		return (0);
	}
	return (-1);
}

/*
 * Try to make as good a match as possible with
 * available sub drivers
 */
int
scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
{
	struct scsi_static_inquiry_pattern *entry;
	struct scsi_inquiry_data *inq;

	entry = (struct scsi_static_inquiry_pattern *)table_entry;
	inq = (struct scsi_inquiry_data *)inqbuffer;

	if (((SID_TYPE(inq) == entry->type)
	  || (entry->type == T_ANY))
	 && (SID_IS_REMOVABLE(inq) ?
	     entry->media_type & SIP_MEDIA_REMOVABLE :
	     entry->media_type & SIP_MEDIA_FIXED)
	 && (cam_strmatch(inq->vendor, entry->vendor,
			  sizeof(inq->vendor)) == 0)
	 && (cam_strmatch(inq->product, entry->product,
			  sizeof(inq->product)) == 0)
	 && (cam_strmatch(inq->revision, entry->revision,
			  sizeof(inq->revision)) == 0)) {
		return (0);
	}
	return (-1);
}

/**
 * Compare two buffers of vpd device descriptors for a match.
 *
 * \param lhs      Pointer to first buffer of descriptors to compare.
 * \param lhs_len  The length of the first buffer.
 * \param rhs      Pointer to second buffer of descriptors to compare.
 * \param rhs_len  The length of the second buffer.
 *
 * \return  0 on a match, -1 otherwise.
 *
 * Treat rhs and lhs as arrays of vpd device id descriptors.  Walk lhs matching
 * against each element in rhs until all data are exhausted or we have found
 * a match.
 */
int
scsi_devid_match(uint8_t *lhs, size_t lhs_len, uint8_t *rhs, size_t rhs_len)
{
	struct scsi_vpd_id_descriptor *lhs_id;
	struct scsi_vpd_id_descriptor *lhs_last;
	struct scsi_vpd_id_descriptor *rhs_last;
	uint8_t *lhs_end;
	uint8_t *rhs_end;

	lhs_end = lhs + lhs_len;
	rhs_end = rhs + rhs_len;

	/*
	 * rhs_last and lhs_last are the last possible position of a valid
	 * descriptor assuming it had a zero length identifier.  We use
	 * these variables to ensure we can safely dereference the length
	 * field in our loop termination tests.
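	 *
	 * (Concretely: a descriptor whose header started beyond lhs_last
	 * or rhs_last would have its length byte past the end of the
	 * buffer, so the loops below stop before ever dereferencing it.)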
*/ lhs_last = (struct scsi_vpd_id_descriptor *) (lhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier)); rhs_last = (struct scsi_vpd_id_descriptor *) (rhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier)); lhs_id = (struct scsi_vpd_id_descriptor *)lhs; while (lhs_id <= lhs_last && (lhs_id->identifier + lhs_id->length) <= lhs_end) { struct scsi_vpd_id_descriptor *rhs_id; rhs_id = (struct scsi_vpd_id_descriptor *)rhs; while (rhs_id <= rhs_last && (rhs_id->identifier + rhs_id->length) <= rhs_end) { if ((rhs_id->id_type & (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) == (lhs_id->id_type & (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) && rhs_id->length == lhs_id->length && memcmp(rhs_id->identifier, lhs_id->identifier, rhs_id->length) == 0) return (0); rhs_id = (struct scsi_vpd_id_descriptor *) (rhs_id->identifier + rhs_id->length); } lhs_id = (struct scsi_vpd_id_descriptor *) (lhs_id->identifier + lhs_id->length); } return (-1); } #ifdef _KERNEL int scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id) { struct cam_ed *device; struct scsi_vpd_supported_pages *vpds; int i, num_pages; device = periph->path->device; vpds = (struct scsi_vpd_supported_pages *)device->supported_vpds; if (vpds != NULL) { num_pages = device->supported_vpds_len - SVPD_SUPPORTED_PAGES_HDR_LEN; for (i = 0; i < num_pages; i++) { if (vpds->page_list[i] == page_id) return (1); } } return (0); } static void init_scsi_delay(void) { int delay; delay = SCSI_DELAY; TUNABLE_INT_FETCH("kern.cam.scsi_delay", &delay); if (set_scsi_delay(delay) != 0) { printf("cam: invalid value for tunable kern.cam.scsi_delay\n"); set_scsi_delay(SCSI_DELAY); } } SYSINIT(scsi_delay, SI_SUB_TUNABLES, SI_ORDER_ANY, init_scsi_delay, NULL); static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS) { int error, delay; delay = scsi_delay; error = sysctl_handle_int(oidp, &delay, 0, req); if (error != 0 || req->newptr == NULL) return (error); return (set_scsi_delay(delay)); } SYSCTL_PROC(_kern_cam, OID_AUTO, scsi_delay, CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_scsi_delay, "I", "Delay to allow devices to settle after a SCSI bus reset (ms)"); static int set_scsi_delay(int delay) { /* * If someone sets this to 0, we assume that they want the * minimum allowable bus settle delay. */ if (delay == 0) { printf("cam: using minimum scsi_delay (%dms)\n", SCSI_MIN_DELAY); delay = SCSI_MIN_DELAY; } if (delay < SCSI_MIN_DELAY) return (EINVAL); scsi_delay = delay; return (0); } #endif /* _KERNEL */ Index: head/sys/cam/scsi/scsi_cd.c =================================================================== --- head/sys/cam/scsi/scsi_cd.c (revision 326264) +++ head/sys/cam/scsi/scsi_cd.c (revision 326265) @@ -1,3715 +1,3717 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Portions of this driver taken from the original FreeBSD cd driver. * Written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems for use under the MACH(2.5) operating system. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * from: cd.c,v 1.83 1997/05/04 15:24:22 joerg Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_cd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LEADOUT 0xaa /* leadout toc entry */ struct cd_params { u_int32_t blksize; u_long disksize; }; typedef enum { CD_Q_NONE = 0x00, CD_Q_NO_TOUCH = 0x01, CD_Q_BCD_TRACKS = 0x02, CD_Q_10_BYTE_ONLY = 0x10, CD_Q_RETRY_BUSY = 0x40 } cd_quirks; #define CD_Q_BIT_STRING \ "\020" \ "\001NO_TOUCH" \ "\002BCD_TRACKS" \ "\00510_BYTE_ONLY" \ "\007RETRY_BUSY" typedef enum { CD_FLAG_INVALID = 0x0001, CD_FLAG_NEW_DISC = 0x0002, CD_FLAG_DISC_LOCKED = 0x0004, CD_FLAG_DISC_REMOVABLE = 0x0008, CD_FLAG_SAW_MEDIA = 0x0010, CD_FLAG_ACTIVE = 0x0080, CD_FLAG_SCHED_ON_COMP = 0x0100, CD_FLAG_RETRY_UA = 0x0200, CD_FLAG_VALID_MEDIA = 0x0400, CD_FLAG_VALID_TOC = 0x0800, CD_FLAG_SCTX_INIT = 0x1000 } cd_flags; typedef enum { CD_CCB_PROBE = 0x01, CD_CCB_BUFFER_IO = 0x02, CD_CCB_TUR = 0x04, CD_CCB_TYPE_MASK = 0x0F, CD_CCB_RETRY_UA = 0x10 } cd_ccb_state; #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct cd_tocdata { struct ioc_toc_header header; struct cd_toc_entry entries[100]; }; struct cd_toc_single { struct ioc_toc_header header; struct cd_toc_entry entry; }; typedef enum { CD_STATE_PROBE, CD_STATE_NORMAL } cd_state; struct cd_softc { cam_pinfo pinfo; cd_state state; volatile cd_flags flags; struct bio_queue_head bio_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; struct cd_params params; union ccb saved_ccb; cd_quirks quirks; struct cam_periph *periph; int minimum_command_size; int outstanding_cmds; int tur; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; STAILQ_HEAD(, cd_mode_params) mode_queue; struct cd_tocdata toc; struct disk *disk; struct callout mediapoll_c; #define CD_ANNOUNCETMP_SZ 120 char 
announce_temp[CD_ANNOUNCETMP_SZ]; #define CD_ANNOUNCE_SZ 400 char announce_buf[CD_ANNOUNCE_SZ]; }; struct cd_page_sizes { int page; int page_size; }; static struct cd_page_sizes cd_page_size_table[] = { { AUDIO_PAGE, sizeof(struct cd_audio_page)} }; struct cd_quirk_entry { struct scsi_inquiry_pattern inq_pat; cd_quirks quirks; }; /* * NOTE ON 10_BYTE_ONLY quirks: Any 10_BYTE_ONLY quirks MUST be because * your device hangs when it gets a 10 byte command. Adding a quirk just * to get rid of the informative diagnostic message is not acceptable. All * 10_BYTE_ONLY quirks must be documented in full in a PR (which should be * referenced in a comment along with the quirk) , and must be approved by * ken@FreeBSD.org. Any quirks added that don't adhere to this policy may * be removed until the submitter can explain why they are needed. * 10_BYTE_ONLY quirks will be removed (as they will no longer be necessary) * when the CAM_NEW_TRAN_CODE work is done. */ static struct cd_quirk_entry cd_quirk_table[] = { { { T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"}, /* quirks */ CD_Q_BCD_TRACKS }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. */ {T_CDROM, SIP_MEDIA_REMOVABLE, "NECVMWar", "VMware IDE CDR10", "*"}, /*quirks*/ CD_Q_RETRY_BUSY } }; static disk_open_t cdopen; static disk_close_t cdclose; static disk_ioctl_t cdioctl; static disk_strategy_t cdstrategy; static periph_init_t cdinit; static periph_ctor_t cdregister; static periph_dtor_t cdcleanup; static periph_start_t cdstart; static periph_oninv_t cdoninvalidate; static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS); static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags); static void cddone(struct cam_periph *periph, union ccb *start_ccb); static union cd_pages *cdgetpage(struct cd_mode_params *mode_params); static int cdgetpagesize(int page_num); static void cdprevent(struct cam_periph *periph, int action); static int cdcheckmedia(struct cam_periph *periph); static int cdsize(struct cam_periph *periph, u_int32_t *size); static int cd6byteworkaround(union ccb *ccb); static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags); static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page); static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data); static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len); static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len); static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf); static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex); static int cdpause(struct cam_periph *periph, u_int32_t go); static int cdstopunit(struct cam_periph *periph, u_int32_t eject); static int cdstartunit(struct cam_periph *periph, int load); static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed); static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); 
static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct); static timeout_t cdmediapoll; static struct periph_driver cddriver = { cdinit, "cd", TAILQ_HEAD_INITIALIZER(cddriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(cd, cddriver); #ifndef CD_DEFAULT_POLL_PERIOD #define CD_DEFAULT_POLL_PERIOD 3 #endif #ifndef CD_DEFAULT_RETRY #define CD_DEFAULT_RETRY 4 #endif #ifndef CD_DEFAULT_TIMEOUT #define CD_DEFAULT_TIMEOUT 30000 #endif static int cd_poll_period = CD_DEFAULT_POLL_PERIOD; static int cd_retry_count = CD_DEFAULT_RETRY; static int cd_timeout = CD_DEFAULT_TIMEOUT; static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN, &cd_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN, &cd_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN, &cd_timeout, 0, "Timeout, in us, for read operations"); static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers"); static void cdinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, cdasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("cd: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void cddiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void cdoninvalidate(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, cdasync, periph, periph->path); softc->flags |= CD_FLAG_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); disk_gone(softc->disk); } static void cdcleanup(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & CD_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_CDROM && SID_TYPE(&cgd->inq_data) != T_WORM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. 
*/ status = cam_periph_alloc(cdregister, cdoninvalidate, cdcleanup, cdstart, "cd", CAM_PERIPH_BIO, path, cdasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("cdasync: Unable to attach new device " "due to status 0x%x\n", status); break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct cd_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all media change UNIT ATTENTIONs except * our own, as they will be handled by cderror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct cd_softc *)periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct cd_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= CD_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= CD_CCB_RETRY_UA; /* FALLTHROUGH */ } default: cam_periph_async(periph, code, path, arg); break; } } static void cdsysctlinit(void *context, int pending) { struct cam_periph *periph; struct cd_softc *softc; char tmpstr[80], tmpstr2[80]; periph = (struct cam_periph *)context; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return; softc = (struct cd_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM CD unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= CD_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_cd), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("cdsysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can set the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_command_size, 0, cdcmdsizesysctl, "I", "Minimum CDB size"); cam_periph_release(periph); } /* * We have a handler function for this so we can check the values when the * user sets them, instead of every time we look at them. */ static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * The only real values we can have here are 6 or 10. I don't * really foresee having 12 be an option at any time in the future. * So if the user sets something less than or equal to 6, we'll set * it to 6. If he sets something greater than 6, we'll set it to 10. * * I suppose we could just return an error here for the wrong values, * but I don't think it's necessary to do so, as long as we can * determine the user's intent without too much trouble.
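 * The clamping below mirrors the tunable handling in cdregister(): * values less than or equal to 6 become 6, anything larger becomes 10.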
*/ if (value < 6) value = 6; else if (value > 6) value = 10; *(int *)arg1 = value; return (0); } static cam_status cdregister(struct cam_periph *periph, void *arg) { struct cd_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("cdregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct cd_softc *)malloc(sizeof(*softc),M_DEVBUF, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("cdregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); STAILQ_INIT(&softc->mode_queue); softc->state = CD_STATE_PROBE; bioq_init(&softc->bio_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= CD_FLAG_DISC_REMOVABLE; periph->softc = softc; softc->periph = periph; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)cd_quirk_table, nitems(cd_quirk_table), sizeof(*cd_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct cd_quirk_entry *)match)->quirks; else softc->quirks = CD_Q_NONE; /* Check if the SIM does not want 6 byte commands */ bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= CD_Q_10_BYTE_ONLY; TASK_INIT(&softc->sysctl_task, 0, cdsysctlinit, periph); /* The default is 6 byte commands, unless quirked otherwise */ if (softc->quirks & CD_Q_10_BYTE_ONLY) softc->minimum_command_size = 10; else softc->minimum_command_size = 6; /* * Refcount and block open attempts until we are set up. * Can't block. */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.cd.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_command_size); /* 6 and 10 are the only permissible values here. */ if (softc->minimum_command_size < 6) softc->minimum_command_size = 6; else if (softc->minimum_command_size > 6) softc->minimum_command_size = 10; /* * We need to register the statistics structure for this device, * but we don't have the blocksize yet for it. So, we register * the structure and indicate that we don't have the blocksize * yet. Unlike other SCSI peripheral drivers, we explicitly set * the device type here to be CDROM, rather than just ORing in * the device type. This is because this driver can attach to either * CDROM or WORM devices, and we want this peripheral driver to * show up in the devstat list as a CD peripheral driver, not a * WORM peripheral driver. WORM drives will also have the WORM * driver attached to them.
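 * The blocksize is registered as DEVSTAT_BS_UNAVAILABLE here; * cdcheckmedia() fills in the real value and clears that flag once * media is found.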
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry("cd", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, DEVSTAT_TYPE_CDROM | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_CD); softc->disk->d_open = cdopen; softc->disk->d_close = cdclose; softc->disk->d_strategy = cdstrategy; softc->disk->d_gone = cddiskgonecb; softc->disk->d_ioctl = cdioctl; softc->disk->d_name = "cd"; cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_unit = periph->unit_number; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->disk->d_maxsize = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->disk->d_maxsize = MAXPHYS; /* for safety */ else softc->disk->d_maxsize = cpi.maxio; softc->disk->d_flags = 0; softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * cddiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_SCSI_AEN | AC_UNIT_ATTENTION, cdasync, periph, periph->path); /* * Schedule periodic media polling events. */ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && cd_poll_period != 0) callout_reset(&softc->mediapoll_c, cd_poll_period * hz, cdmediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int cdopen(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return(ENXIO); cam_periph_lock(periph); if (softc->flags & CD_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdopen\n")); /* * Check for media, and set the appropriate flags. We don't bail * if we don't have media, but then we don't allow anything but the * CDIOCEJECT/CDIOCCLOSE ioctls if there is no media.
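 * The return value of cdcheckmedia() is deliberately ignored below, * so the open itself still succeeds and eject/close requests can * reach the drive.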
*/ cdcheckmedia(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdopen\n")); cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int cdclose(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; cam_periph_lock(periph); if (cam_periph_hold(periph, PRIBIO) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (0); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdclose\n")); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0) cdprevent(periph, PR_ALLOW); /* * Since we're closing this CD, mark the blocksize as unavailable. * It will be marked as available when the CD is opened again. */ softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; /* * We'll check the media and toc again at the next open(). */ softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags, softc->disk->d_devstat); return(error); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void cdstrategy(struct bio *bp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdstrategy(%p)\n", bp)); softc = (struct cd_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & CD_FLAG_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * If we don't have valid media, look for it before trying to * schedule the I/O. */ if ((softc->flags & CD_FLAG_VALID_MEDIA) == 0) { int error; error = cdcheckmedia(periph); if (error != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, error); return; } } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&softc->bio_queue, bp); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void cdstart(struct cam_periph *periph, union ccb *start_ccb) { struct cd_softc *softc; struct bio *bp; struct ccb_scsiio *csio; struct scsi_read_capacity_data *rcap; softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstart\n")); switch (softc->state) { case CD_STATE_NORMAL: { bp = bioq_first(&softc->bio_queue); if (bp == NULL) { if (softc->tur) { softc->tur = 0; csio = &start_ccb->csio; scsi_test_unit_ready(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, cd_timeout); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); } else { if (softc->tur) { softc->tur = 0; cam_periph_release_locked(periph); } bioq_remove(&softc->bio_queue, bp); scsi_read_write(&start_ccb->csio, /*retries*/ cd_retry_count, /* cbfcnp */ cddone, MSG_SIMPLE_Q_TAG, /* read */bp->bio_cmd == BIO_READ ? 
SCSI_RW_READ : SCSI_RW_WRITE, /* byte2 */ 0, /* minimum_cmd_size */ 10, /* lba */ bp->bio_offset / softc->params.blksize, bp->bio_bcount / softc->params.blksize, /* data_ptr */ bp->bio_data, /* dxfer_len */ bp->bio_bcount, /* sense_len */ cd_retry_count ? SSD_FULL_SIZE : SF_NO_PRINT, /* timeout */ cd_timeout); /* Use READ CD command for audio tracks. */ if (softc->params.blksize == 2352) { start_ccb->csio.cdb_io.cdb_bytes[0] = READ_CD; start_ccb->csio.cdb_io.cdb_bytes[9] = 0xf8; start_ccb->csio.cdb_io.cdb_bytes[10] = 0; start_ccb->csio.cdb_io.cdb_bytes[11] = 0; start_ccb->csio.cdb_len = 12; } start_ccb->ccb_h.ccb_state = CD_CCB_BUFFER_IO; LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); softc->outstanding_cmds++; /* We expect a unit attention from this device */ if ((softc->flags & CD_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= CD_CCB_RETRY_UA; softc->flags &= ~CD_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL || softc->tur) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case CD_STATE_PROBE: { rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap == NULL) { xpt_print(periph->path, "cdstart: Couldn't malloc read_capacity data\n"); /* cd_free_periph??? */ break; } csio = &start_ccb->csio; scsi_read_capacity(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/20000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_PROBE; xpt_action(start_ccb); break; } } } static void cddone(struct cam_periph *periph, union ccb *done_ccb) { struct cd_softc *softc; struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cddone\n")); softc = (struct cd_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state & CD_CCB_TYPE_MASK) { case CD_CCB_BUFFER_IO: { struct bio *bp; int error; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int sf; if ((done_ccb->ccb_h.ccb_state & CD_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = cderror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } } if (error != 0) { xpt_print(periph->path, "cddone: got error %#x back\n", error); bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* * Short transfer ??? * XXX: not sure this is correct for partial * transfers at EOM */ bp->bio_flags |= BIO_ERROR; } } LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); softc->outstanding_cmds--; biofinish(bp, NULL, 0); break; } case CD_CCB_PROBE: { struct scsi_read_capacity_data *rdcap; char *announce_buf; struct cd_params *cdp; int error; cdp = &softc->params; announce_buf = softc->announce_temp; rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; cdp->disksize = scsi_4btoul (rdcap->addr) + 1; cdp->blksize = scsi_4btoul (rdcap->length); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot.
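 * cderror() is invoked below with SF_RETRY_UA | SF_NO_PRINT, so the * retries happen silently rather than cluttering the boot console.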
*/ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP || (error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT)) == 0) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)cdp->disksize * cdp->blksize) / (1024 * 1024), (uintmax_t)cdp->disksize, cdp->blksize); } else { if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); status = done_ccb->ccb_h.status; xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * Attach to anything that claims to be a * CDROM or WORM device, as long as it * doesn't return a "Logical unit not * supported" (0x25) error. */ if ((have_sense) && (asc != 0x25) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else if ((have_sense == 0) && ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (csio->scsi_status == SCSI_STATUS_BUSY)) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: SCSI Status: %s", scsi_status_string(csio)); } else if (SID_TYPE(&cgd.inq_data) == T_CDROM) { /* * We only print out an error for * CDROM type devices. For WORM * devices, we don't print out an * error since a few WORM devices * don't support CDROM commands. * If we have sense information, go * ahead and print it out. * Otherwise, just say that we * couldn't attach. */ /* * Just print out the error, not * the full probe message, when we * don't attach. */ if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } else { /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } } } free(rdcap, M_SCSICD); if (announce_buf != NULL) { struct sbuf sb; sbuf_new(&sb, softc->announce_buf, CD_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, CD_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ taskqueue_enqueue(taskqueue_thread,&softc->sysctl_task); } softc->state = CD_STATE_NORMAL; /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation.
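 * cam_periph_unhold() below also drops the hold taken in cdregister(), * unblocking any open attempts that arrived while we were probing.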
*/ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } case CD_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static union cd_pages * cdgetpage(struct cd_mode_params *mode_params) { union cd_pages *page; if (mode_params->cdb_size == 10) page = (union cd_pages *)find_mode_page_10( (struct scsi_mode_header_10 *)mode_params->mode_buf); else page = (union cd_pages *)find_mode_page_6( (struct scsi_mode_header_6 *)mode_params->mode_buf); return (page); } static int cdgetpagesize(int page_num) { u_int i; for (i = 0; i < nitems(cd_page_size_table); i++) { if (cd_page_size_table[i].page == page_num) return (cd_page_size_table[i].page_size); } return (-1); } static int cdioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; int nocopyout, error = 0; periph = (struct cam_periph *)dp->d_drv1; cam_periph_lock(periph); softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdioctl(%#lx)\n", cmd)); if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * If we don't have media loaded, check for it. If we still don't * have media loaded, we can only do a load or eject. * * We only care whether media is loaded if this is a cd-specific ioctl * (thus the IOCGROUP check below). Note that this will break if * anyone adds any ioctls into the switch statement below that don't * have their ioctl group set to 'c'. */ if (((softc->flags & CD_FLAG_VALID_MEDIA) == 0) && ((cmd != CDIOCCLOSE) && (cmd != CDIOCEJECT)) && (IOCGROUP(cmd) == 'c')) { error = cdcheckmedia(periph); if (error != 0) { cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } } /* * Drop the lock here so later mallocs can use WAITOK. The periph * is essentially locked still with the cam_periph_hold call above. */ cam_periph_unlock(periph); nocopyout = 0; switch (cmd) { case CDIOCPLAYTRACKS: { struct ioc_play_track *args = (struct ioc_play_track *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYTRACKS\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } /* * This was originally implemented with the PLAY * AUDIO TRACK INDEX command, but that command was * deprecated after SCSI-2. Most (all?) SCSI CDROM * drives support it but ATAPI and ATAPI-derivative * drives don't seem to support it. So we keep a * cache of the table of contents and translate * track numbers to MSF format.
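 * The cached TOC is built by cdcheckmedia(); when it is not valid we * fall back to the PLAY AUDIO TRACK INDEX path below.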
*/ if (softc->flags & CD_FLAG_VALID_TOC) { union msf_lba *sentry, *eentry; int st, et; if (args->end_track < softc->toc.header.ending_track + 1) args->end_track++; if (args->end_track > softc->toc.header.ending_track + 1) args->end_track = softc->toc.header.ending_track + 1; st = args->start_track - softc->toc.header.starting_track; et = args->end_track - softc->toc.header.starting_track; if ((st < 0) || (et < 0) || (st > (softc->toc.header.ending_track - softc->toc.header.starting_track))) { error = EINVAL; cam_periph_unlock(periph); break; } sentry = &softc->toc.entries[st].addr; eentry = &softc->toc.entries[et].addr; error = cdplaymsf(periph, sentry->msf.minute, sentry->msf.second, sentry->msf.frame, eentry->msf.minute, eentry->msf.second, eentry->msf.frame); } else { /* * If we don't have a valid TOC, try the * play track index command. It is part of * the SCSI-2 spec, but was removed in the * MMC specs. ATAPI and ATAPI-derived * drives don't support it. */ if (softc->quirks & CD_Q_BCD_TRACKS) { args->start_track = bin2bcd(args->start_track); args->end_track = bin2bcd(args->end_track); } error = cdplaytracks(periph, args->start_track, args->start_index, args->end_track, args->end_index); } cam_periph_unlock(periph); } break; case CDIOCPLAYMSF: { struct ioc_play_msf *args = (struct ioc_play_msf *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYMSF\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplaymsf(periph, args->start_m, args->start_s, args->start_f, args->end_m, args->end_s, args->end_f); cam_periph_unlock(periph); } break; case CDIOCPLAYBLOCKS: { struct ioc_play_blocks *args = (struct ioc_play_blocks *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYBLOCKS\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplay(periph, args->blk, args->len); cam_periph_unlock(periph); } break; case CDIOCREADSUBCHANNEL_SYSSPACE: nocopyout = 1; /* Fallthrough */ case CDIOCREADSUBCHANNEL: { struct ioc_read_subchannel *args = (struct ioc_read_subchannel *) addr; struct cd_sub_channel_info *data; u_int32_t len = args->data_len; data = malloc(sizeof(struct cd_sub_channel_info), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCREADSUBCHANNEL\n")); if ((len > sizeof(struct cd_sub_channel_info)) || (len < sizeof(struct cd_sub_channel_header))) { printf( "scsi_cd: cdioctl: " "cdioreadsubchannel: error, len=%d\n", len); error = EINVAL; free(data, M_SCSICD); cam_periph_unlock(periph); break; } if 
(softc->quirks & CD_Q_BCD_TRACKS) args->track = bin2bcd(args->track); error = cdreadsubchannel(periph, args->address_format, args->data_format, args->track, data, len); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->what.track_info.track_number = bcd2bin(data->what.track_info.track_number); len = min(len, ((data->header.data_len[0] << 8) + data->header.data_len[1] + sizeof(struct cd_sub_channel_header))); cam_periph_unlock(periph); if (nocopyout == 0) { if (copyout(data, args->data, len) != 0) { error = EFAULT; } } else { bcopy(data, args->data, len); } free(data, M_SCSICD); } break; case CDIOREADTOCHEADER: { struct ioc_toc_header *th; th = malloc(sizeof(struct ioc_toc_header), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCHEADER\n")); error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/SF_NO_PRINT); if (error) { free(th, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } th->len = ntohs(th->len); bcopy(th, addr, sizeof(*th)); free(th, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOREADTOCENTRYS: { struct cd_tocdata *data; struct cd_toc_single *lead; struct ioc_read_toc_entry *te = (struct ioc_read_toc_entry *) addr; struct ioc_toc_header *th; u_int32_t len, readlen, idx, num; u_int32_t starting_track = te->starting_track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); lead = malloc(sizeof(*lead), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRYS\n")); if (te->data_len < sizeof(struct cd_toc_entry) || (te->data_len % sizeof(struct cd_toc_entry)) != 0 || (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT)) { error = EINVAL; printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } if (starting_track == 0) starting_track = th->starting_track; else if (starting_track == LEADOUT) starting_track = th->ending_track + 1; else if (starting_track < th->starting_track || starting_track > th->ending_track + 1) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); error = EINVAL; break; } /* calculate reading length without leadout entry */ readlen = (th->ending_track - starting_track + 1) * sizeof(struct cd_toc_entry); /* and with leadout entry */ len = readlen + sizeof(struct cd_toc_entry); if (te->data_len < len) { len = te->data_len; if (readlen > len) readlen = len; } if (len > sizeof(data->entries)) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); error = EINVAL; free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } num = len / sizeof(struct cd_toc_entry); if 
(readlen > 0) { error = cdreadtoc(periph, te->address_format, starting_track, (u_int8_t *)data, readlen + sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } } /* make leadout entry if needed */ idx = starting_track + num - 1; if (softc->quirks & CD_Q_BCD_TRACKS) th->ending_track = bcd2bin(th->ending_track); if (idx == th->ending_track + 1) { error = cdreadtoc(periph, te->address_format, LEADOUT, (u_int8_t *)lead, sizeof(*lead), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } data->entries[idx - starting_track] = lead->entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (idx = 0; idx < num - 1; idx++) { data->entries[idx].track = bcd2bin(data->entries[idx].track); } } cam_periph_unlock(periph); error = copyout(data->entries, te->data, len); free(data, M_SCSICD); free(lead, M_SCSICD); } break; case CDIOREADTOCENTRY: { struct cd_toc_single *data; struct ioc_read_toc_single_entry *te = (struct ioc_read_toc_single_entry *) addr; struct ioc_toc_header *th; u_int32_t track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRY\n")); if (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) { printf("error in readtocentry, " "returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } track = te->track; if (track == 0) track = th->starting_track; else if (track == LEADOUT) /* OK */; else if (track < th->starting_track || track > th->ending_track + 1) { printf("error in readtocentry, " "returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } error = cdreadtoc(periph, te->address_format, track, (u_int8_t *)data, sizeof(*data), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->entry.track = bcd2bin(data->entry.track); bcopy(&data->entry, &te->entry, sizeof(struct cd_toc_entry)); free(data, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETPATCH: { struct ioc_patch *arg = (struct ioc_patch *)addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETPATCH\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = arg->patch[0]; page->audio.port[RIGHT_PORT].channels = arg->patch[1]; page->audio.port[2].channels = arg->patch[2]; page->audio.port[3].channels = arg->patch[3]; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCGETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); 
params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCGETVOL\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); arg->vol[LEFT_PORT] = page->audio.port[LEFT_PORT].volume; arg->vol[RIGHT_PORT] = page->audio.port[RIGHT_PORT].volume; arg->vol[2] = page->audio.port[2].volume; arg->vol[3] = page->audio.port[3].volume; free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETVOL\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = CHANNEL_0; page->audio.port[LEFT_PORT].volume = arg->vol[LEFT_PORT]; page->audio.port[RIGHT_PORT].channels = CHANNEL_1; page->audio.port[RIGHT_PORT].volume = arg->vol[RIGHT_PORT]; page->audio.port[2].volume = arg->vol[2]; page->audio.port[3].volume = arg->vol[3]; error = cdsetmode(periph, &params); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETMONO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMONO\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETSTEREO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETSTEREO\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETMUTE: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMUTE\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = 0; page->audio.port[RIGHT_PORT].channels = 0; page->audio.port[2].channels = 0; 
page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETLEFT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETLEFT\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETRIGHT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETRIGHT\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCRESUME: cam_periph_lock(periph); error = cdpause(periph, 1); cam_periph_unlock(periph); break; case CDIOCPAUSE: cam_periph_lock(periph); error = cdpause(periph, 0); cam_periph_unlock(periph); break; case CDIOCSTART: cam_periph_lock(periph); error = cdstartunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCCLOSE: cam_periph_lock(periph); error = cdstartunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCSTOP: cam_periph_lock(periph); error = cdstopunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCEJECT: cam_periph_lock(periph); error = cdstopunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCALLOW: cam_periph_lock(periph); cdprevent(periph, PR_ALLOW); cam_periph_unlock(periph); break; case CDIOCPREVENT: cam_periph_lock(periph); cdprevent(periph, PR_PREVENT); cam_periph_unlock(periph); break; case CDIOCSETDEBUG: /* sc_link->flags |= (SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCCLRDEBUG: /* sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCRESET: /* return (cd_reset(periph)); */ error = ENOTTY; break; case CDRIOCREADSPEED: cam_periph_lock(periph); error = cdsetspeed(periph, *(u_int32_t *)addr, CDR_MAX_SPEED); cam_periph_unlock(periph); break; case CDRIOCWRITESPEED: cam_periph_lock(periph); error = cdsetspeed(periph, CDR_MAX_SPEED, *(u_int32_t *)addr); cam_periph_unlock(periph); break; case CDRIOCGETBLOCKSIZE: *(int *)addr = softc->params.blksize; break; case CDRIOCSETBLOCKSIZE: if (*(int *)addr <= 0) { error = EINVAL; break; } softc->disk->d_sectorsize = softc->params.blksize = *(int *)addr; break; case DVDIOCSENDKEY: case DVDIOCREPORTKEY: { struct dvd_authinfo *authinfo; authinfo = (struct dvd_authinfo *)addr; if (cmd == DVDIOCREPORTKEY) error = cdreportkey(periph, authinfo); else error = cdsendkey(periph, authinfo); break; } case DVDIOCREADSTRUCTURE: { struct dvd_struct *dvdstruct; dvdstruct = (struct dvd_struct *)addr; error = 
cdreaddvdstructure(periph, dvdstruct); break; } default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, addr, cderror); cam_periph_unlock(periph); break; } cam_periph_lock(periph); cam_periph_unhold(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdioctl\n")); if (error && bootverbose) { printf("scsi_cd.c::ioctl cmd=%08lx error=%d\n", cmd, error); } cam_periph_unlock(periph); return (error); } static void cdprevent(struct cam_periph *periph, int action) { union ccb *ccb; struct cd_softc *softc; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdprevent\n")); softc = (struct cd_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & CD_FLAG_DISC_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & CD_FLAG_DISC_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, /* timeout */60000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~CD_FLAG_DISC_LOCKED; else softc->flags |= CD_FLAG_DISC_LOCKED; } } /* * XXX: the disk media and sector size can only really change * XXX: while the device is closed. */ static int cdcheckmedia(struct cam_periph *periph) { struct cd_softc *softc; struct ioc_toc_header *toch; struct cd_toc_single leadout; u_int32_t size, toclen; int error, num_entries, cdindex; softc = (struct cd_softc *)periph->softc; cdprevent(periph, PR_PREVENT); softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; /* * Get the disc size and block size. If we can't get it, we don't * have media, most likely. */ if ((error = cdsize(periph, &size)) != 0) { softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cdprevent(periph, PR_ALLOW); return (error); } else { softc->flags |= CD_FLAG_SAW_MEDIA | CD_FLAG_VALID_MEDIA; softc->disk->d_sectorsize = softc->params.blksize; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } /* * Now we check the table of contents. This (currently) is only * used for the CDIOCPLAYTRACKS ioctl. It may be used later to do * things like present a separate entry in /dev for each track, * like the acd(4) driver does. */ bzero(&softc->toc, sizeof(softc->toc)); toch = &softc->toc.header; /* * We will get errors here for media that doesn't have a table of * contents. According to the MMC-3 spec: "When a Read TOC/PMA/ATIP * command is presented for a DDCD/CD-R/RW media, where the first TOC * has not been recorded (no complete session) and the Format codes * 0000b, 0001b, or 0010b are specified, this command shall be rejected * with an INVALID FIELD IN CDB. Devices that are not capable of * reading an incomplete session on DDCD/CD-R/RW media shall report * CANNOT READ MEDIUM - INCOMPATIBLE FORMAT." * * So this isn't fatal if we can't read the table of contents; it * just means that the user won't be able to issue the play tracks * ioctl, and likely lots of other stuff won't work either. They * need to burn the CD before we can do a whole lot with it. So * we don't print anything here if we get an error back. */ error = cdreadtoc(periph, 0, 0, (u_int8_t *)toch, sizeof(*toch), SF_NO_PRINT); /* * Errors in reading the table of contents aren't fatal, we just * won't have a valid table of contents cached. */
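/* CD_FLAG_VALID_TOC stays clear in that case, so track-based playback * falls back to the PLAY AUDIO TRACK INDEX path in cdioctl(). */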
if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* Number of TOC entries, plus leadout */ num_entries = (toch->ending_track - toch->starting_track) + 2; if (num_entries <= 0) goto bailout; toclen = num_entries * sizeof(struct cd_toc_entry); error = cdreadtoc(periph, CD_MSF_FORMAT, toch->starting_track, (u_int8_t *)&softc->toc, toclen + sizeof(*toch), SF_NO_PRINT); if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* * XXX KDM is this necessary? Probably only if the drive doesn't * return leadout information with the table of contents. */ cdindex = toch->starting_track + num_entries - 1; if (cdindex == toch->ending_track + 1) { error = cdreadtoc(periph, CD_MSF_FORMAT, LEADOUT, (u_int8_t *)&leadout, sizeof(leadout), SF_NO_PRINT); if (error != 0) { error = 0; goto bailout; } softc->toc.entries[cdindex - toch->starting_track] = leadout.entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (cdindex = 0; cdindex < num_entries - 1; cdindex++) { softc->toc.entries[cdindex].track = bcd2bin(softc->toc.entries[cdindex].track); } } softc->flags |= CD_FLAG_VALID_TOC; /* If the first track is audio, correct sector size. */ if ((softc->toc.entries[0].control & 4) == 0) { softc->disk->d_sectorsize = softc->params.blksize = 2352; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } bailout: /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? */ if ((softc->disk->d_devstat->flags & DEVSTAT_BS_UNAVAILABLE) != 0) softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->disk->d_devstat->block_size = softc->params.blksize; return (error); } static int cdsize(struct cam_periph *periph, u_int32_t *size) { struct cd_softc *softc; union ccb *ccb; struct scsi_read_capacity_data *rcap_buf; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdsize\n")); softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); /* XXX Should be M_WAITOK */ rcap_buf = malloc(sizeof(struct scsi_read_capacity_data), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap_buf == NULL) return (ENOMEM); scsi_read_capacity(&ccb->csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, rcap_buf, SSD_FULL_SIZE, /* timeout */20000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); softc->params.disksize = scsi_4btoul(rcap_buf->addr) + 1; softc->params.blksize = scsi_4btoul(rcap_buf->length); /* Make sure we got at least some block size. */ if (error == 0 && softc->params.blksize == 0) error = EIO; /* * SCSI-3 mandates that the reported blocksize shall be 2048. * Older drives sometimes report funny values; trim it down to * 2048, or other parts of the kernel will get confused. * * XXX we leave drives alone that might report 512 bytes, as * well as drives reporting more weird sizes like perhaps 4K. */
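/* 2352 bytes is the raw (audio) sector size used elsewhere in this driver; * values in that range are presumably data discs misreporting their * blocksize. */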
*/ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; free(rcap_buf, M_SCSICD); *size = softc->params.disksize; return (error); } static int cd6byteworkaround(union ccb *ccb) { u_int8_t *cdb; struct cam_periph *periph; struct cd_softc *softc; struct cd_mode_params *params; int frozen, found; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; cdb = ccb->csio.cdb_io.cdb_bytes; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) || ((cdb[0] != MODE_SENSE_6) && (cdb[0] != MODE_SELECT_6))) return (0); /* * Because there is no convenient place to stash the overall * cd_mode_params structure pointer, we have to grab it like this. * This means that ALL MODE_SENSE and MODE_SELECT requests in the * cd(4) driver MUST go through cdgetmode() and cdsetmode()! * * XXX It would be nice if, at some point, we could increase the * number of available peripheral private pointers. Both pointers * are currently used in most every peripheral driver. */ found = 0; STAILQ_FOREACH(params, &softc->mode_queue, links) { if (params->mode_buf == ccb->csio.data_ptr) { found = 1; break; } } /* * This shouldn't happen. All mode sense and mode select * operations in the cd(4) driver MUST go through cdgetmode() and * cdsetmode()! */ if (found == 0) { xpt_print(periph->path, "mode buffer not found in mode queue!\n"); return (0); } params->cdb_size = 10; softc->minimum_command_size = 10; xpt_print(ccb->ccb_h.path, "%s(6) failed, increasing minimum CDB size to 10 bytes\n", (cdb[0] == MODE_SENSE_6) ? "MODE_SENSE" : "MODE_SELECT"); if (cdb[0] == MODE_SENSE_6) { struct scsi_mode_sense_10 ms10; struct scsi_mode_sense_6 *ms6; int len; ms6 = (struct scsi_mode_sense_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SENSE_10; ms10.byte2 = ms6->byte2; ms10.page = ms6->page; /* * 10 byte mode header, block descriptor, * sizeof(union cd_pages) */ len = sizeof(struct cd_mode_data_10); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } else { struct scsi_mode_select_10 ms10; struct scsi_mode_select_6 *ms6; struct scsi_mode_header_6 *header6; struct scsi_mode_header_10 *header10; struct scsi_mode_page_header *page_header; int blk_desc_len, page_num, page_size, len; ms6 = (struct scsi_mode_select_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SELECT_10; ms10.byte2 = ms6->byte2; header6 = (struct scsi_mode_header_6 *)params->mode_buf; header10 = (struct scsi_mode_header_10 *)params->mode_buf; page_header = find_mode_page_6(header6); page_num = page_header->page_code; blk_desc_len = header6->blk_desc_len; page_size = cdgetpagesize(page_num); if (page_size != (page_header->page_length + sizeof(*page_header))) page_size = page_header->page_length + sizeof(*page_header); len = sizeof(*header10) + blk_desc_len + page_size; len = min(params->alloc_len, len); /* * Since the 6 byte parameter header is shorter than the 10 * byte parameter header, we need to copy the actual mode * page data, and the block descriptor, if any, so things wind * up in the right place. The regions will overlap, but * bcopy() does the right thing. */ bcopy(params->mode_buf + sizeof(*header6), params->mode_buf + sizeof(*header10), len - sizeof(*header10)); /* Make sure these fields are set correctly. 
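 * The mode data length field is reserved (zero) for MODE SELECT, and * medium_type is zeroed for the same SONY quirk handled in cdsetmode().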
*/ scsi_ulto2b(0, header10->data_length); header10->medium_type = 0; scsi_ulto2b(blk_desc_len, header10->blk_desc_len); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = 0; /* * We use a status of CAM_REQ_INVALID as shorthand -- if a 6 byte * CDB comes back with this particular error, try transforming it * into the 10 byte version. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cd6byteworkaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cd6byteworkaround(ccb); else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & CD_FLAG_SAW_MEDIA)) { softc->flags &= ~CD_FLAG_SAW_MEDIA; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (error); /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & CD_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return (cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static void cdmediapoll(void *arg) { struct cam_periph *periph = arg; struct cd_softc *softc = periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur && softc->outstanding_cmds == 0) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* Queue us up again */ if (cd_poll_period != 0) callout_schedule(&softc->mediapoll_c, cd_poll_period * hz); } /* * Read table of contents */ static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags) { struct scsi_read_toc *scsi_cmd; u_int32_t ntoc; struct ccb_scsiio *csio; union ccb *ccb; int error; ntoc = len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_toc), /* timeout */ 50000); scsi_cmd = (struct scsi_read_toc *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); if (mode == CD_MSF_FORMAT) scsi_cmd->byte2 |= CD_MSF; scsi_cmd->from_track = start; /* scsi_ulto2b(ntoc, (u_int8_t *)scsi_cmd->data_len); */ scsi_cmd->data_len[0] = (ntoc) >> 8; scsi_cmd->data_len[1] = (ntoc) & 0xff; scsi_cmd->op_code = READ_TOC; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA | sense_flags); xpt_release_ccb(ccb); return(error); } static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len) { struct scsi_read_subchannel *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int 
error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_subchannel), /* timeout */ 50000); scsi_cmd = (struct scsi_read_subchannel *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_SUBCHANNEL; if (mode == CD_MSF_FORMAT) scsi_cmd->byte1 |= CD_MSF; scsi_cmd->byte2 = SRS_SUBQ; scsi_cmd->subchan_format = format; scsi_cmd->track = track; scsi_ulto2b(len, (u_int8_t *)scsi_cmd->data_len); scsi_cmd->control = 0; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } /* * All MODE_SENSE requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; data->cdb_size = softc->minimum_command_size; if (data->cdb_size < 10) param_len = sizeof(struct cd_mode_data); else param_len = sizeof(struct cd_mode_data_10); /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_sense_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ 0, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ page, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ softc->minimum_command_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* * It would be nice not to have to do this, but there's no * available pointer in the CCB that would allow us to stuff the * mode params structure in there and retrieve it in * cd6byteworkaround(), so we can set the cdb size. The cdb size * lets the caller know what CDB size we ended up using, so they * can find the actual mode page offset. */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); /* * This is a bit of belt-and-suspenders checking, but if we run * into a situation where the target sends back multiple block * descriptors, we might not have enough space in the buffer to * see the whole mode page. Better to return an error than * potentially access memory beyond our malloced region. */ if (error == 0) { u_int32_t data_len; if (data->cdb_size == 10) { struct scsi_mode_header_10 *hdr10; hdr10 = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(hdr10->data_length); data_len += sizeof(hdr10->data_length); } else { struct scsi_mode_header_6 *hdr6; hdr6 = (struct scsi_mode_header_6 *)data->mode_buf; data_len = hdr6->data_length; data_len += sizeof(hdr6->data_length); } /* * Complain if there is more mode data available than we * allocated space for. This could potentially happen if * we miscalculated the page length for some reason, if the * drive returns multiple block descriptors, or if it sets * the data length incorrectly. 
*/ if (data_len > data->alloc_len) { xpt_print(periph->path, "allocated modepage %d length " "%d < returned length %d\n", page, data->alloc_len, data_len); error = ENOSPC; } } return (error); } /* * All MODE_SELECT requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int cdb_size, param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = 0; /* * If the data is formatted for the 10 byte version of the mode * select parameter list, we need to use the 10 byte CDB. * Otherwise, we use whatever the stored minimum command size. */ if (data->cdb_size == 10) cdb_size = data->cdb_size; else cdb_size = softc->minimum_command_size; if (cdb_size >= 10) { struct scsi_mode_header_10 *mode_header; u_int32_t data_len; mode_header = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(mode_header->data_length); scsi_ulto2b(0, mode_header->data_length); /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; /* * Pass back whatever the drive passed to us, plus the size * of the data length field. */ param_len = data_len + sizeof(mode_header->data_length); } else { struct scsi_mode_header_6 *mode_header; mode_header = (struct scsi_mode_header_6 *)data->mode_buf; param_len = mode_header->data_length + 1; mode_header->data_length = 0; /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; } /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_select_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ 0, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ cdb_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* See comments in cdgetmode() and cd6byteworkaround(). */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); return (error); } static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len) { struct ccb_scsiio *csio; union ccb *ccb; int error; u_int8_t cdb_len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* * Use the smallest possible command to perform the operation. */ if ((len & 0xffff0000) == 0) { /* * We can fit in a 10 byte cdb. 
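* The size test above works because PLAY(10) carries its transfer length in a two-byte field: (len & 0xffff0000) == 0 is just a cheap way of saying len <= 0xffff. For example (hypothetical value), len == 0x12345 has bits set above bit 15, so only the four-byte xfer_len of PLAY(12) below can express it.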
*/ struct scsi_play_10 *scsi_cmd; scsi_cmd = (struct scsi_play_10 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_10; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto2b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } else { struct scsi_play_12 *scsi_cmd; scsi_cmd = (struct scsi_play_12 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_12; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto4b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, /*retries*/ cd_retry_count, cddone, /*flags*/CAM_DIR_NONE, MSG_SIMPLE_Q_TAG, /*dataptr*/NULL, /*datalen*/0, /*sense_len*/SSD_FULL_SIZE, cdb_len, /*timeout*/50 * 1000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf) { struct scsi_play_msf *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_msf), /* timeout */ 50000); scsi_cmd = (struct scsi_play_msf *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_MSF; scsi_cmd->start_m = startm; scsi_cmd->start_s = starts; scsi_cmd->start_f = startf; scsi_cmd->end_m = endm; scsi_cmd->end_s = ends; scsi_cmd->end_f = endf; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex) { struct scsi_play_track *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_track), /* timeout */ 50000); scsi_cmd = (struct scsi_play_track *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_TRACK; scsi_cmd->start_track = strack; scsi_cmd->start_index = sindex; scsi_cmd->end_track = etrack; scsi_cmd->end_index = eindex; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdpause(struct cam_periph *periph, u_int32_t go) { struct scsi_pause *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_pause), /* timeout */ 50000); scsi_cmd = (struct scsi_pause *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PAUSE; scsi_cmd->resume = go; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); 
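/* A note on the resume byte set above: it is the PAUSE/RESUME "resume" bit from the SCSI spec, of which only bit 0 is meaningful, so cdpause(periph, 0) pauses audio playback and cdpause(periph, 1) resumes it. */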
xpt_release_ccb(ccb); return(error); } static int cdstartunit(struct cam_periph *periph, int load) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ TRUE, /* load_eject */ load, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstopunit(struct cam_periph *periph, u_int32_t eject) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ FALSE, /* load_eject */ eject, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed) { struct scsi_set_speed *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* Preserve old behavior: units in multiples of CDROM speed */ if (rdspeed < 177) rdspeed *= 177; if (wrspeed < 177) wrspeed *= 177; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_set_speed), /* timeout */ 50000); scsi_cmd = (struct scsi_set_speed *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CD_SPEED; scsi_ulto2b(rdspeed, scsi_cmd->readspeed); scsi_ulto2b(wrspeed, scsi_cmd->writespeed); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; u_int32_t lba; int error; int length; error = 0; databuf = NULL; lba = 0; switch (authinfo->format) { case DVD_REPORT_AGID: length = sizeof(struct scsi_report_key_data_agid); break; case DVD_REPORT_CHALLENGE: length = sizeof(struct scsi_report_key_data_challenge); break; case DVD_REPORT_KEY1: length = sizeof(struct scsi_report_key_data_key1_key2); break; case DVD_REPORT_TITLE_KEY: length = sizeof(struct scsi_report_key_data_title); /* The lba field is only set for the title key */ lba = authinfo->lba; break; case DVD_REPORT_ASF: length = sizeof(struct scsi_report_key_data_asf); break; case DVD_REPORT_RPC: length = sizeof(struct scsi_report_key_data_rpc); break; case DVD_INVALIDATE_AGID: length = 0; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_report_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ lba, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; if (ccb->csio.resid != 0) { 
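/* csio.resid counts the bytes the device did not transfer, so a nonzero value here means the REPORT KEY payload came back shorter than the length we allocated for this key format above. */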
xpt_print(periph->path, "warning, residual for report key " "command is %d\n", ccb->csio.resid); } switch(authinfo->format) { case DVD_REPORT_AGID: { struct scsi_report_key_data_agid *agid_data; agid_data = (struct scsi_report_key_data_agid *)databuf; authinfo->agid = (agid_data->agid & RKD_AGID_MASK) >> RKD_AGID_SHIFT; break; } case DVD_REPORT_CHALLENGE: { struct scsi_report_key_data_challenge *chal_data; chal_data = (struct scsi_report_key_data_challenge *)databuf; bcopy(chal_data->challenge_key, authinfo->keychal, min(sizeof(chal_data->challenge_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_KEY1: { struct scsi_report_key_data_key1_key2 *key1_data; key1_data = (struct scsi_report_key_data_key1_key2 *)databuf; bcopy(key1_data->key1, authinfo->keychal, min(sizeof(key1_data->key1), sizeof(authinfo->keychal))); break; } case DVD_REPORT_TITLE_KEY: { struct scsi_report_key_data_title *title_data; title_data = (struct scsi_report_key_data_title *)databuf; authinfo->cpm = (title_data->byte0 & RKD_TITLE_CPM) >> RKD_TITLE_CPM_SHIFT; authinfo->cp_sec = (title_data->byte0 & RKD_TITLE_CP_SEC) >> RKD_TITLE_CP_SEC_SHIFT; authinfo->cgms = (title_data->byte0 & RKD_TITLE_CMGS_MASK) >> RKD_TITLE_CMGS_SHIFT; bcopy(title_data->title_key, authinfo->keychal, min(sizeof(title_data->title_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_ASF: { struct scsi_report_key_data_asf *asf_data; asf_data = (struct scsi_report_key_data_asf *)databuf; authinfo->asf = asf_data->success & RKD_ASF_SUCCESS; break; } case DVD_REPORT_RPC: { struct scsi_report_key_data_rpc *rpc_data; rpc_data = (struct scsi_report_key_data_rpc *)databuf; authinfo->reg_type = (rpc_data->byte4 & RKD_RPC_TYPE_MASK) >> RKD_RPC_TYPE_SHIFT; authinfo->vend_rsts = (rpc_data->byte4 & RKD_RPC_VENDOR_RESET_MASK) >> RKD_RPC_VENDOR_RESET_SHIFT; authinfo->user_rsts = rpc_data->byte4 & RKD_RPC_USER_RESET_MASK; authinfo->region = rpc_data->region_mask; authinfo->rpc_scheme = rpc_data->rpc_scheme1; break; } case DVD_INVALIDATE_AGID: break; default: /* This should be impossible, since we checked above */ error = EINVAL; goto bailout; break; /* NOTREACHED */ } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; int length; int error; error = 0; databuf = NULL; switch(authinfo->format) { case DVD_SEND_CHALLENGE: { struct scsi_report_key_data_challenge *challenge_data; length = sizeof(*challenge_data); challenge_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)challenge_data; scsi_ulto2b(length - sizeof(challenge_data->data_len), challenge_data->data_len); bcopy(authinfo->keychal, challenge_data->challenge_key, min(sizeof(authinfo->keychal), sizeof(challenge_data->challenge_key))); break; } case DVD_SEND_KEY2: { struct scsi_report_key_data_key1_key2 *key2_data; length = sizeof(*key2_data); key2_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)key2_data; scsi_ulto2b(length - sizeof(key2_data->data_len), key2_data->data_len); bcopy(authinfo->keychal, key2_data->key1, min(sizeof(authinfo->keychal), sizeof(key2_data->key1))); break; } case DVD_SEND_RPC: { struct scsi_send_key_data_rpc *rpc_data; length = sizeof(*rpc_data); rpc_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)rpc_data; scsi_ulto2b(length - sizeof(rpc_data->data_len), rpc_data->data_len); rpc_data->region_code = 
authinfo->region; break; } default: return (EINVAL); } cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_send_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct) { union ccb *ccb; u_int8_t *databuf; u_int32_t address; int error; int length; error = 0; databuf = NULL; /* The address is reserved for many of the formats */ address = 0; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: length = sizeof(struct scsi_read_dvd_struct_data_physical); break; case DVD_STRUCT_COPYRIGHT: length = sizeof(struct scsi_read_dvd_struct_data_copyright); break; case DVD_STRUCT_DISCKEY: length = sizeof(struct scsi_read_dvd_struct_data_disc_key); break; case DVD_STRUCT_BCA: length = sizeof(struct scsi_read_dvd_struct_data_bca); break; case DVD_STRUCT_MANUFACT: length = sizeof(struct scsi_read_dvd_struct_data_manufacturer); break; case DVD_STRUCT_CMI: return (ENODEV); case DVD_STRUCT_PROTDISCID: length = sizeof(struct scsi_read_dvd_struct_data_prot_discid); break; case DVD_STRUCT_DISCKEYBLOCK: length = sizeof(struct scsi_read_dvd_struct_data_disc_key_blk); break; case DVD_STRUCT_DDS: length = sizeof(struct scsi_read_dvd_struct_data_dds); break; case DVD_STRUCT_MEDIUM_STAT: length = sizeof(struct scsi_read_dvd_struct_data_medium_status); break; case DVD_STRUCT_SPARE_AREA: length = sizeof(struct scsi_read_dvd_struct_data_spare_area); break; case DVD_STRUCT_RMD_LAST: return (ENODEV); case DVD_STRUCT_RMD_RMA: return (ENODEV); case DVD_STRUCT_PRERECORDED: length = sizeof(struct scsi_read_dvd_struct_data_leadin); break; case DVD_STRUCT_UNIQUEID: length = sizeof(struct scsi_read_dvd_struct_data_disc_id); break; case DVD_STRUCT_DCB: return (ENODEV); case DVD_STRUCT_LIST: /* * This is the maximum allocation length for the READ DVD * STRUCTURE command. There's nothing in the MMC3 spec * that indicates a limit in the amount of data that can * be returned from this call, other than the limits * imposed by the 2-byte length variables. 
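* Concretely, the CDB's alloc_len is filled by scsi_ulto2b(), which stores a 16-bit big-endian value, so 65535 (0xffff) is the largest request READ DVD STRUCTURE can express; e.g. scsi_ulto2b(65535, alloc_len) yields the bytes { 0xff, 0xff }.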
*/ length = 65535; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_read_dvd_structure(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ address, /* layer_number */ dvdstruct->layer_num, /* key_format */ dvdstruct->format, /* agid */ dvdstruct->agid, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: { struct scsi_read_dvd_struct_data_layer_desc *inlayer; struct dvd_layer *outlayer; struct scsi_read_dvd_struct_data_physical *phys_data; phys_data = (struct scsi_read_dvd_struct_data_physical *)databuf; inlayer = &phys_data->layer_desc; outlayer = (struct dvd_layer *)&dvdstruct->data; dvdstruct->length = sizeof(*inlayer); outlayer->book_type = (inlayer->book_type_version & RDSD_BOOK_TYPE_MASK) >> RDSD_BOOK_TYPE_SHIFT; outlayer->book_version = (inlayer->book_type_version & RDSD_BOOK_VERSION_MASK); outlayer->disc_size = (inlayer->disc_size_max_rate & RDSD_DISC_SIZE_MASK) >> RDSD_DISC_SIZE_SHIFT; outlayer->max_rate = (inlayer->disc_size_max_rate & RDSD_MAX_RATE_MASK); outlayer->nlayers = (inlayer->layer_info & RDSD_NUM_LAYERS_MASK) >> RDSD_NUM_LAYERS_SHIFT; outlayer->track_path = (inlayer->layer_info & RDSD_TRACK_PATH_MASK) >> RDSD_TRACK_PATH_SHIFT; outlayer->layer_type = (inlayer->layer_info & RDSD_LAYER_TYPE_MASK); outlayer->linear_density = (inlayer->density & RDSD_LIN_DENSITY_MASK) >> RDSD_LIN_DENSITY_SHIFT; outlayer->track_density = (inlayer->density & RDSD_TRACK_DENSITY_MASK); outlayer->bca = (inlayer->bca & RDSD_BCA_MASK) >> RDSD_BCA_SHIFT; outlayer->start_sector = scsi_3btoul(inlayer->main_data_start); outlayer->end_sector = scsi_3btoul(inlayer->main_data_end); outlayer->end_sector_l0 = scsi_3btoul(inlayer->end_sector_layer0); break; } case DVD_STRUCT_COPYRIGHT: { struct scsi_read_dvd_struct_data_copyright *copy_data; copy_data = (struct scsi_read_dvd_struct_data_copyright *) databuf; dvdstruct->cpst = copy_data->cps_type; dvdstruct->rmi = copy_data->region_info; dvdstruct->length = 0; break; } default: /* * Tell the user what the overall length is, no matter * what we can actually fit in the data buffer. */ dvdstruct->length = length - ccb->csio.resid - sizeof(struct scsi_read_dvd_struct_data_header); /* * But only actually copy out the smaller of what we read * in or what the structure can take. 
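* For example (hypothetical sizes): if the drive returns 2052 bytes with no residual, dvdstruct->length becomes 2048 once the 4-byte header is subtracted, and the bcopy() below moves min(sizeof(dvdstruct->data), 2048) bytes, so a caller with a smaller buffer still learns the full length.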
*/ bcopy(databuf + sizeof(struct scsi_read_dvd_struct_data_header), dvdstruct->data, min(sizeof(dvdstruct->data), dvdstruct->length)); break; } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } void scsi_report_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t lba, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_key *scsi_cmd; scsi_cmd = (struct scsi_report_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_KEY; scsi_ulto4b(lba, scsi_cmd->lba); scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len == 0) ? CAM_DIR_NONE : CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_key *scsi_cmd; scsi_cmd = (struct scsi_send_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_KEY; scsi_ulto2b(dxfer_len, scsi_cmd->param_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_dvd_structure(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t address, u_int8_t layer_number, u_int8_t format, u_int8_t agid, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_dvd_structure *scsi_cmd; scsi_cmd = (struct scsi_read_dvd_structure *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_DVD_STRUCTURE; scsi_ulto4b(address, scsi_cmd->address); scsi_cmd->layer_number = layer_number; scsi_cmd->format = format; scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); /* The AGID is the top two bits of this byte */ scsi_cmd->agid = agid << 6; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_cd.h =================================================================== --- head/sys/cam/scsi/scsi_cd.h (revision 326264) +++ head/sys/cam/scsi/scsi_cd.h (revision 326265) @@ -1,875 +1,877 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2000, 2002 Kenneth D. Merry * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * from: scsi_cd.h,v 1.10 1997/02/22 09:44:28 peter Exp $ * $FreeBSD$ */ #ifndef _SCSI_SCSI_CD_H #define _SCSI_SCSI_CD_H 1 /* * Define two bits always in the same place in byte 2 (flag byte) */ #define CD_RELADDR 0x01 #define CD_MSF 0x02 /* * SCSI command format */ struct scsi_get_config { uint8_t opcode; uint8_t rt; #define SGC_RT_ALL 0x00 #define SGC_RT_CURRENT 0x01 #define SGC_RT_SPECIFIC 0x02 #define SGC_RT_MASK 0x03 uint8_t starting_feature[2]; uint8_t reserved[3]; uint8_t length[2]; uint8_t control; }; struct scsi_get_config_header { uint8_t data_length[4]; uint8_t reserved[2]; uint8_t current_profile[2]; }; struct scsi_get_config_feature { uint8_t feature_code[2]; uint8_t flags; #define SGC_F_CURRENT 0x01 #define SGC_F_PERSISTENT 0x02 #define SGC_F_VERSION_MASK 0x2C #define SGC_F_VERSION_SHIFT 2 uint8_t add_length; uint8_t feature_data[]; }; struct scsi_get_event_status { uint8_t opcode; uint8_t byte2; #define SGESN_POLLED 1 uint8_t reserved[2]; uint8_t notif_class; uint8_t reserved2[2]; uint8_t length[2]; uint8_t control; }; struct scsi_get_event_status_header { uint8_t descr_length[4]; uint8_t nea_class; #define SGESN_NEA 0x80 uint8_t supported_class; }; struct scsi_get_event_status_descr { uint8_t event_code; uint8_t event_info[]; }; struct scsi_mechanism_status { uint8_t opcode; uint8_t reserved[7]; uint8_t length[2]; uint8_t reserved2; uint8_t control; }; struct scsi_mechanism_status_header { uint8_t state1; uint8_t state2; uint8_t lba[3]; uint8_t slots_num; uint8_t slots_length[2]; }; struct scsi_pause { u_int8_t op_code; u_int8_t byte2; u_int8_t unused[6]; u_int8_t resume; u_int8_t control; }; #define PA_PAUSE 1 #define PA_RESUME 0 struct scsi_play_msf { u_int8_t op_code; u_int8_t byte2; u_int8_t unused; u_int8_t start_m; u_int8_t start_s; u_int8_t start_f; u_int8_t end_m; u_int8_t end_s; u_int8_t end_f; u_int8_t control; }; struct scsi_play_track { u_int8_t op_code; u_int8_t byte2; u_int8_t unused[2]; u_int8_t start_track; u_int8_t start_index; u_int8_t unused1; u_int8_t end_track; u_int8_t end_index; u_int8_t control; }; struct scsi_play_10 { 
u_int8_t op_code; u_int8_t byte2; u_int8_t blk_addr[4]; u_int8_t unused; u_int8_t xfer_len[2]; u_int8_t control; }; struct scsi_play_12 { u_int8_t op_code; u_int8_t byte2; /* same as above */ u_int8_t blk_addr[4]; u_int8_t xfer_len[4]; u_int8_t unused; u_int8_t control; }; struct scsi_play_rel_12 { u_int8_t op_code; u_int8_t byte2; /* same as above */ u_int8_t blk_addr[4]; u_int8_t xfer_len[4]; u_int8_t track; u_int8_t control; }; struct scsi_read_header { u_int8_t op_code; u_int8_t byte2; u_int8_t blk_addr[4]; u_int8_t unused; u_int8_t data_len[2]; u_int8_t control; }; struct scsi_read_subchannel { u_int8_t op_code; u_int8_t byte1; u_int8_t byte2; #define SRS_SUBQ 0x40 u_int8_t subchan_format; u_int8_t unused[2]; u_int8_t track; u_int8_t data_len[2]; u_int8_t control; }; struct scsi_read_toc { u_int8_t op_code; u_int8_t byte2; u_int8_t format; u_int8_t unused[3]; u_int8_t from_track; u_int8_t data_len[2]; u_int8_t control; }; struct scsi_read_toc_hdr { uint8_t data_length[2]; uint8_t first; uint8_t last; }; struct scsi_read_toc_type01_descr { uint8_t reserved; uint8_t addr_ctl; uint8_t track_number; uint8_t reserved2; uint8_t track_start[4]; }; struct scsi_read_cd_capacity { u_int8_t op_code; u_int8_t byte2; u_int8_t addr_3; /* Most Significant */ u_int8_t addr_2; u_int8_t addr_1; u_int8_t addr_0; /* Least Significant */ u_int8_t unused[3]; u_int8_t control; }; struct scsi_set_speed { u_int8_t opcode; u_int8_t byte2; u_int8_t readspeed[2]; u_int8_t writespeed[2]; u_int8_t reserved[5]; u_int8_t control; }; struct scsi_report_key { u_int8_t opcode; u_int8_t reserved0; u_int8_t lba[4]; u_int8_t reserved1[2]; u_int8_t alloc_len[2]; u_int8_t agid_keyformat; #define RK_KF_AGID_MASK 0xc0 #define RK_KF_AGID_SHIFT 6 #define RK_KF_KEYFORMAT_MASK 0x3f #define RK_KF_AGID 0x00 #define RK_KF_CHALLENGE 0x01 #define RF_KF_KEY1 0x02 #define RK_KF_KEY2 0x03 #define RF_KF_TITLE 0x04 #define RF_KF_ASF 0x05 #define RK_KF_RPC_SET 0x06 #define RF_KF_RPC_REPORT 0x08 #define RF_KF_INV_AGID 0x3f u_int8_t control; }; /* * See the report key structure for key format and AGID definitions. 
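* Both CDBs pack the AGID into the top two bits and the key format into the low six bits of agid_keyformat, i.e. (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK), which is exactly what scsi_report_key() and scsi_send_key() build; e.g. (hypothetical values) AGID 2 with RK_KF_CHALLENGE gives (2 << 6) | 0x01 == 0x81.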
*/ struct scsi_send_key { u_int8_t opcode; u_int8_t reserved[7]; u_int8_t param_len[2]; u_int8_t agid_keyformat; u_int8_t control; }; struct scsi_read_dvd_structure { u_int8_t opcode; u_int8_t reserved; u_int8_t address[4]; u_int8_t layer_number; u_int8_t format; #define RDS_FORMAT_PHYSICAL 0x00 #define RDS_FORMAT_COPYRIGHT 0x01 #define RDS_FORMAT_DISC_KEY 0x02 #define RDS_FORMAT_BCA 0x03 #define RDS_FORMAT_MANUFACTURER 0x04 #define RDS_FORMAT_CMGS_CPM 0x05 #define RDS_FORMAT_PROT_DISCID 0x06 #define RDS_FORMAT_DISC_KEY_BLOCK 0x07 #define RDS_FORMAT_DDS 0x08 #define RDS_FORMAT_DVDRAM_MEDIA_STAT 0x09 #define RDS_FORMAT_SPARE_AREA 0x0a #define RDS_FORMAT_RMD_BORDEROUT 0x0c #define RDS_FORMAT_RMD 0x0d #define RDS_FORMAT_LEADIN 0x0e #define RDS_FORMAT_DISC_ID 0x0f #define RDS_FORMAT_DCB 0x30 #define RDS_FORMAT_WRITE_PROT 0xc0 #define RDS_FORMAT_STRUCTURE_LIST 0xff u_int8_t alloc_len[2]; u_int8_t agid; u_int8_t control; }; /* * Opcodes */ #define READ_CD_CAPACITY 0x25 /* slightly different from disk */ #define READ_SUBCHANNEL 0x42 /* cdrom read Subchannel */ #define READ_TOC 0x43 /* cdrom read TOC */ #define READ_HEADER 0x44 /* cdrom read header */ #define PLAY_10 0x45 /* cdrom play 'play audio' mode */ #define GET_CONFIGURATION 0x46 /* Get device configuration */ #define PLAY_MSF 0x47 /* cdrom play Min,Sec,Frames mode */ #define PLAY_TRACK 0x48 /* cdrom play track/index mode */ #define PLAY_TRACK_REL 0x49 /* cdrom play track/index mode */ #define GET_EVENT_STATUS 0x4a /* Get event status notification */ #define PAUSE 0x4b /* cdrom pause in 'play audio' mode */ #define SEND_KEY 0xa3 /* dvd send key command */ #define REPORT_KEY 0xa4 /* dvd report key command */ #define PLAY_12 0xa5 /* cdrom play 'play audio' mode (12 byte) */ #define PLAY_TRACK_REL_BIG 0xa9 /* cdrom play track/index mode */ #define READ_DVD_STRUCTURE 0xad /* read dvd structure */ #define SET_CD_SPEED 0xbb /* set c/dvd speed */ #define MECHANISM_STATUS 0xbd /* get status of c/dvd mechanics */ struct scsi_report_key_data_header { u_int8_t data_len[2]; u_int8_t reserved[2]; }; struct scsi_report_key_data_agid { u_int8_t data_len[2]; u_int8_t reserved[5]; u_int8_t agid; #define RKD_AGID_MASK 0xc0 #define RKD_AGID_SHIFT 6 }; struct scsi_report_key_data_challenge { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t challenge_key[10]; u_int8_t reserved1[2]; }; struct scsi_report_key_data_key1_key2 { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t key1[5]; u_int8_t reserved1[3]; }; struct scsi_report_key_data_title { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t byte0; #define RKD_TITLE_CPM 0x80 #define RKD_TITLE_CPM_SHIFT 7 #define RKD_TITLE_CP_SEC 0x40 #define RKD_TITLE_CP_SEC_SHIFT 6 #define RKD_TITLE_CMGS_MASK 0x30 #define RKD_TITLE_CMGS_SHIFT 4 #define RKD_TITLE_CMGS_NO_RST 0x00 #define RKD_TITLE_CMGS_RSVD 0x10 #define RKD_TITLE_CMGS_1_GEN 0x20 #define RKD_TITLE_CMGS_NO_COPY 0x30 u_int8_t title_key[5]; u_int8_t reserved1[2]; }; struct scsi_report_key_data_asf { u_int8_t data_len[2]; u_int8_t reserved[5]; u_int8_t success; #define RKD_ASF_SUCCESS 0x01 }; struct scsi_report_key_data_rpc { u_int8_t data_len[2]; u_int8_t rpc_scheme0; #define RKD_RPC_SCHEME_UNKNOWN 0x00 #define RKD_RPC_SCHEME_PHASE_II 0x01 u_int8_t reserved0; u_int8_t byte4; #define RKD_RPC_TYPE_MASK 0xC0 #define RKD_RPC_TYPE_SHIFT 6 #define RKD_RPC_TYPE_NONE 0x00 #define RKD_RPC_TYPE_SET 0x40 #define RKD_RPC_TYPE_LAST_CHANCE 0x80 #define RKD_RPC_TYPE_PERM 0xC0 #define RKD_RPC_VENDOR_RESET_MASK 0x38 #define RKD_RPC_VENDOR_RESET_SHIFT 3 #define
RKD_RPC_USER_RESET_MASK 0x07 #define RKD_RPC_USER_RESET_SHIFT 0 u_int8_t region_mask; u_int8_t rpc_scheme1; u_int8_t reserved1; }; struct scsi_send_key_data_rpc { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t region_code; u_int8_t reserved1[3]; }; /* * Common header for the return data from the READ DVD STRUCTURE command. */ struct scsi_read_dvd_struct_data_header { u_int8_t data_len[2]; u_int8_t reserved[2]; }; struct scsi_read_dvd_struct_data_layer_desc { u_int8_t book_type_version; #define RDSD_BOOK_TYPE_DVD_ROM 0x00 #define RDSD_BOOK_TYPE_DVD_RAM 0x10 #define RDSD_BOOK_TYPE_DVD_R 0x20 #define RDSD_BOOK_TYPE_DVD_RW 0x30 #define RDSD_BOOK_TYPE_DVD_PRW 0x90 #define RDSD_BOOK_TYPE_MASK 0xf0 #define RDSD_BOOK_TYPE_SHIFT 4 #define RDSD_BOOK_VERSION_MASK 0x0f /* * The lower 4 bits of this field is referred to as the "minimum * rate" field in MMC2, and the "maximum rate" field in MMC3. Ugh. */ u_int8_t disc_size_max_rate; #define RDSD_DISC_SIZE_120MM 0x00 #define RDSD_DISC_SIZE_80MM 0x10 #define RDSD_DISC_SIZE_MASK 0xf0 #define RDSD_DISC_SIZE_SHIFT 4 #define RDSD_MAX_RATE_0252 0x00 #define RDSD_MAX_RATE_0504 0x01 #define RDSD_MAX_RATE_1008 0x02 #define RDSD_MAX_RATE_NOT_SPEC 0x0f #define RDSD_MAX_RATE_MASK 0x0f u_int8_t layer_info; #define RDSD_NUM_LAYERS_MASK 0x60 #define RDSD_NUM_LAYERS_SHIFT 5 #define RDSD_NL_ONE_LAYER 0x00 #define RDSD_NL_TWO_LAYERS 0x20 #define RDSD_TRACK_PATH_MASK 0x10 #define RDSD_TRACK_PATH_SHIFT 4 #define RDSD_TP_PTP 0x00 #define RDSD_TP_OTP 0x10 #define RDSD_LAYER_TYPE_RO 0x01 #define RDSD_LAYER_TYPE_RECORD 0x02 #define RDSD_LAYER_TYPE_RW 0x04 #define RDSD_LAYER_TYPE_MASK 0x0f u_int8_t density; #define RDSD_LIN_DENSITY_0267 0x00 #define RDSD_LIN_DENSITY_0293 0x10 #define RDSD_LIN_DENSITY_0409_0435 0x20 #define RDSD_LIN_DENSITY_0280_0291 0x40 /* XXX MMC2 uses 0.176um/bit instead of 0.353 as in MMC3 */ #define RDSD_LIN_DENSITY_0353 0x80 #define RDSD_LIN_DENSITY_MASK 0xf0 #define RDSD_LIN_DENSITY_SHIFT 4 #define RDSD_TRACK_DENSITY_074 0x00 #define RDSD_TRACK_DENSITY_080 0x01 #define RDSD_TRACK_DENSITY_0615 0x02 #define RDSD_TRACK_DENSITY_MASK 0x0f u_int8_t zeros0; u_int8_t main_data_start[3]; #define RDSD_MAIN_DATA_START_DVD_RO 0x30000 #define RDSD_MAIN_DATA_START_DVD_RW 0x31000 u_int8_t zeros1; u_int8_t main_data_end[3]; u_int8_t zeros2; u_int8_t end_sector_layer0[3]; u_int8_t bca; #define RDSD_BCA 0x80 #define RDSD_BCA_MASK 0x80 #define RDSD_BCA_SHIFT 7 u_int8_t media_specific[2031]; }; struct scsi_read_dvd_struct_data_physical { u_int8_t data_len[2]; u_int8_t reserved[2]; struct scsi_read_dvd_struct_data_layer_desc layer_desc; }; struct scsi_read_dvd_struct_data_copyright { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t cps_type; #define RDSD_CPS_NOT_PRESENT 0x00 #define RDSD_CPS_DATA_EXISTS 0x01 u_int8_t region_info; u_int8_t reserved1[2]; }; struct scsi_read_dvd_struct_data_disc_key { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t disc_key[2048]; }; struct scsi_read_dvd_struct_data_bca { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t bca_info[188]; /* XXX 12-188 bytes */ }; struct scsi_read_dvd_struct_data_manufacturer { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t manuf_info[2048]; }; struct scsi_read_dvd_struct_data_copy_manage { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t byte4; #define RDSD_CPM_NO_COPYRIGHT 0x00 #define RDSD_CPM_HAS_COPYRIGHT 0x80 #define RDSD_CPM_MASK 0x80 #define RDSD_CMGS_COPY_ALLOWED 0x00 #define RDSD_CMGS_ONE_COPY 0x20 #define RDSD_CMGS_NO_COPIES 0x30 #define RDSD_CMGS_MASK 0x30 u_int8_t 
reserved1[3]; }; struct scsi_read_dvd_struct_data_prot_discid { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t prot_discid_data[16]; }; struct scsi_read_dvd_struct_data_disc_key_blk { /* * Length is 0x6ffe == 28670 for CPRM, 0x3002 == 12990 for CSS2. */ u_int8_t data_len[2]; u_int8_t reserved; u_int8_t total_packs; u_int8_t disc_key_pack_data[28668]; }; struct scsi_read_dvd_struct_data_dds { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t dds_info[2048]; }; struct scsi_read_dvd_struct_data_medium_status { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t byte4; #define RDSD_MS_CARTRIDGE 0x80 #define RDSD_MS_OUT 0x40 #define RDSD_MS_MSWI 0x08 #define RDSD_MS_CWP 0x04 #define RDSD_MS_PWP 0x02 u_int8_t disc_type_id; #define RDSD_DT_NEED_CARTRIDGE 0x00 #define RDSD_DT_NO_CART_NEEDED 0x01 u_int8_t reserved1; u_int8_t ram_swi_info; #define RDSD_SWI_NO_BARE 0x01 #define RDSD_SWI_UNSPEC 0xff }; struct scsi_read_dvd_struct_data_spare_area { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t unused_primary[4]; u_int8_t unused_supl[4]; u_int8_t allocated_supl[4]; }; struct scsi_read_dvd_struct_data_rmd_borderout { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t rmd[30720]; /* maximum is 30720 bytes */ }; struct scsi_read_dvd_struct_data_rmd { u_int8_t data_len[2]; u_int8_t reserved[2]; u_int8_t last_sector_num[4]; u_int8_t rmd_bytes[32768]; /* This is the maximum */ }; /* * XXX KDM this is the MMC2 version of the structure. * The variable positions have changed (in a semi-conflicting way) in the * MMC3 spec, although the overall length of the structure is the same. */ struct scsi_read_dvd_struct_data_leadin { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t field_id_1; u_int8_t app_code; u_int8_t disc_physical_data; u_int8_t last_addr[3]; u_int8_t reserved1[2]; u_int8_t field_id_2; u_int8_t rwp; u_int8_t rwp_wavelength; u_int8_t optimum_write_strategy; u_int8_t reserved2[4]; u_int8_t field_id_3; u_int8_t manuf_id_17_12[6]; u_int8_t reserved3; u_int8_t field_id_4; u_int8_t manuf_id_11_6[6]; u_int8_t reserved4; u_int8_t field_id_5; u_int8_t manuf_id_5_0[6]; u_int8_t reserved5[25]; }; struct scsi_read_dvd_struct_data_disc_id { u_int8_t data_len[2]; u_int8_t reserved[4]; u_int8_t random_num[2]; u_int8_t year[4]; u_int8_t month[2]; u_int8_t day[2]; u_int8_t hour[2]; u_int8_t minute[2]; u_int8_t second[2]; }; struct scsi_read_dvd_struct_data_generic_dcb { u_int8_t content_desc[4]; #define SCSI_RCB u_int8_t unknown_desc_actions[4]; #define RDSD_ACTION_RECORDING 0x0001 #define RDSD_ACTION_READING 0x0002 #define RDSD_ACTION_FORMAT 0x0004 #define RDSD_ACTION_MODIFY_DCB 0x0008 u_int8_t vendor_id[32]; u_int8_t dcb_data[32728]; }; struct scsi_read_dvd_struct_data_dcb { u_int8_t data_len[2]; u_int8_t reserved[2]; struct scsi_read_dvd_struct_data_generic_dcb dcb; }; struct read_dvd_struct_write_prot { u_int8_t data_len[2]; u_int8_t reserved0[2]; u_int8_t write_prot_status; #define RDSD_WPS_MSWI 0x08 #define RDSD_WPS_CWP 0x04 #define RDSD_WPS_PWP 0x02 #define RDSD_WPS_SWPP 0x01 u_int8_t reserved[3]; }; struct read_dvd_struct_list_entry { u_int8_t format_code; u_int8_t sds_rds; #define RDSD_SDS_NOT_WRITEABLE 0x00 #define RDSD_SDS_WRITEABLE 0x80 #define RDSD_SDS_MASK 0x80 #define RDSD_RDS_NOT_READABLE 0x00 #define RDSD_RDS_READABLE 0x40 #define RDSD_RDS_MASK 0x40 u_int8_t struct_len[2]; }; struct read_dvd_struct_data_list { u_int8_t data_len[2]; u_int8_t reserved[2]; struct read_dvd_struct_list_entry entries[0]; }; struct scsi_read_cd_cap_data { u_int8_t addr_3; /* Most significant */ 
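/* The four addr_* bytes hold a big-endian block address; a consumer reassembles it as (addr_3 << 24) | (addr_2 << 16) | (addr_1 << 8) | addr_0, which is what scsi_4btoul() does on the raw buffer. */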
u_int8_t addr_2; u_int8_t addr_1; u_int8_t addr_0; /* Least significant */ u_int8_t length_3; /* Most significant */ u_int8_t length_2; u_int8_t length_1; u_int8_t length_0; /* Least significant */ }; struct cd_audio_page { u_int8_t page_code; #define CD_PAGE_CODE 0x3F #define AUDIO_PAGE 0x0e #define CD_PAGE_PS 0x80 u_int8_t param_len; u_int8_t flags; #define CD_PA_SOTC 0x02 #define CD_PA_IMMED 0x04 u_int8_t unused[2]; u_int8_t format_lba; #define CD_PA_FORMAT_LBA 0x0F #define CD_PA_APR_VALID 0x80 u_int8_t lb_per_sec[2]; struct port_control { u_int8_t channels; #define CHANNEL 0x0F #define CHANNEL_0 1 #define CHANNEL_1 2 #define CHANNEL_2 4 #define CHANNEL_3 8 #define LEFT_CHANNEL CHANNEL_0 #define RIGHT_CHANNEL CHANNEL_1 u_int8_t volume; } port[4]; #define LEFT_PORT 0 #define RIGHT_PORT 1 }; struct scsi_cddvd_capabilities_page_sd { uint8_t reserved; uint8_t rotation_control; uint8_t write_speed_supported[2]; }; struct scsi_cddvd_capabilities_page { uint8_t page_code; #define SMS_CDDVD_CAPS_PAGE 0x2a uint8_t page_length; uint8_t caps1; uint8_t caps2; uint8_t caps3; uint8_t caps4; uint8_t caps5; uint8_t caps6; uint8_t obsolete[2]; uint8_t nvol_levels[2]; uint8_t buffer_size[2]; uint8_t obsolete2[2]; uint8_t reserved; uint8_t digital; uint8_t obsolete3; uint8_t copy_management; uint8_t reserved2; uint8_t rotation_control; uint8_t cur_write_speed; uint8_t num_speed_descr; struct scsi_cddvd_capabilities_page_sd speed_descr[]; }; union cd_pages { struct cd_audio_page audio; }; struct cd_mode_data_10 { struct scsi_mode_header_10 header; struct scsi_mode_blk_desc blk_desc; union cd_pages page; }; struct cd_mode_data { struct scsi_mode_header_6 header; struct scsi_mode_blk_desc blk_desc; union cd_pages page; }; union cd_mode_data_6_10 { struct cd_mode_data mode_data_6; struct cd_mode_data_10 mode_data_10; }; struct cd_mode_params { STAILQ_ENTRY(cd_mode_params) links; int cdb_size; int alloc_len; u_int8_t *mode_buf; }; __BEGIN_DECLS void scsi_report_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t lba, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_send_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_read_dvd_structure(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t address, u_int8_t layer_number, u_int8_t format, u_int8_t agid, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); __END_DECLS #endif /*_SCSI_SCSI_CD_H*/ Index: head/sys/cam/scsi/scsi_ch.c =================================================================== --- head/sys/cam/scsi/scsi_ch.c (revision 326264) +++ head/sys/cam/scsi/scsi_ch.c (revision 326265) @@ -1,1940 +1,1942 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause + * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Derived from the NetBSD SCSI changer driver. * * $NetBSD: ch.c,v 1.32 1998/01/12 09:49:12 thorpej Exp $ * */ /*- * Copyright (c) 1996, 1997 Jason R. Thorpe * All rights reserved. * * Partially based on an autochanger driver written by Stefan Grefen * and on an autochanger driver written by the Systems Programming Group * at the University of Utah Computer Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgements: * This product includes software developed by Jason R. Thorpe * for And Communications, http://www.and.com/ * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Timeout definitions for various changer related commands. They may * be too short for some devices (especially the timeout for INITIALIZE * ELEMENT STATUS). 
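* For scale: CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS below is 500000 ms, a little over eight minutes, because a full inventory pass of a large library can run that long; all of these values are in milliseconds, the unit the timeout argument of the scsi_*() CDB fill functions expects.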
*/ static const u_int32_t CH_TIMEOUT_MODE_SENSE = 6000; static const u_int32_t CH_TIMEOUT_MOVE_MEDIUM = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_EXCHANGE_MEDIUM = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_POSITION_TO_ELEMENT = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_READ_ELEMENT_STATUS = 5 * 60 * 1000; static const u_int32_t CH_TIMEOUT_SEND_VOLTAG = 10000; static const u_int32_t CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS = 500000; typedef enum { CH_FLAG_INVALID = 0x001 } ch_flags; typedef enum { CH_STATE_PROBE, CH_STATE_NORMAL } ch_state; typedef enum { CH_CCB_PROBE } ch_ccb_types; typedef enum { CH_Q_NONE = 0x00, CH_Q_NO_DBD = 0x01, CH_Q_NO_DVCID = 0x02 } ch_quirks; #define CH_Q_BIT_STRING \ "\020" \ "\001NO_DBD" \ "\002NO_DVCID" #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct scsi_mode_sense_data { struct scsi_mode_header_6 header; struct scsi_mode_blk_desc blk_desc; union { struct page_element_address_assignment ea; struct page_transport_geometry_parameters tg; struct page_device_capabilities cap; } pages; }; struct ch_softc { ch_flags flags; ch_state state; ch_quirks quirks; union ccb saved_ccb; struct devstat *device_stats; struct cdev *dev; int open_count; int sc_picker; /* current picker */ /* * The following information is obtained from the * element address assignment page. */ int sc_firsts[CHET_MAX + 1]; /* firsts */ int sc_counts[CHET_MAX + 1]; /* counts */ /* * The following mask defines the legal combinations * of elements for the MOVE MEDIUM command. */ u_int8_t sc_movemask[CHET_MAX + 1]; /* * As above, but for EXCHANGE MEDIUM. */ u_int8_t sc_exchangemask[CHET_MAX + 1]; /* * Quirks; see below. XXX KDM not implemented yet */ int sc_settledelay; /* delay for settle */ }; static d_open_t chopen; static d_close_t chclose; static d_ioctl_t chioctl; static periph_init_t chinit; static periph_ctor_t chregister; static periph_oninv_t choninvalidate; static periph_dtor_t chcleanup; static periph_start_t chstart; static void chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void chdone(struct cam_periph *periph, union ccb *done_ccb); static int cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int chmove(struct cam_periph *periph, struct changer_move *cm); static int chexchange(struct cam_periph *periph, struct changer_exchange *ce); static int chposition(struct cam_periph *periph, struct changer_position *cp); static int chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd, struct changer_element_status_request *csr); static int chsetvoltag(struct cam_periph *periph, struct changer_set_voltag_request *csvr); static int chielem(struct cam_periph *periph, unsigned int timeout); static int chgetparams(struct cam_periph *periph); static int chscsiversion(struct cam_periph *periph); static struct periph_driver chdriver = { chinit, "ch", TAILQ_HEAD_INITIALIZER(chdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(ch, chdriver); static struct cdevsw ch_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = chopen, .d_close = chclose, .d_ioctl = chioctl, .d_name = "ch", }; static MALLOC_DEFINE(M_SCSICH, "scsi_ch", "scsi_ch buffers"); static void chinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". 
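* AC_FOUND_DEVICE is the interesting one here: it hands chasync() a struct ccb_getdev describing the new device, and chasync() in turn calls cam_periph_alloc() for anything that probes as T_CHANGER.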
*/ status = xpt_register_async(AC_FOUND_DEVICE, chasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ch: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void chdevgonecb(void *arg) { struct ch_softc *softc; struct cam_periph *periph; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct ch_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void choninvalidate(struct cam_periph *periph) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, chasync, periph, periph->path); softc->flags |= CH_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, chdevgonecb, periph); } static void chcleanup(struct cam_periph *periph) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; devstat_remove_entry(softc->device_stats); free(softc, M_DEVBUF); } static void chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data)!= T_CHANGER) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(chregister, choninvalidate, chcleanup, chstart, "ch", CAM_PERIPH_BIO, path, chasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("chasync: Unable to probe new device " "due to status 0x%x\n", status); break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status chregister(struct cam_periph *periph, void *arg) { struct ch_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("chregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ch_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("chregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = CH_STATE_PROBE; periph->softc = softc; softc->quirks = CH_Q_NONE; /* * The DVCID and CURDATA bits were not introduced until the SMC * spec. If this device claims SCSI-2 or earlier support, then it * very likely does not support these bits. 
*/
*/ if (cgd->inq_data.version <= SCSI_REV_2) softc->quirks |= CH_Q_NO_DVCID; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * Changers don't have a blocksize, and obviously don't support * tagged queueing. */ cam_periph_unlock(periph); softc->device_stats = devstat_new_entry("ch", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | DEVSTAT_NO_ORDERED_TAGS, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_OTHER); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &ch_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); cam_periph_lock(periph); if (error != 0) { cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, chasync, periph, periph->path); /* * Lock this periph until we are setup. * This first call can't block */ (void)cam_periph_hold(periph, PRIBIO); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int chopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); softc = (struct ch_softc *)periph->softc; cam_periph_lock(periph); if (softc->flags & CH_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * Load information about this changer device into the softc. */ if ((error = chgetparams(periph)) != 0) { cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } cam_periph_unhold(periph); softc->open_count++; cam_periph_unlock(periph); return(error); } static int chclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct ch_softc *)periph->softc; softc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
*/ mtx_unlock(mtx); return(0); } static void chstart(struct cam_periph *periph, union ccb *start_ccb) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; switch (softc->state) { case CH_STATE_NORMAL: { xpt_release_ccb(start_ccb); break; } case CH_STATE_PROBE: { int mode_buffer_len; void *mode_buffer; /* * Include the block descriptor when calculating the mode * buffer length, */ mode_buffer_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct page_element_address_assignment); mode_buffer = malloc(mode_buffer_len, M_SCSICH, M_NOWAIT); if (mode_buffer == NULL) { printf("chstart: couldn't malloc mode sense data\n"); break; } bzero(mode_buffer, mode_buffer_len); /* * Get the element address assignment page. */ scsi_mode_sense(&start_ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ (softc->quirks & CH_Q_NO_DBD) ? FALSE : TRUE, /* pc */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CH_CCB_PROBE; xpt_action(start_ccb); break; } } } static void chdone(struct cam_periph *periph, union ccb *done_ccb) { struct ch_softc *softc; struct ccb_scsiio *csio; softc = (struct ch_softc *)periph->softc; csio = &done_ccb->csio; switch(done_ccb->ccb_h.ccb_state) { case CH_CCB_PROBE: { struct scsi_mode_header_6 *mode_header; struct page_element_address_assignment *ea; char announce_buf[80]; mode_header = (struct scsi_mode_header_6 *)csio->data_ptr; ea = (struct page_element_address_assignment *) find_mode_page_6(mode_header); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP){ softc->sc_firsts[CHET_MT] = scsi_2btoul(ea->mtea); softc->sc_counts[CHET_MT] = scsi_2btoul(ea->nmte); softc->sc_firsts[CHET_ST] = scsi_2btoul(ea->fsea); softc->sc_counts[CHET_ST] = scsi_2btoul(ea->nse); softc->sc_firsts[CHET_IE] = scsi_2btoul(ea->fieea); softc->sc_counts[CHET_IE] = scsi_2btoul(ea->niee); softc->sc_firsts[CHET_DT] = scsi_2btoul(ea->fdtea); softc->sc_counts[CHET_DT] = scsi_2btoul(ea->ndte); softc->sc_picker = softc->sc_firsts[CHET_MT]; #define PLURAL(c) (c) == 1 ? "" : "s" snprintf(announce_buf, sizeof(announce_buf), "%d slot%s, %d drive%s, " "%d picker%s, %d portal%s", softc->sc_counts[CHET_ST], PLURAL(softc->sc_counts[CHET_ST]), softc->sc_counts[CHET_DT], PLURAL(softc->sc_counts[CHET_DT]), softc->sc_counts[CHET_MT], PLURAL(softc->sc_counts[CHET_MT]), softc->sc_counts[CHET_IE], PLURAL(softc->sc_counts[CHET_IE])); #undef PLURAL if (announce_buf[0] != '\0') { xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, CH_Q_BIT_STRING); } } else { int error; error = cherror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { struct scsi_mode_sense_6 *sms; int frozen, retry_scheduled; sms = (struct scsi_mode_sense_6 *) done_ccb->csio.cdb_io.cdb_bytes; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; /* * Check to see if block descriptors were * disabled. Some devices don't like that. * We're taking advantage of the fact that * the first few bytes of the 6 and 10 byte * mode sense commands are the same. If * block descriptors were disabled, enable * them and re-send the command. 
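* This works because MODE SENSE(6) and MODE SENSE(10) agree on the location of the DBD flag, so the same SMS_DBD bit (0x08 in the byte after the opcode) can be cleared below without knowing which CDB size was sent.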
*/ if ((sms->byte2 & SMS_DBD) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) { sms->byte2 &= ~SMS_DBD; xpt_action(done_ccb); softc->quirks |= CH_Q_NO_DBD; retry_scheduled = 1; } else retry_scheduled = 0; /* Don't wedge this device's queue */ if (frozen) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (retry_scheduled) return; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) scsi_sense_print(&done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, failed " "to attach to device\n"); cam_periph_invalidate(periph); } } softc->state = CH_STATE_NORMAL; free(mode_header, M_SCSICH); /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static int cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct ch_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ch_softc *)periph->softc; return (cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static int chioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering chioctl\n")); softc = (struct ch_softc *)periph->softc; error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, we must * have the device open for writing. 
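* For example, CHIOGPICKER, CHIOGPARAMS, CHIOGSTATUS and the old OCHIOGSTATUS only report state, so the switch below allows them on a read-only descriptor; anything else (CHIOMOVE, CHIOEXCHANGE, CHIOSPICKER, CHIOIELEM, CHIOSETVOLTAG, ...) fails with EBADF unless the caller has the device open with FWRITE.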
*/ switch (cmd) { case CHIOGPICKER: case CHIOGPARAMS: case OCHIOGSTATUS: case CHIOGSTATUS: break; default: if ((flag & FWRITE) == 0) { cam_periph_unlock(periph); return (EBADF); } } switch (cmd) { case CHIOMOVE: error = chmove(periph, (struct changer_move *)addr); break; case CHIOEXCHANGE: error = chexchange(periph, (struct changer_exchange *)addr); break; case CHIOPOSITION: error = chposition(periph, (struct changer_position *)addr); break; case CHIOGPICKER: *(int *)addr = softc->sc_picker - softc->sc_firsts[CHET_MT]; break; case CHIOSPICKER: { int new_picker = *(int *)addr; if (new_picker > (softc->sc_counts[CHET_MT] - 1)) { error = EINVAL; break; } softc->sc_picker = softc->sc_firsts[CHET_MT] + new_picker; break; } case CHIOGPARAMS: { struct changer_params *cp = (struct changer_params *)addr; cp->cp_npickers = softc->sc_counts[CHET_MT]; cp->cp_nslots = softc->sc_counts[CHET_ST]; cp->cp_nportals = softc->sc_counts[CHET_IE]; cp->cp_ndrives = softc->sc_counts[CHET_DT]; break; } case CHIOIELEM: error = chielem(periph, *(unsigned int *)addr); break; case OCHIOGSTATUS: { error = chgetelemstatus(periph, SCSI_REV_2, cmd, (struct changer_element_status_request *)addr); break; } case CHIOGSTATUS: { int scsi_version; scsi_version = chscsiversion(periph); if (scsi_version >= SCSI_REV_0) { error = chgetelemstatus(periph, scsi_version, cmd, (struct changer_element_status_request *)addr); } else { /* unable to determine the SCSI version */ cam_periph_unlock(periph); return (ENXIO); } break; } case CHIOSETVOLTAG: { error = chsetvoltag(periph, (struct changer_set_voltag_request *) addr); break; } /* Implement prevent/allow? */ default: error = cam_periph_ioctl(periph, cmd, addr, cherror); break; } cam_periph_unlock(periph); return (error); } static int chmove(struct cam_periph *periph, struct changer_move *cm) { struct ch_softc *softc; u_int16_t fromelem, toelem; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if ((cm->cm_fromtype > CHET_DT) || (cm->cm_totype > CHET_DT)) return (EINVAL); if ((cm->cm_fromunit > (softc->sc_counts[cm->cm_fromtype] - 1)) || (cm->cm_tounit > (softc->sc_counts[cm->cm_totype] - 1))) return (ENODEV); /* * Check the request against the changer's capabilities. */ if ((softc->sc_movemask[cm->cm_fromtype] & (1 << cm->cm_totype)) == 0) return (ENODEV); /* * Calculate the source and destination elements. */ fromelem = softc->sc_firsts[cm->cm_fromtype] + cm->cm_fromunit; toelem = softc->sc_firsts[cm->cm_totype] + cm->cm_tounit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_move_medium(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* src */ fromelem, /* dst */ toelem, /* invert */ (cm->cm_flags & CM_INVERT) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MOVE_MEDIUM); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chexchange(struct cam_periph *periph, struct changer_exchange *ce) { struct ch_softc *softc; u_int16_t src, dst1, dst2; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. 
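* Note that an exchange is in effect two moves, src to first destination and first destination to second destination, which is why the capability check below consults sc_exchangemask for both legs of the operation.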
*/ if ((ce->ce_srctype > CHET_DT) || (ce->ce_fdsttype > CHET_DT) || (ce->ce_sdsttype > CHET_DT)) return (EINVAL); if ((ce->ce_srcunit > (softc->sc_counts[ce->ce_srctype] - 1)) || (ce->ce_fdstunit > (softc->sc_counts[ce->ce_fdsttype] - 1)) || (ce->ce_sdstunit > (softc->sc_counts[ce->ce_sdsttype] - 1))) return (ENODEV); /* * Check the request against the changer's capabilities. */ if (((softc->sc_exchangemask[ce->ce_srctype] & (1 << ce->ce_fdsttype)) == 0) || ((softc->sc_exchangemask[ce->ce_fdsttype] & (1 << ce->ce_sdsttype)) == 0)) return (ENODEV); /* * Calculate the source and destination elements. */ src = softc->sc_firsts[ce->ce_srctype] + ce->ce_srcunit; dst1 = softc->sc_firsts[ce->ce_fdsttype] + ce->ce_fdstunit; dst2 = softc->sc_firsts[ce->ce_sdsttype] + ce->ce_sdstunit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_exchange_medium(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* src */ src, /* dst1 */ dst1, /* dst2 */ dst2, /* invert1 */ (ce->ce_flags & CE_INVERT1) ? TRUE : FALSE, /* invert2 */ (ce->ce_flags & CE_INVERT2) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_EXCHANGE_MEDIUM); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chposition(struct cam_periph *periph, struct changer_position *cp) { struct ch_softc *softc; u_int16_t dst; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if (cp->cp_type > CHET_DT) return (EINVAL); if (cp->cp_unit > (softc->sc_counts[cp->cp_type] - 1)) return (ENODEV); /* * Calculate the destination element. */ dst = softc->sc_firsts[cp->cp_type] + cp->cp_unit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_position_to_element(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* dst */ dst, /* invert */ (cp->cp_flags & CP_INVERT) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_POSITION_TO_ELEMENT); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } /* * Copy a volume tag to a volume_tag struct, converting SCSI byte order * to host native byte order in the volume serial number. The volume * label as returned by the changer is transferred to user mode as a * nul-terminated string. Volume labels are truncated at the first * space, as suggested by SCSI-2. */ static void copy_voltag(struct changer_voltag *uvoltag, struct volume_tag *voltag) { int i; for (i = 0; i < CH_VOLTAG_MAXLEN; i++) { char c = voltag->vif[i]; if (c && c != ' ') uvoltag->cv_volid[i] = c; else break; } uvoltag->cv_serial = scsi_2btoul(voltag->vsn); } /* * Copy an element status descriptor to a user-mode * changer_element_status structure.
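* Element addresses reported by the hardware are absolute; the loops below rebase them against sc_firsts[] so that userland always sees zero-based, per-type addresses (e.g. the first storage slot is ST element 0 regardless of where the magazine starts in the changer's address space).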
*/ static void copy_element_status(struct ch_softc *softc, u_int16_t flags, struct read_element_status_descriptor *desc, struct changer_element_status *ces, int scsi_version) { u_int16_t eaddr = scsi_2btoul(desc->eaddr); u_int16_t et; struct volume_tag *pvol_tag = NULL, *avol_tag = NULL; struct read_element_status_device_id *devid = NULL; ces->ces_int_addr = eaddr; /* set up logical address in element status */ for (et = CHET_MT; et <= CHET_DT; et++) { if ((softc->sc_firsts[et] <= eaddr) && ((softc->sc_firsts[et] + softc->sc_counts[et]) > eaddr)) { ces->ces_addr = eaddr - softc->sc_firsts[et]; ces->ces_type = et; break; } } ces->ces_flags = desc->flags1; ces->ces_sensecode = desc->sense_code; ces->ces_sensequal = desc->sense_qual; if (desc->flags2 & READ_ELEMENT_STATUS_INVERT) ces->ces_flags |= CES_INVERT; if (desc->flags2 & READ_ELEMENT_STATUS_SVALID) { eaddr = scsi_2btoul(desc->ssea); /* convert source address to logical format */ for (et = CHET_MT; et <= CHET_DT; et++) { if ((softc->sc_firsts[et] <= eaddr) && ((softc->sc_firsts[et] + softc->sc_counts[et]) > eaddr)) { ces->ces_source_addr = eaddr - softc->sc_firsts[et]; ces->ces_source_type = et; ces->ces_flags |= CES_SOURCE_VALID; break; } } if (!(ces->ces_flags & CES_SOURCE_VALID)) printf("ch: warning: could not map element source " "address %u to a valid element type\n", eaddr); } /* * pvoltag and avoltag are common between SCSI-2 and later versions */ if (flags & READ_ELEMENT_STATUS_PVOLTAG) pvol_tag = &desc->voltag_devid.pvoltag; if (flags & READ_ELEMENT_STATUS_AVOLTAG) avol_tag = (flags & READ_ELEMENT_STATUS_PVOLTAG) ? &desc->voltag_devid.voltag[1] : &desc->voltag_devid.pvoltag; /* * For SCSI-3 and later, element status can carry designator and * other information. */ if (scsi_version >= SCSI_REV_SPC) { if ((flags & READ_ELEMENT_STATUS_PVOLTAG) ^ (flags & READ_ELEMENT_STATUS_AVOLTAG)) devid = &desc->voltag_devid.pvol_and_devid.devid; else if (!(flags & READ_ELEMENT_STATUS_PVOLTAG) && !(flags & READ_ELEMENT_STATUS_AVOLTAG)) devid = &desc->voltag_devid.devid; else /* Have both PVOLTAG and AVOLTAG */ devid = &desc->voltag_devid.vol_tags_and_devid.devid; } if (pvol_tag) copy_voltag(&(ces->ces_pvoltag), pvol_tag); if (avol_tag) copy_voltag(&(ces->ces_avoltag), avol_tag); if (devid != NULL) { if (devid->designator_length > 0) { bcopy((void *)devid->designator, (void *)ces->ces_designator, devid->designator_length); ces->ces_designator_length = devid->designator_length; /* * Make sure we are always NUL terminated. This * won't matter for the binary code set, * since the user will only pay attention to the * length field.
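* (For the ASCII and UTF-8 code sets the terminator additionally makes ces_designator directly usable as a C string; binary designators must still be interpreted through ces_designator_length alone.)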
*/ ces->ces_designator[devid->designator_length]= '\0'; } if (devid->piv_assoc_designator_type & READ_ELEMENT_STATUS_PIV_SET) { ces->ces_flags |= CES_PIV; ces->ces_protocol_id = READ_ELEMENT_STATUS_PROTOCOL_ID( devid->prot_code_set); } ces->ces_code_set = READ_ELEMENT_STATUS_CODE_SET(devid->prot_code_set); ces->ces_assoc = READ_ELEMENT_STATUS_ASSOCIATION( devid->piv_assoc_designator_type); ces->ces_designator_type = READ_ELEMENT_STATUS_DESIGNATOR_TYPE( devid->piv_assoc_designator_type); } else if (scsi_version > SCSI_REV_2) { /* SCSI-SPC and No devid, no designator */ ces->ces_designator_length = 0; ces->ces_designator[0] = '\0'; ces->ces_protocol_id = CES_PROTOCOL_ID_FCP_4; } if (scsi_version <= SCSI_REV_2) { if (desc->dt_or_obsolete.scsi_2.dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) { ces->ces_flags |= CES_SCSIID_VALID; ces->ces_scsi_id = desc->dt_or_obsolete.scsi_2.dt_scsi_addr; } if (desc->dt_or_obsolete.scsi_2.dt_scsi_addr & READ_ELEMENT_STATUS_DT_LUVALID) { ces->ces_flags |= CES_LUN_VALID; ces->ces_scsi_lun = desc->dt_or_obsolete.scsi_2.dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK; } } } static int chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd, struct changer_element_status_request *cesr) { struct read_element_status_header *st_hdr; struct read_element_status_page_header *pg_hdr; struct read_element_status_descriptor *desc; caddr_t data = NULL; size_t size, desclen; int avail, i, error = 0; int curdata, dvcid, sense_flags; int try_no_dvcid = 0; struct changer_element_status *user_data = NULL; struct ch_softc *softc; union ccb *ccb; int chet = cesr->cesr_element_type; int want_voltags = (cesr->cesr_flags & CESR_VOLTAGS) ? 1 : 0; softc = (struct ch_softc *)periph->softc; /* perform argument checking */ /* * Perform a range check on the cesr_element_{base,count} * request argument fields. */ if ((softc->sc_counts[chet] - cesr->cesr_element_base) <= 0 || (cesr->cesr_element_base + cesr->cesr_element_count) > softc->sc_counts[chet]) return (EINVAL); /* * Request one descriptor for the given element type. This * is used to determine the size of the descriptor so that * we can allocate enough storage for all of them. We assume * that the first one can fit into 1k. */ cam_periph_unlock(periph); data = (caddr_t)malloc(1024, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); sense_flags = SF_RETRY_UA; if (softc->quirks & CH_Q_NO_DVCID) { dvcid = 0; curdata = 0; } else { dvcid = 1; curdata = 1; /* * Don't print anything for an Illegal Request, because * these flags can cause some changers to complain. We'll * retry without them if we get an error. */ sense_flags |= SF_QUIET_IR; } retry_einval: scsi_read_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* voltag */ want_voltags, /* sea */ softc->sc_firsts[chet], /* curdata */ curdata, /* dvcid */ dvcid, /* count */ 1, /* data_ptr */ data, /* dxfer_len */ 1024, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ sense_flags, softc->device_stats); /* * An Illegal Request sense key (only used if there is no asc/ascq) * or 0x24,0x00 for an ASC/ASCQ both map to EINVAL. If dvcid or * curdata are set (we set both or neither), try turning them off * and see if the command is successful. 
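* A changer that rejects those bits typically returns ILLEGAL REQUEST with ASC/ASCQ 0x24,0x00 (INVALID FIELD IN CDB), which the error handling maps to EINVAL; that is exactly the case the fallback below is meant to catch.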
*/ if ((error == EINVAL) && (dvcid || curdata)) { dvcid = 0; curdata = 0; error = 0; /* At this point we want to report any Illegal Request */ sense_flags &= ~SF_QUIET_IR; try_no_dvcid = 1; goto retry_einval; } /* * In this case, we tried a read element status with dvcid and * curdata set, and it failed. We retried without those bits, and * it succeeded. Suggest to the user that he set a quirk, so we * don't go through the retry process the first time in the future. * This should only happen on changers that claim SCSI-3 or higher, * but don't support these bits. */ if ((try_no_dvcid != 0) && (error == 0)) softc->quirks |= CH_Q_NO_DVCID; if (error) goto done; cam_periph_unlock(periph); st_hdr = (struct read_element_status_header *)data; pg_hdr = (struct read_element_status_page_header *)((uintptr_t)st_hdr + sizeof(struct read_element_status_header)); desclen = scsi_2btoul(pg_hdr->edl); size = sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header) + (desclen * cesr->cesr_element_count); /* * Reallocate storage for descriptors and get them from the * device. */ free(data, M_DEVBUF); data = (caddr_t)malloc(size, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); scsi_read_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* voltag */ want_voltags, /* sea */ softc->sc_firsts[chet] + cesr->cesr_element_base, /* curdata */ curdata, /* dvcid */ dvcid, /* count */ cesr->cesr_element_count, /* data_ptr */ data, /* dxfer_len */ size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); if (error) goto done; cam_periph_unlock(periph); /* * Fill in the user status array. */ st_hdr = (struct read_element_status_header *)data; pg_hdr = (struct read_element_status_page_header *)((uintptr_t)st_hdr + sizeof(struct read_element_status_header)); avail = scsi_2btoul(st_hdr->count); if (avail != cesr->cesr_element_count) { xpt_print(periph->path, "warning, READ ELEMENT STATUS avail != count\n"); } user_data = (struct changer_element_status *) malloc(avail * sizeof(struct changer_element_status), M_DEVBUF, M_WAITOK | M_ZERO); desc = (struct read_element_status_descriptor *)((uintptr_t)data + sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header)); /* * Set up the individual element status structures */ for (i = 0; i < avail; ++i) { struct changer_element_status *ces; /* * In the changer_element_status structure, fields from * the beginning to the field of ces_scsi_lun are common * between SCSI-2 and SCSI-3, while all the rest are new * from SCSI-3. In order to maintain backward compatibility * of the chio command, the ces pointer, below, is computed * such that it lines up with the structure boundary * corresponding to the SCSI version. */ ces = cmd == OCHIOGSTATUS ? (struct changer_element_status *) ((unsigned char *)user_data + i * (offsetof(struct changer_element_status,ces_scsi_lun)+1)): &user_data[i]; copy_element_status(softc, pg_hdr->flags, desc, ces, scsi_version); desc = (struct read_element_status_descriptor *) ((unsigned char *)desc + desclen); } /* Copy element status structures out to userspace. 
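* For the old OCHIOGSTATUS ioctl only the SCSI-2 portion of each structure is copied out: offsetof(struct changer_element_status, ces_scsi_lun) + 1 is the size of everything up to and including ces_scsi_lun, the last field shared with SCSI-2 (see the layout comment above the loop).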
*/ if (cmd == OCHIOGSTATUS) error = copyout(user_data, cesr->cesr_element_status, avail * (offsetof(struct changer_element_status, ces_scsi_lun) + 1)); else error = copyout(user_data, cesr->cesr_element_status, avail * sizeof(struct changer_element_status)); cam_periph_lock(periph); done: xpt_release_ccb(ccb); if (data != NULL) free(data, M_DEVBUF); if (user_data != NULL) free(user_data, M_DEVBUF); return (error); } static int chielem(struct cam_periph *periph, unsigned int timeout) { union ccb *ccb; struct ch_softc *softc; int error; if (!timeout) { timeout = CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS; } else { timeout *= 1000; } error = 0; softc = (struct ch_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_initialize_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chsetvoltag(struct cam_periph *periph, struct changer_set_voltag_request *csvr) { union ccb *ccb; struct ch_softc *softc; u_int16_t ea; u_int8_t sac; struct scsi_send_volume_tag_parameters ssvtp; int error; int i; error = 0; softc = (struct ch_softc *)periph->softc; bzero(&ssvtp, sizeof(ssvtp)); for (i = 0; i < sizeof(ssvtp.vitf); i++) { ssvtp.vitf[i] = ' '; } if (csvr->csvr_type > CHET_DT) return EINVAL; if (csvr->csvr_addr > (softc->sc_counts[csvr->csvr_type] - 1)) return ENODEV; ea = softc->sc_firsts[csvr->csvr_type] + csvr->csvr_addr; if (csvr->csvr_flags & CSVR_ALTERNATE) { switch (csvr->csvr_flags & CSVR_MODE_MASK) { case CSVR_MODE_SET: sac = SEND_VOLUME_TAG_ASSERT_ALTERNATE; break; case CSVR_MODE_REPLACE: sac = SEND_VOLUME_TAG_REPLACE_ALTERNATE; break; case CSVR_MODE_CLEAR: sac = SEND_VOLUME_TAG_UNDEFINED_ALTERNATE; break; default: error = EINVAL; goto out; } } else { switch (csvr->csvr_flags & CSVR_MODE_MASK) { case CSVR_MODE_SET: sac = SEND_VOLUME_TAG_ASSERT_PRIMARY; break; case CSVR_MODE_REPLACE: sac = SEND_VOLUME_TAG_REPLACE_PRIMARY; break; case CSVR_MODE_CLEAR: sac = SEND_VOLUME_TAG_UNDEFINED_PRIMARY; break; default: error = EINVAL; goto out; } } memcpy(ssvtp.vitf, csvr->csvr_voltag.cv_volid, min(strlen(csvr->csvr_voltag.cv_volid), sizeof(ssvtp.vitf))); scsi_ulto2b(csvr->csvr_voltag.cv_serial, ssvtp.minvsn); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_send_volume_tag(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* element_address */ ea, /* send_action_code */ sac, /* parameters */ &ssvtp, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_SEND_VOLTAG); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); out: return error; } static int chgetparams(struct cam_periph *periph) { union ccb *ccb; struct ch_softc *softc; void *mode_buffer; int mode_buffer_len; struct page_element_address_assignment *ea; struct page_device_capabilities *cap; int error, from, dbd; u_int8_t *moves, *exchanges; error = 0; softc = (struct ch_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); /* * The scsi_mode_sense_data structure is just a convenience * structure that allows us to easily calculate the worst-case * storage size of the mode sense buffer.
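* (It is, in effect, a 6-byte mode header plus a block descriptor plus the largest of the changer mode pages, mirroring the sum computed by hand in chstart(), so its sizeof() bounds any single MODE SENSE reply this function can receive.)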
*/ mode_buffer_len = sizeof(struct scsi_mode_sense_data); mode_buffer = malloc(mode_buffer_len, M_SCSICH, M_NOWAIT); if (mode_buffer == NULL) { printf("chgetparams: couldn't malloc mode sense data\n"); xpt_release_ccb(ccb); return(ENOSPC); } bzero(mode_buffer, mode_buffer_len); if (softc->quirks & CH_Q_NO_DBD) dbd = FALSE; else dbd = TRUE; /* * Get the element address assignment page. */ scsi_mode_sense(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* pc */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA|SF_NO_PRINT, softc->device_stats); if (error) { if (dbd) { struct scsi_mode_sense_6 *sms; sms = (struct scsi_mode_sense_6 *) ccb->csio.cdb_io.cdb_bytes; sms->byte2 &= ~SMS_DBD; error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); } else { /* * Since we disabled sense printing above, print * out the sense here since we got an error. */ scsi_sense_print(&ccb->csio); } if (error) { xpt_print(periph->path, "chgetparams: error getting element " "address page\n"); xpt_release_ccb(ccb); free(mode_buffer, M_SCSICH); return(error); } } ea = (struct page_element_address_assignment *) find_mode_page_6((struct scsi_mode_header_6 *)mode_buffer); softc->sc_firsts[CHET_MT] = scsi_2btoul(ea->mtea); softc->sc_counts[CHET_MT] = scsi_2btoul(ea->nmte); softc->sc_firsts[CHET_ST] = scsi_2btoul(ea->fsea); softc->sc_counts[CHET_ST] = scsi_2btoul(ea->nse); softc->sc_firsts[CHET_IE] = scsi_2btoul(ea->fieea); softc->sc_counts[CHET_IE] = scsi_2btoul(ea->niee); softc->sc_firsts[CHET_DT] = scsi_2btoul(ea->fdtea); softc->sc_counts[CHET_DT] = scsi_2btoul(ea->ndte); bzero(mode_buffer, mode_buffer_len); /* * Now get the device capabilities page. */ scsi_mode_sense(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* pc */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_DEVICE_CAP_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); if (error) { if (dbd) { struct scsi_mode_sense_6 *sms; sms = (struct scsi_mode_sense_6 *) ccb->csio.cdb_io.cdb_bytes; sms->byte2 &= ~SMS_DBD; error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); } else { /* * Since we disabled sense printing above, print * out the sense here since we got an error. 
*/ scsi_sense_print(&ccb->csio); } if (error) { xpt_print(periph->path, "chgetparams: error getting device " "capabilities page\n"); xpt_release_ccb(ccb); free(mode_buffer, M_SCSICH); return(error); } } xpt_release_ccb(ccb); cap = (struct page_device_capabilities *) find_mode_page_6((struct scsi_mode_header_6 *)mode_buffer); bzero(softc->sc_movemask, sizeof(softc->sc_movemask)); bzero(softc->sc_exchangemask, sizeof(softc->sc_exchangemask)); moves = cap->move_from; exchanges = cap->exchange_with; for (from = CHET_MT; from <= CHET_MAX; ++from) { softc->sc_movemask[from] = moves[from]; softc->sc_exchangemask[from] = exchanges[from]; } free(mode_buffer, M_SCSICH); return(error); } static int chscsiversion(struct cam_periph *periph) { struct scsi_inquiry_data *inq_data; struct ccb_getdev *cgd; int dev_scsi_version; cam_periph_assert(periph, MA_OWNED); if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) == NULL) return (-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); if (cgd->ccb_h.status != CAM_REQ_CMP) { xpt_free_ccb((union ccb *)cgd); return -1; } inq_data = &cgd->inq_data; dev_scsi_version = inq_data->version; xpt_free_ccb((union ccb *)cgd); return dev_scsi_version; } void scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout) { struct scsi_move_medium *scsi_cmd; scsi_cmd = (struct scsi_move_medium *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MOVE_MEDIUM; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(src, scsi_cmd->src); scsi_ulto2b(dst, scsi_cmd->dst); if (invert) scsi_cmd->invert |= MOVE_MEDIUM_INVERT; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_exchange_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst1, u_int32_t dst2, int invert1, int invert2, u_int8_t sense_len, u_int32_t timeout) { struct scsi_exchange_medium *scsi_cmd; scsi_cmd = (struct scsi_exchange_medium *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = EXCHANGE_MEDIUM; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(src, scsi_cmd->src); scsi_ulto2b(dst1, scsi_cmd->fdst); scsi_ulto2b(dst2, scsi_cmd->sdst); if (invert1) scsi_cmd->invert |= EXCHANGE_MEDIUM_INV1; if (invert2) scsi_cmd->invert |= EXCHANGE_MEDIUM_INV2; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_position_to_element(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout) { struct scsi_position_to_element *scsi_cmd; scsi_cmd = (struct scsi_position_to_element *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = POSITION_TO_ELEMENT; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(dst, scsi_cmd->dst); if (invert) scsi_cmd->invert |= POSITION_TO_ELEMENT_INVERT; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); 
} void scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int voltag, u_int32_t sea, int curdata, int dvcid, u_int32_t count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_element_status *scsi_cmd; scsi_cmd = (struct scsi_read_element_status *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_ELEMENT_STATUS; scsi_ulto2b(sea, scsi_cmd->sea); scsi_ulto2b(count, scsi_cmd->count); scsi_ulto3b(dxfer_len, scsi_cmd->len); if (dvcid) scsi_cmd->flags |= READ_ELEMENT_STATUS_DVCID; if (curdata) scsi_cmd->flags |= READ_ELEMENT_STATUS_CURDATA; if (voltag) scsi_cmd->byte2 |= READ_ELEMENT_STATUS_VOLTAG; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_initialize_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_initialize_element_status *scsi_cmd; scsi_cmd = (struct scsi_initialize_element_status *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = INITIALIZE_ELEMENT_STATUS; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /* data_ptr */ NULL, /* dxfer_len */ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_volume_tag(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t element_address, u_int8_t send_action_code, struct scsi_send_volume_tag_parameters *parameters, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_volume_tag *scsi_cmd; scsi_cmd = (struct scsi_send_volume_tag *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_VOLUME_TAG; scsi_ulto2b(element_address, scsi_cmd->ea); scsi_cmd->sac = send_action_code; scsi_ulto2b(sizeof(*parameters), scsi_cmd->pll); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /* data_ptr */ (u_int8_t *) parameters, sizeof(*parameters), sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_ch.h =================================================================== --- head/sys/cam/scsi/scsi_ch.h (revision 326264) +++ head/sys/cam/scsi/scsi_ch.h (revision 326265) @@ -1,510 +1,512 @@ /* $FreeBSD$ */ /* $NetBSD: scsi_changer.h,v 1.11 1998/02/13 08:28:32 enami Exp $ */ /*- + * SPDX-License-Identifier: BSD-4-Clause + * * Copyright (c) 1996 Jason R. Thorpe * All rights reserved. * * Partially based on an autochanger driver written by Stefan Grefen * and on an autochanger driver written by the Systems Programming Group * at the University of Utah Computer Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgements: * This product includes software developed by Jason R. 
Thorpe * for And Communications, http://www.and.com/ * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * SCSI changer interface description */ /*- * Partially derived from software written by Stefan Grefen * (grefen@goofy.zdv.uni-mainz.de soon grefen@convex.com) * based on the SCSI System written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 */ #ifndef _SCSI_SCSI_CH_H #define _SCSI_SCSI_CH_H 1 #include /* * SCSI command format */ /* * Exchange the medium in the source element with the medium * located at the destination element. */ struct scsi_exchange_medium { u_int8_t opcode; #define EXCHANGE_MEDIUM 0xa6 u_int8_t byte2; u_int8_t tea[2]; /* transport element address */ u_int8_t src[2]; /* source address */ u_int8_t fdst[2]; /* first destination address */ u_int8_t sdst[2]; /* second destination address */ u_int8_t invert; #define EXCHANGE_MEDIUM_INV1 0x01 #define EXCHANGE_MEDIUM_INV2 0x02 u_int8_t control; }; /* * Cause the medium changer to check all elements for medium and any * other status relevant to the element. */ struct scsi_initialize_element_status { u_int8_t opcode; #define INITIALIZE_ELEMENT_STATUS 0x07 u_int8_t byte2; u_int8_t reserved[3]; u_int8_t control; }; /* * Request the changer to move a unit of media from the source element * to the destination element. */ struct scsi_move_medium { u_int8_t opcode; u_int8_t byte2; u_int8_t tea[2]; /* transport element address */ u_int8_t src[2]; /* source element address */ u_int8_t dst[2]; /* destination element address */ u_int8_t reserved[2]; u_int8_t invert; #define MOVE_MEDIUM_INVERT 0x01 u_int8_t control; }; /* * Position the specified transport element (picker) in front of * the destination element specified. */ struct scsi_position_to_element { u_int8_t opcode; u_int8_t byte2; u_int8_t tea[2]; /* transport element address */ u_int8_t dst[2]; /* destination element address */ u_int8_t reserved[2]; u_int8_t invert; #define POSITION_TO_ELEMENT_INVERT 0x01 u_int8_t control; }; /* * Request that the changer report the status of its internal elements.
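* The returned data is an 8-byte read_element_status_header, followed for each element type by a read_element_status_page_header and then that type's fixed-size descriptors; the edl field in the page header gives the stride used to walk the descriptor array (see the structures below).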
*/ struct scsi_read_element_status { u_int8_t opcode; u_int8_t byte2; #define READ_ELEMENT_STATUS_VOLTAG 0x10 /* report volume tag info */ /* ...next 4 bits are an element type code... */ u_int8_t sea[2]; /* starting element address */ u_int8_t count[2]; /* number of elements */ u_int8_t flags; #define READ_ELEMENT_STATUS_DVCID 0x01 /* report device serial number */ #define READ_ELEMENT_STATUS_CURDATA 0x02 /* allow motion during command */ u_int8_t len[3]; /* length of data buffer */ u_int8_t reserved1; u_int8_t control; }; struct scsi_request_volume_element_address { u_int8_t opcode; u_int8_t byte2; #define REQUEST_VOLUME_ELEMENT_ADDRESS_VOLTAG 0x10 /* ...next 4 bits are an element type code... */ u_int8_t eaddr[2]; /* element address */ u_int8_t count[2]; /* number of elements */ u_int8_t reserved0; u_int8_t len[3]; /* length of data buffer */ u_int8_t reserved1; u_int8_t control; }; /* XXX scsi_release */ /* * Changer-specific mode page numbers. */ #define CH_ELEMENT_ADDR_ASSIGN_PAGE 0x1D #define CH_TRANS_GEOM_PARAMS_PAGE 0x1E #define CH_DEVICE_CAP_PAGE 0x1F /* * Data returned by READ ELEMENT STATUS consists of an 8-byte header * followed by one or more read_element_status_pages. */ struct read_element_status_header { u_int8_t fear[2]; /* first element address reported */ u_int8_t count[2]; /* number of elements available */ u_int8_t reserved; u_int8_t nbytes[3]; /* byte count of all pages */ }; struct read_element_status_page_header { u_int8_t type; /* element type code; see type codes below */ u_int8_t flags; #define READ_ELEMENT_STATUS_AVOLTAG 0x40 #define READ_ELEMENT_STATUS_PVOLTAG 0x80 u_int8_t edl[2]; /* element descriptor length */ u_int8_t reserved; u_int8_t nbytes[3]; /* byte count of all descriptors */ }; /* * Format of a volume tag */ struct volume_tag { u_int8_t vif[32]; /* volume identification field */ u_int8_t reserved[2]; u_int8_t vsn[2]; /* volume sequence number */ }; struct read_element_status_device_id { u_int8_t prot_code_set; #define READ_ELEMENT_STATUS_CODE_SET(p) ((p) & 0x0F) #define READ_ELEMENT_STATUS_PROTOCOL_ID(p) ((p) >> 4) u_int8_t piv_assoc_designator_type; #define READ_ELEMENT_STATUS_PIV_SET 0x80 #define READ_ELEMENT_STATUS_ASSOCIATION(p) ((p) >> 4) #define READ_ELEMENT_STATUS_DESIGNATOR_TYPE(p) ((p) & 0x0F) u_int8_t reserved2; u_int8_t designator_length; u_int8_t designator[256]; /* Allocate max length */ }; struct read_element_status_descriptor { u_int8_t eaddr[2]; /* element address */ u_int8_t flags1; #define READ_ELEMENT_STATUS_FULL 0x01 #define READ_ELEMENT_STATUS_IMPEXP 0x02 #define READ_ELEMENT_STATUS_EXCEPT 0x04 #define READ_ELEMENT_STATUS_ACCESS 0x08 #define READ_ELEMENT_STATUS_EXENAB 0x10 #define READ_ELEMENT_STATUS_INENAB 0x20 #define READ_ELEMENT_STATUS_MT_MASK1 0x05 #define READ_ELEMENT_STATUS_ST_MASK1 0x0c #define READ_ELEMENT_STATUS_IE_MASK1 0x3f #define READ_ELEMENT_STATUS_DT_MASK1 0x0c u_int8_t reserved0; u_int8_t sense_code; u_int8_t sense_qual; union { struct { u_int8_t dt_scsi_flags; #define READ_ELEMENT_STATUS_DT_LUNMASK 0x07 #define READ_ELEMENT_STATUS_DT_LUVALID 0x10 #define READ_ELEMENT_STATUS_DT_IDVALID 0x20 #define READ_ELEMENT_STATUS_DT_NOTBUS 0x80 u_int8_t dt_scsi_addr; u_int8_t reserved1; } scsi_2; /* reserved and obsolete (as of SCSI-3) fields */ u_int8_t reserved_or_obsolete[3]; } dt_or_obsolete; u_int8_t flags2; #define READ_ELEMENT_STATUS_INVERT 0x40 #define READ_ELEMENT_STATUS_SVALID 0x80 #define READ_ELEMENT_STATUS_ED 0x80 #define READ_ELEMENT_STATUS_MEDIA_TYPE_MASK 0x07 u_int8_t ssea[2]; /* source storage element 
address */ union { struct volume_tag pvoltag; struct volume_tag voltag[2]; struct read_element_status_device_id devid; struct { struct volume_tag pvoltag; struct read_element_status_device_id devid; } pvol_and_devid; struct { struct volume_tag voltag[2]; struct read_element_status_device_id devid; } vol_tags_and_devid; } voltag_devid; }; /* XXX add data returned by REQUEST VOLUME ELEMENT ADDRESS */ /* Element type codes */ #define ELEMENT_TYPE_MASK 0x0f /* Note: these aren't bits */ #define ELEMENT_TYPE_ALL 0x00 #define ELEMENT_TYPE_MT 0x01 #define ELEMENT_TYPE_ST 0x02 #define ELEMENT_TYPE_IE 0x03 #define ELEMENT_TYPE_DT 0x04 /* * XXX The following definitions should be common to all SCSI device types. */ #define PGCODE_MASK 0x3f /* valid page number bits in pg_code */ #define PGCODE_PS 0x80 /* indicates page is savable */ /* * Send volume tag information to the changer */ struct scsi_send_volume_tag { u_int8_t opcode; #define SEND_VOLUME_TAG 0xb6 u_int8_t byte2; u_int8_t ea[2]; /* element address */ u_int8_t reserved2; u_int8_t sac; /* send action code */ #define SEND_VOLUME_TAG_ASSERT_PRIMARY 0x08 #define SEND_VOLUME_TAG_ASSERT_ALTERNATE 0x09 #define SEND_VOLUME_TAG_REPLACE_PRIMARY 0x0a #define SEND_VOLUME_TAG_REPLACE_ALTERNATE 0x0b #define SEND_VOLUME_TAG_UNDEFINED_PRIMARY 0x0c #define SEND_VOLUME_TAG_UNDEFINED_ALTERNATE 0x0d u_int8_t reserved4[2]; u_int8_t pll[2]; /* parameter list length */ u_int8_t reserved5; u_int8_t control; }; /* * Parameter format for SEND VOLUME TAG */ struct scsi_send_volume_tag_parameters { u_int8_t vitf[32]; /* volume tag identification template */ u_int8_t reserved1[2]; u_int8_t minvsn[2]; /* minimum volume sequence number */ u_int8_t reserved2[2]; u_int8_t maxvsn[2]; /* maximum volume sequence number */ }; /* * Device capabilities page. * * This page defines characteristics of the element types in the * medium changer device. * * Note in the definitions below, the following abbreviations are * used: * MT Medium transport element (picker) * ST Storage transport element (slot) * IE Import/export element (portal) * DT Data transfer element (tape/disk drive) */ struct page_device_capabilities { u_int8_t pg_code; /* page code (0x1f) */ u_int8_t pg_length; /* page length (0x12) */ /* * The STOR_xx bits indicate that an element of a given * type may provide independent storage for a unit of * media. The top four bits of this value are reserved. */ u_int8_t stor; #define STOR_MT 0x01 #define STOR_ST 0x02 #define STOR_IE 0x04 #define STOR_DT 0x08 u_int8_t reserved0; /* * The MOVE_TO_yy bits indicate the changer supports * moving a unit of medium from an element of a given type to an * element of type yy. This is used to determine if a given * MOVE MEDIUM command is legal. The top four bits of each * of these values are reserved. */ u_int8_t move_from[CHET_MAX + 1]; #define MOVE_TO_MT 0x01 #define MOVE_TO_ST 0x02 #define MOVE_TO_IE 0x04 #define MOVE_TO_DT 0x08 u_int8_t reserved1[4]; /* * Similar to above, but for EXCHANGE MEDIUM. */ u_int8_t exchange_with[CHET_MAX + 1]; #define EXCHANGE_WITH_MT 0x01 #define EXCHANGE_WITH_ST 0x02 #define EXCHANGE_WITH_IE 0x04 #define EXCHANGE_WITH_DT 0x08 }; /* * Medium changer element address assignment page. * * Some of these fields can be a little confusing, so an explanation * is in order. * * Each component within a medium changer apparatus is called an * "element". * * The "medium transport element address" is the address of the first * picker (robotic arm).
"Number of medium transport elements" tells * us how many pickers exist in the changer. * * The "first storage element address" is the address of the first * slot in the tape or disk magazine. "Number of storage elements" tells * us how many slots exist in the changer. * * The "first import/export element address" is the address of the first * medium portal accessible both by the medium changer and an outside * human operator. This is where the changer might deposit tapes destined * for some vault. The "number of import/export elements" tells us * not many of these portals exist in the changer. NOTE: this number may * be 0. * * The "first data transfer element address" is the address of the first * tape or disk drive in the changer. "Number of data transfer elements" * tells us how many drives exist in the changer. */ struct page_element_address_assignment { u_int8_t pg_code; /* page code (0x1d) */ u_int8_t pg_length; /* page length (0x12) */ /* Medium transport element address */ u_int8_t mtea[2]; /* Number of medium transport elements */ u_int8_t nmte[2]; /* First storage element address */ u_int8_t fsea[2]; /* Number of storage elements */ u_int8_t nse[2]; /* First import/export element address */ u_int8_t fieea[2]; /* Number of import/export elements */ u_int8_t niee[2]; /* First data transfer element address */ u_int8_t fdtea[2]; /* Number of data trafer elements */ u_int8_t ndte[2]; u_int8_t reserved[2]; }; /* * Transport geometry parameters page. * * Defines whether each medium transport element is a member of a set of * elements that share a common robotics subsystem and whether the element * is capable of media rotation. One transport geometry descriptor is * transferred for each medium transport element, beginning with the first * medium transport element (other than the default transport element address * of 0). */ struct page_transport_geometry_parameters { u_int8_t pg_code; /* page code (0x1e) */ u_int8_t pg_length; /* page length; variable */ /* Transport geometry descriptor(s) are here. */ u_int8_t misc; #define CAN_ROTATE 0x01 /* Member number in transport element set. 
*/ u_int8_t member; }; __BEGIN_DECLS void scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout); void scsi_exchange_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst1, u_int32_t dst2, int invert1, int invert2, u_int8_t sense_len, u_int32_t timeout); void scsi_position_to_element(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout); void scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int voltag, u_int32_t sea, int curdata, int dvcid, u_int32_t count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_initialize_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout); void scsi_send_volume_tag(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t element_address, u_int8_t send_action_code, struct scsi_send_volume_tag_parameters *parameters, u_int8_t sense_len, u_int32_t timeout); __END_DECLS #endif /* _SCSI_SCSI_CH_H */ Index: head/sys/cam/scsi/scsi_da.c =================================================================== --- head/sys/cam/scsi/scsi_da.c (revision 326264) +++ head/sys/cam/scsi/scsi_da.c (revision 326265) @@ -1,6069 +1,6071 @@ /*- * Implementation of SCSI Direct Access Peripheral driver for CAM. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #include #ifdef _KERNEL /* * Note that there are probe ordering dependencies here. The order isn't * controlled by this enumeration, but by explicit state transitions in * dastart() and dadone(). Here are some of the dependencies: * * 1. RC should come first, before RC16, unless there is evidence that RC16 * is supported. * 2. BDC needs to come before any of the ATA probes, or the ZONE probe. * 3. The ATA probes should go in this order: * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE */ typedef enum { DA_STATE_PROBE_RC, DA_STATE_PROBE_RC16, DA_STATE_PROBE_LBP, DA_STATE_PROBE_BLK_LIMITS, DA_STATE_PROBE_BDC, DA_STATE_PROBE_ATA, DA_STATE_PROBE_ATA_LOGDIR, DA_STATE_PROBE_ATA_IDDIR, DA_STATE_PROBE_ATA_SUP, DA_STATE_PROBE_ATA_ZONE, DA_STATE_PROBE_ZONE, DA_STATE_NORMAL } da_state; typedef enum { DA_FLAG_PACK_INVALID = 0x000001, DA_FLAG_NEW_PACK = 0x000002, DA_FLAG_PACK_LOCKED = 0x000004, DA_FLAG_PACK_REMOVABLE = 0x000008, DA_FLAG_NEED_OTAG = 0x000020, DA_FLAG_WAS_OTAG = 0x000040, DA_FLAG_RETRY_UA = 0x000080, DA_FLAG_OPEN = 0x000100, DA_FLAG_SCTX_INIT = 0x000200, DA_FLAG_CAN_RC16 = 0x000400, DA_FLAG_PROBED = 0x000800, DA_FLAG_DIRTY = 0x001000, DA_FLAG_ANNOUNCED = 0x002000, DA_FLAG_CAN_ATA_DMA = 0x004000, DA_FLAG_CAN_ATA_LOG = 0x008000, DA_FLAG_CAN_ATA_IDLOG = 0x010000, DA_FLAG_CAN_ATA_SUPCAP = 0x020000, DA_FLAG_CAN_ATA_ZONE = 0x040000 } da_flags; typedef enum { DA_Q_NONE = 0x00, DA_Q_NO_SYNC_CACHE = 0x01, DA_Q_NO_6_BYTE = 0x02, DA_Q_NO_PREVENT = 0x04, DA_Q_4K = 0x08, DA_Q_NO_RC16 = 0x10, DA_Q_NO_UNMAP = 0x20, DA_Q_RETRY_BUSY = 0x40, DA_Q_SMR_DM = 0x80, DA_Q_STRICT_UNMAP = 0x100 } da_quirks; #define DA_Q_BIT_STRING \ "\020" \ "\001NO_SYNC_CACHE" \ "\002NO_6_BYTE" \ "\003NO_PREVENT" \ "\0044K" \ "\005NO_RC16" \ "\006NO_UNMAP" \ "\007RETRY_BUSY" \ "\010SMR_DM" \ "\011STRICT_UNMAP" typedef enum { DA_CCB_PROBE_RC = 0x01, DA_CCB_PROBE_RC16 = 0x02, DA_CCB_PROBE_LBP = 0x03, DA_CCB_PROBE_BLK_LIMITS = 0x04, DA_CCB_PROBE_BDC = 0x05, DA_CCB_PROBE_ATA = 0x06, DA_CCB_BUFFER_IO = 0x07, DA_CCB_DUMP = 0x0A, DA_CCB_DELETE = 0x0B, DA_CCB_TUR = 0x0C, DA_CCB_PROBE_ZONE = 0x0D, DA_CCB_PROBE_ATA_LOGDIR = 0x0E, DA_CCB_PROBE_ATA_IDDIR = 0x0F, DA_CCB_PROBE_ATA_SUP = 0x10, DA_CCB_PROBE_ATA_ZONE = 0x11, DA_CCB_TYPE_MASK = 0x1F, DA_CCB_RETRY_UA = 0x20 } da_ccb_state; /* * Order here is important for method choice * * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes * using ATA_TRIM than the corresponding UNMAP results for a real world mysql * import taking 5mins. * */ typedef enum { DA_DELETE_NONE, DA_DELETE_DISABLE, DA_DELETE_ATA_TRIM, DA_DELETE_UNMAP, DA_DELETE_WS16, DA_DELETE_WS10, DA_DELETE_ZERO, DA_DELETE_MIN = DA_DELETE_ATA_TRIM, DA_DELETE_MAX = DA_DELETE_ZERO } da_delete_methods; /* * For SCSI, host managed drives show up as a separate device type. For * ATA, host managed drives also have a different device signature. * XXX KDM figure out the ATA host managed signature. 
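* (On the SCSI side the distinction is visible in the INQUIRY data: host managed ZBC devices report peripheral device type 0x14 instead of T_DIRECT, while host aware and drive managed drives look like ordinary direct access devices.)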
*/ typedef enum { DA_ZONE_NONE = 0x00, DA_ZONE_DRIVE_MANAGED = 0x01, DA_ZONE_HOST_AWARE = 0x02, DA_ZONE_HOST_MANAGED = 0x03 } da_zone_mode; /* * We distinguish between these interface cases in addition to the drive type: * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC * o ATA drive behind a SCSI translation layer that does not know about * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this * case, we would need to share the ATA code with the ada(4) driver. * o SCSI drive. */ typedef enum { DA_ZONE_IF_SCSI, DA_ZONE_IF_ATA_PASS, DA_ZONE_IF_ATA_SAT, } da_zone_interface; typedef enum { DA_ZONE_FLAG_RZ_SUP = 0x0001, DA_ZONE_FLAG_OPEN_SUP = 0x0002, DA_ZONE_FLAG_CLOSE_SUP = 0x0004, DA_ZONE_FLAG_FINISH_SUP = 0x0008, DA_ZONE_FLAG_RWP_SUP = 0x0010, DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP | DA_ZONE_FLAG_OPEN_SUP | DA_ZONE_FLAG_CLOSE_SUP | DA_ZONE_FLAG_FINISH_SUP | DA_ZONE_FLAG_RWP_SUP), DA_ZONE_FLAG_URSWRZ = 0x0020, DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET | DA_ZONE_FLAG_OPT_NONSEQ_SET | DA_ZONE_FLAG_MAX_SEQ_SET) } da_zone_flags; static struct da_zone_desc { da_zone_flags value; const char *desc; } da_zone_desc_table[] = { {DA_ZONE_FLAG_RZ_SUP, "Report Zones" }, {DA_ZONE_FLAG_OPEN_SUP, "Open" }, {DA_ZONE_FLAG_CLOSE_SUP, "Close" }, {DA_ZONE_FLAG_FINISH_SUP, "Finish" }, {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, }; typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, struct bio *bp); static da_delete_func_t da_delete_trim; static da_delete_func_t da_delete_unmap; static da_delete_func_t da_delete_ws; static const void * da_delete_functions[] = { NULL, NULL, da_delete_trim, da_delete_unmap, da_delete_ws, da_delete_ws, da_delete_ws }; static const char *da_delete_method_names[] = { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; static const char *da_delete_method_desc[] = { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", "WRITE SAME(10) with UNMAP", "ZERO" }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct disk_params { u_int8_t heads; u_int32_t cylinders; u_int8_t secs_per_track; u_int32_t secsize; /* Number of bytes/sector */ u_int64_t sectors; /* total number sectors */ u_int stripesize; u_int stripeoffset; }; #define UNMAP_RANGE_MAX 0xffffffff #define UNMAP_HEAD_SIZE 8 #define UNMAP_RANGE_SIZE 16 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ UNMAP_HEAD_SIZE) #define WS10_MAX_BLKS 0xffff #define WS16_MAX_BLKS 0xffffffff #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) #define DA_WORK_TUR (1 << 16) struct da_softc { struct cam_iosched_softc *cam_iosched; struct bio_queue_head delete_run_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int minimum_cmd_size; int error_inject; int trim_max_ranges; int delete_available; /* Delete methods possibly available */ da_zone_mode zone_mode; da_zone_interface zone_interface; da_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; u_int maxio; uint32_t unmap_max_ranges; uint32_t 
unmap_max_lba; /* Max LBAs in UNMAP req */ uint32_t unmap_gran; uint32_t unmap_gran_align; uint64_t ws_max_blks; da_delete_methods delete_method_pref; da_delete_methods delete_method; da_delete_func_t *delete_func; int unmappedio; int rotating; struct disk_params params; struct disk *disk; union ccb saved_ccb; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; uint64_t wwpn; uint8_t unmap_buf[UNMAP_BUF_SIZE]; struct scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif #define DA_ANNOUNCETMP_SZ 80 char announce_temp[DA_ANNOUNCETMP_SZ]; #define DA_ANNOUNCE_SZ 400 char announcebuf[DA_ANNOUNCE_SZ]; }; #define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ } struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; }; static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP"; static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. */ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. 
* Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. */ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. * Also VMware returns odd errors on misaligned UNMAPs. */ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP }, /* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. * Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! 
Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory. * PR: usb/89196 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative MUVO Slim mp3 player (USB) * PR: usb/86131 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) * PR: usb/80487 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SanDisk Micro Cruzer 128MB * PR: usb/75970 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TOSHIBA TransMemory USB sticks * PR: kern/94660 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * PNY USB 3.0 Flash Drives */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 }, { /* * PNY USB Flash keys * PR: usb/75578, usb/72344, usb/65436 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Genesys GL3224 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 }, { /* * Genesys 6-in-1 Card Reader * PR: usb/94647 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Rekam Digital CAMERA * PR: usb/98713 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver H10 MP3 player * PR: usb/102547 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver U10 MP3 player * PR: usb/92306 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * X-Micro Flash Disk * PR: usb/96901 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * EasyMP3 EM732X USB 2.0 Flash MP3 Player * PR: usb/96546 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Denver MP3 player * PR: usb/107101 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Philips USB Key Audio KEY013 * PR: usb/68412 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { /* * JNC MP3 Player * PR: usb/94439 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SAMSUNG MP0402H * PR: usb/108427 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * I/O Magic USB flash - Giga Bank * PR: usb/108810 */ {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * JoyFly 128mb USB Flash Drive * PR: 96133 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ChipsBnk usb stick * PR: 103702 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A * PR: 129858 */ {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung YP-U3 mp3-player * PR: 125398 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sony Cyber-Shot DSC cameras * PR: usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcent USB sticks lie on RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * I-O Data USB Flash Disk * PR: usb/211716 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Micron Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* 
Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 
}, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB Hard drive designed by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k
optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 750 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 845 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. 
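 * The DA_Q_SMR_DM quirk set by the entry below is what daregister()
 * later keys on to force the zone mode, roughly (see the probe code
 * further down):
 *
 *	else if (softc->quirks & DA_Q_SMR_DM)
 *		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;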
*/ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" }, /*quirks*/DA_Q_SMR_DM }, { /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; static void dazonedone(struct cam_periph *periph, union ccb *ccb); static void dadone(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); static timeout_t dasendorderedtag; static void dashutdown(void *arg, int howto); static timeout_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. 
Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * passes the "don't send an ordered tag" test, so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. */ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } cam_periph_unhold(periph); cam_periph_unlock(periph); if (error != 0) cam_periph_release(periph); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (cam_periph_hold(periph, PRIBIO) == 0) { /* Flush disk cache. */ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); cam_periph_unhold(periph); } /* * If we've got removable media, mark the blocksize as * unavailable, since it could change when new media is * inserted.
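 * (DEVSTAT_BS_UNAVAILABLE below tells devstat consumers not to trust
 * the block size until a subsequent probe establishes it again.)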
*/ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. */ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { cam_periph_unlock(periph); return (ENXIO); } memset(&csio, 0, sizeof(csio)); if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, dadone, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) printf("Aborting dump due to I/O error.\n"); cam_periph_unlock(periph); return (error); } /* * Sync the disk cache contents to the physical media. 
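 * A begin_lba of 0 combined with an lb_count of 0 asks the device to
 * flush its entire cache; see the scsi_synchronize_cache() call below.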
*/ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } cam_periph_unlock(periph); return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. */ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... 
*/ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC && SID_TYPE(&cgd->inq_data) != T_OPTICAL && SID_TYPE(&cgd->inq_data) != T_ZBC_HM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, * as they will be handled by daerror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } else if (asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. 
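 * Each CCB already in flight is tagged with DA_CCB_RETRY_UA below so
 * that daerror() retries it rather than failing the I/O.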
*/ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } case AC_INQ_CHANGED: softc = (struct da_softc *)periph->softc; softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[80], tmpstr2[80]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_release(periph); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= DA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, softc, 0, dazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &softc->rotating, 0, "Rotating media"); /* * Add some addressing info. 
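 * We fetch the current transport settings (XPT_GET_TRAN_SETTINGS); on
 * Fibre Channel paths a valid World Wide Port Name is then exported
 * through the read-only "wwpn" sysctl created below.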
*/ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { cam_periph_release(periph); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. * XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values smaller than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. 
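 * Anything else is rounded to the next permissible size, e.g. writing
 * 8 yields 10, 11 yields 12, and anything above 12 yields 16; values
 * below 6 are clamped to 6 (see the range checks below).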
*/ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available & (1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else cam_periph_release_locked(periph); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. */ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. 
*/ dadeletemethodset(softc, default_method); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, methods, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = softc->delete_available | (1 << DA_DELETE_DISABLE); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } static int dazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct da_softc *softc; int error; softc = (struct da_softc *)arg1; switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case DA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case DA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case DA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int dazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct da_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct da_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(da_zone_desc_table) / sizeof(da_zone_desc_table[0]); i++) { if (softc->zone_flags & da_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, da_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_RC; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->unmap_gran = 0; softc->unmap_gran_align = 0; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->rotating = 1; periph->softc = softc; /* * See if this device has any quirks. 
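 * cam_quirkmatch() walks da_quirk_table with scsi_inquiry_match(); a
 * hypothetical fixed disk whose INQUIRY reports vendor "SAMSUNG" and
 * product "HD204UI", for example, would match the
 * { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" } entry
 * above and inherit DA_Q_4K.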
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM) softc->zone_mode = DA_ZONE_HOST_MANAGED; else if (softc->quirks & DA_Q_SMR_DM) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; else softc->zone_mode = DA_ZONE_NONE; if (softc->zone_mode != DA_ZONE_NONE) { if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) softc->zone_interface = DA_ZONE_IF_ATA_SAT; else softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else softc->zone_interface = DA_ZONE_IF_SCSI; } TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Take an exclusive refcount on the periph while dastart is called * to finish the probe. The reference will be dropped in dadone at * the end of probe. */ (void)cam_periph_hold(periph, PRIBIO); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; else if (softc->minimum_cmd_size > 10) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 6) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; softc->state = DA_STATE_PROBE_RC16; } /* * Register this media as a disk. 
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->unmappedio = 1; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule periodic media polling events.
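 * Polling is only armed for removable media on paths that lack async
 * event support (SID_AEN), and runs every da_poll_period seconds
 * (kern.cam.da.poll_period, default 3; 0 disables it).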
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int da_zone_bio_to_scsi(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ZBC_OUT_SA_OPEN; case DISK_ZONE_CLOSE: return ZBC_OUT_SA_CLOSE; case DISK_ZONE_FINISH: return ZBC_OUT_SA_FINISH; case DISK_ZONE_RWP: return ZBC_OUT_SA_RWP; } return -1; } static int da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct da_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba = bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. 
*/ error = scsi_ata_zac_mgmt_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_out() returned an " "error!"); goto bailout; } } *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ ZBC_IN_SA_REPORT_ZONES, /*zone_start_lba*/ rep->starting_id, /*zone_options*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. */ error = scsi_ata_zac_mgmt_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_in() returned an " "error!"); goto bailout; } } /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. 
If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated. */ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case DA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case DA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case DA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* Not sure this is possible, but failsafe by lying and saying "sure, done." */ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); cam_periph_release_locked(periph); /* XXX is this still valid? 
I think so but unverified */ } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; biotrack(bp, __func__); if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) start_ccb->csio.bio = bp; #endif break; } case BIO_FLUSH: /* * If we don't support sync cache, or the disk * isn't dirty, FLUSH is a no-op. Use the * allocated CCB for the next bio if one is * available. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || (softc->flags & DA_FLAG_DIRTY) == 0) { biodone(bp); goto skipstate; } /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. We also force * ordered tag semantics so the flush applies * to all previously queued I/O. */ scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_ORDERED_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); /* * Clear the dirty flag before sending the command. * Either this sync cache will be successful, or it * will fail after a retry. If it fails, it is * unlikely to be successful if retried later, so * we'll save ourselves time by just marking the * device clean. */ softc->flags &= ~DA_FLAG_DIRTY; break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph???
*/ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here we don't support any SBC-3 delete * methods with UNMAP, as Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31. * However, older revisions of the spec don't mandate * this, so we currently don't remove these methods * from the available set. */ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported, skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph???
*/ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * Note that if the ATA VPD page isn't * supported, we aren't talking to an ATA * device anyway. Support for that VPD * page is mandatory for SCSI to ATA (SAT) * translation layers. */ softc->state = DA_STATE_PROBE_ZONE; goto skipstate; } daprobedone(periph, start_ccb); break; } ata_params = (struct ata_params*) malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO); if (ata_params == NULL) { xpt_print(periph->path, "Couldn't malloc ata_params " "data\n"); /* da_free_periph??? */ break; } scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_LOGDIR: { struct ata_gp_log_dir *log_dir; int retval; retval = 0; if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { /* * If we don't have log support, not much point in * trying to probe zone support. */ daprobedone(periph, start_ccb); break; } /* * If we have an ATA device (the SCSI ATA Information VPD * page should be present and the ATA identify should have * succeeded) and it supports logs, ask for the log directory. */ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/ sizeof(*log_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(log_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_IDDIR: { struct ata_identify_log_pages *id_dir; int retval; retval = 0; /* * Check here to see whether the Identify Device log is * supported in the directory of logs. If so, continue * with requesting the log of identify device pages. 
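 *
 * In sketch form, the read-log request that follows is addressed as
 * (values taken from the call below; illustration only):
 *
 *	log_address = ATA_IDENTIFY_DATA_LOG;	// which GP log
 *	page_number = ATA_IDL_PAGE_LIST;	// page within the log
 *	block_count = 1;			// one 512-byte log block
 *
 * with DMA used only when the IDENTIFY data advertised it
 * (DA_FLAG_CAN_ATA_DMA), PIO otherwise.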
*/ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { daprobedone(periph, start_ccb); break; } id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(id_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_SUP: { struct ata_identify_log_sup_cap *sup_cap; int retval; retval = 0; /* * Check here to see whether the Supported Capabilities log * is in the list of Identify Device logs. */ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { daprobedone(periph, start_ccb); break; } sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(sup_cap, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_ZONE: { struct ata_zoned_info_log *ata_zone; int retval; retval = 0; /* * Check here to see whether the zoned device information * page is supported. If so, continue on to request it. * If not, skip to DA_STATE_PROBE_LOG or done. */ if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { daprobedone(periph, start_ccb); break; } ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(ata_zone, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ZONE: { struct scsi_vpd_zoned_bdc *bdc; /* * Note that this page will be supported for SCSI protocol * devices that support ZBC (SMR devices), as well as ATA * protocol devices that are behind a SAT (SCSI to ATA * Translation) layer that supports converting ZBC commands * to their ZAC equivalents. */ if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { daprobedone(periph, start_ccb); break; } bdc = (struct scsi_vpd_zoned_bdc *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { xpt_release_ccb(start_ccb); xpt_print(periph->path, "Couldn't malloc zone VPD " "data\n"); break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_ZONED_BDC, /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; xpt_action(start_ccb); break; } } } /* * In each of the methods below, while it's the caller's * responsibility to ensure the request will fit into a * single device request, we might have changed the delete * method due to the device incorrectly advertising either * its supported methods or limits. * * To prevent this causing further issues we validate the * request against the method's limits, and warn, which would * otherwise be unnecessary. */ static void da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; uint64_t lba, lastlba = (uint64_t)-1; uint64_t totalcount = 0; uint64_t count; uint32_t c, lastcount = 0, ranges = 0; /* * Currently this doesn't take the UNMAP * Granularity and Granularity Alignment * fields into account. * * This could result in both suboptimal UNMAP * requests as well as UNMAP calls unmapping * fewer LBAs than requested. */ bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { /* * Note: ada and da are different in how they store the * pending bp's in a trim. ada stores all of them in the * trim_req.bps. da stores all but the first one in the * delete_run_queue. ada then completes all the bps in * its adadone() loop. da completes all the bps in the * delete_run_queue in dadone, and relies on the biodone * after to complete. This should be reconciled since there's * no real reason to do it differently. XXX */ if (bp1 != bp) bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, UNMAP_RANGE_MAX - lastcount); lastlba += c; lastcount += c; scsi_ulto4b(lastcount, d[ranges - 1].length); count -= c; lba += c; totalcount += c; } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0) { /* Align length of the previous range.
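 * (Worked example with assumed numbers: with unmap_gran == 8 and a
 * previous range of lastcount == 13, c = 13 % 8 = 5, so the 5
 * trailing blocks are trimmed from that range and backed out of the
 * running totals; if the range is smaller than one granularity unit,
 * i.e. lastcount == c, the range is dropped outright.)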
*/ if ((c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) { totalcount -= lastcount; lastlba = (uint64_t)-1; lastcount = 0; ranges--; } else { totalcount -= c; lastlba -= c; lastcount -= c; scsi_ulto4b(lastcount, d[ranges - 1].length); } } /* Align beginning of the new range. */ c = (lba - softc->unmap_gran_align) % softc->unmap_gran; if (c != 0) { c = softc->unmap_gran - c; if (count <= c) { count = 0; } else { lba += c; count -= c; } } } while (count > 0) { c = omin(count, UNMAP_RANGE_MAX); if (totalcount + c > softc->unmap_max_lba || ranges >= softc->unmap_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld " "|| %d >= %d\n", da_delete_method_desc[softc->delete_method], totalcount + c, softc->unmap_max_lba, ranges, softc->unmap_max_ranges); break; } scsi_u64to8b(lba, d[ranges].lba); scsi_ulto4b(c, d[ranges].length); lba += c; totalcount += c; ranges++; count -= c; lastlba = lba; lastcount = c; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (ranges >= softc->unmap_max_ranges || totalcount + bp1->bio_bcount / softc->params.secsize > softc->unmap_max_lba) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); /* Align length of the last range. */ if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && (c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) ranges--; else scsi_ulto4b(lastcount - c, d[ranges - 1].length); } scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range.
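 * (For context: each DSM TRIM range below is an 8-byte little-endian
 * word holding a 48-bit LBA and a 16-bit block count. A compact
 * equivalent of the byte stores in the loop that follows would be:
 *
 *	le64enc(&buf[ranges * 8], ((uint64_t)c << 48) | lba);
 *
 * assuming lba fits in 48 bits and c <= ATA_DSM_RANGE_MAX, so the
 * count occupies only the top 16 bits.)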
*/ if (lba == lastlba) { c = omin(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * 8; buf[off + 6] = lastcount & 0xff; buf[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = omin(count, ATA_DSM_RANGE_MAX); off = ranges * 8; buf[off + 0] = lba & 0xff; buf[off + 1] = (lba >> 8) & 0xff; buf[off + 2] = (lba >> 16) & 0xff; buf[off + 3] = (lba >> 24) & 0xff; buf[off + 4] = (lba >> 32) & 0xff; buf[off + 5] = (lba >> 40) & 0xff; buf[off + 6] = c & 0xff; buf[off + 7] = (c >> 8) & 0xff; lba += c; ranges++; count -= c; lastcount = c; if (count != 0 && ranges == softc->trim_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], requestcount, (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX); break; } } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); block_count = howmany(ranges, ATA_DSM_BLK_RANGES); scsi_ata_trim(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, block_count, /*data_ptr*/buf, /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } /* * We calculate ws_max_blks here based off d_delmaxsize instead * of using softc->ws_max_blks, as the latter is the absolute max * for the device, not the protocol max, which may well be lower. */ static void da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc; struct bio *bp1; uint64_t ws_max_blks; uint64_t lba; uint64_t count; /* forward compat with WS32 */ softc = (struct da_softc *)periph->softc; ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; lba = bp->bio_pblkno; count = 0; bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); count += bp1->bio_bcount / softc->params.secsize; if (count > ws_max_blks) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], count, ws_max_blks); count = omin(count, ws_max_blks); break; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (lba + count != bp1->bio_pblkno || count + bp1->bio_bcount / softc->params.secsize > ws_max_blks) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_write_same(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/softc->delete_method == DA_DELETE_ZERO ? 0 : SWS_UNMAP, softc->delete_method == DA_DELETE_WS16 ?
16 : 10, /*lba*/lba, /*block_count*/count, /*data_ptr*/ __DECONST(void *, zero_region), /*dxfer_len*/ softc->params.secsize, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static int cmd6workaround(union ccb *ccb) { struct scsi_rw_6 cmd6; struct scsi_rw_10 *cmd10; struct da_softc *softc; u_int8_t *cdb; struct bio *bp; int frozen; cdb = ccb->csio.cdb_io.cdb_bytes; softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { da_delete_methods old_method = softc->delete_method; /* * Typically there are two reasons for failure here * 1. Delete method was detected as supported but isn't * 2. Delete failed due to invalid params e.g. too big * * While we will attempt to choose an alternative delete method * this may result in short deletes if the existing delete * requests from GEOM are too big for the new method chosen. * * This method assumes that the error which triggered this * will not retry the I/O; otherwise a panic will occur */ dadeleteflag(softc, old_method, 0); dadeletemethodchoose(softc, DA_DELETE_DISABLE); if (softc->delete_method == DA_DELETE_DISABLE) xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n", da_delete_method_desc[old_method]); else xpt_print(ccb->ccb_h.path, "%s failed, switching to %s BIO_DELETE\n", da_delete_method_desc[old_method], da_delete_method_desc[softc->delete_method]); while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) cam_iosched_queue_work(softc->cam_iosched, bp); cam_iosched_queue_work(softc->cam_iosched, (struct bio *)ccb->ccb_h.ccb_bp); ccb->ccb_h.ccb_bp = NULL; return (0); } /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == PREVENT_ALLOW) && (softc->quirks & DA_Q_NO_PREVENT) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); softc->quirks |= DA_Q_NO_PREVENT; return (0); } /* Detect unsupported SYNCHRONIZE CACHE(10). */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == SYNCHRONIZE_CACHE) && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "SYNCHRONIZE CACHE(10) not supported.\n"); softc->quirks |= DA_Q_NO_SYNC_CACHE; softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; return (0); } /* Translation only possible if CDB is an array and cmd is R/W6 */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || (*cdb != READ_6 && *cdb != WRITE_6)) return (0); xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " "increasing minimum_cmd_size to 10.\n"); softc->minimum_cmd_size = 10; bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); cmd10 = (struct scsi_rw_10 *)cdb; cmd10->opcode = (cmd6.opcode == READ_6) ?
READ_10 : WRITE_10; cmd10->byte2 = 0; scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); cmd10->reserved = 0; scsi_ulto2b(cmd6.length, cmd10->length); cmd10->control = cmd6.control; ccb->csio.cdb_len = sizeof(*cmd10); /* Requeue request, unfreezing queue if necessary */ frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static void dazonedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; struct bio *bp; softc = periph->softc; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t num_alloced, hdr_len, num_avail; uint32_t num_to_fill, i; int ata; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->csio.dxfer_len - ccb->csio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ bp->bio_resid = ccb->csio.resid; num_alloced = rep->entries_allocated; hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) ata = 1; else ata = 0; hdr_len = ata ? le32dec(hdr->length) : scsi_4btoul(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : scsi_8btou64(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. */ if (num_avail == 0) { rep->entries_filled = 0; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; break; } for (i = 0, desc = &hdr->desc_list[0], entry = &rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface.
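 *
 * The one real conversion below is byte order: data returned via
 * ATA passthrough is little-endian, while SCSI data is big-endian.
 * Illustratively:
 *
 *	lba = ata ? le64dec(desc->zone_start_lba)
 *		  : scsi_8btou64(desc->zone_start_lba);
 *
 * while the type/condition bit definitions map one-to-one.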
*/ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = ata ? le64dec(desc->zone_length) : scsi_8btou64(desc->zone_length); entry->zone_start_lba = ata ? le64dec(desc->zone_start_lba) : scsi_8btou64(desc->zone_start_lba); entry->write_pointer_lba = ata ? le64dec(desc->write_pointer_lba) : scsi_8btou64(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->csio.data_ptr, M_SCSIDA); } static void dadone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; da_ccb_state state; softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); csio = &done_ccb->csio; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (csio->bio != NULL) biotrack(csio->bio, __func__); #endif state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; switch (state) { case DA_CCB_BUFFER_IO: case DA_CCB_DELETE: { struct bio *bp, *bp1; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = daerror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if (error != 0) { int queued_error; /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ queued_error = EIO; if (error == ENXIO && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { /* * Catastrophic error. Mark our pack as * invalid. */ /* * XXX See if this is really a media * XXX change first? 
*/ xpt_print(periph->path, "Invalidating pack\n"); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif queued_error = ENXIO; } cam_iosched_flush(softc->cam_iosched, NULL, queued_error); if (bp != NULL) { bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } } else if (bp != NULL) { if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else if (bp != NULL) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); if (bp->bio_cmd == BIO_ZONE) dazonedone(periph, done_ccb); else if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; if (softc->error_inject != 0) { bp->bio_error = softc->error_inject; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; softc->error_inject = 0; } } if (bp != NULL) biotrack(bp, __func__); LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); if (LIST_EMPTY(&softc->pending_ccbs)) softc->flags |= DA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. */ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == DA_CCB_DELETE) { TAILQ_HEAD(, bio) queue; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); softc->delete_run_queue.insert_point = NULL; /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * delete_running set to 0 before the call above to * allow other I/O to progress when many BIO_DELETE * requests are pushed down. We set delete_running to 0 * and call daschedule again so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); daschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { daschedule(periph); cam_periph_unlock(periph); } if (bp != NULL) biodone(bp); return; } case DA_CCB_PROBE_RC: case DA_CCB_PROBE_RC16: { struct scsi_read_capacity_data *rdcap; struct scsi_read_capacity_data_long *rcaplong; char *announce_buf; int lbp; lbp = 0; rdcap = NULL; rcaplong = NULL; /* XXX TODO: can this be a malloc? */ announce_buf = softc->announce_temp; bzero(announce_buf, DA_ANNOUNCETMP_SZ); if (state == DA_CCB_PROBE_RC) rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; else rcaplong = (struct scsi_read_capacity_data_long *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct disk_params *dp; uint32_t block_size; uint64_t maxsector; u_int lalba; /* Lowest aligned LBA. 
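 *
 * In sketch form, the fallback decision made just below is
 * (illustration only):
 *
 *	if (scsi_4btoul(rdcap->addr) == 0xffffffff) {
 *		/* READ CAPACITY(10) maxed out; the real sector count
 *		   needs READ CAPACITY(16), so reissue the probe. */
 *		softc->state = DA_STATE_PROBE_RC16;
 *	}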
*/ if (state == DA_CCB_PROBE_RC) { block_size = scsi_4btoul(rdcap->length); maxsector = scsi_4btoul(rdcap->addr); lalba = 0; /* * According to SBC-2, if the standard 10 * byte READ CAPACITY command returns 2^32, * we should issue the 16 byte version of * the command, since the device in question * has more sectors than can be represented * with the short version of the command. */ if (maxsector == 0xffffffff) { free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC16; xpt_schedule(periph, priority); return; } } else { block_size = scsi_4btoul(rcaplong->length); maxsector = scsi_8btou64(rcaplong->addr); lalba = scsi_2btoul(rcaplong->lalba_lbp); } /* * Because the GEOM code will panic if we * give it an 'illegal' value, we avoid that * here. */ if (block_size == 0) { block_size = 512; if (maxsector == 0) maxsector = -1; } if (block_size >= MAXPHYS) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); announce_buf = NULL; cam_periph_invalidate(periph); } else { /* * We pass rcaplong into dasetgeom(), * because it will only use it if it is * non-NULL. */ dasetgeom(periph, block_size, maxsector, rcaplong, sizeof(*rcaplong)); lbp = (lalba & SRC16_LBPME_A); dp = &softc->params; snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); } } else { int error; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ status = done_ccb->ccb_h.status; if ((status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * If we tried READ CAPACITY(16) and failed, * fall back to READ CAPACITY(10). */ if ((state == DA_CCB_PROBE_RC16) && (softc->flags & DA_FLAG_CAN_RC16) && (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) || ((have_sense) && (error_code == SSD_CURRENT_ERROR) && (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { softc->flags &= ~DA_FLAG_CAN_RC16; free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, priority); return; } /* * Attach to anything that claims to be a * direct access or optical disk device, * as long as it doesn't return a "Logical * unit not supported" (0x25) error. * "Internal Target Failure" (0x44) is also * special and typically means that the * device is a SATA drive behind a SATL * translation that's fallen into a * terminally fatal state.
*/ if ((have_sense) && (asc != 0x25) && (asc != 0x44) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; dasetgeom(periph, 512, -1, NULL, 0); scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else { if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); announce_buf = NULL; /* * Free up resources. */ cam_periph_invalidate(periph); } } } free(csio->data_ptr, M_SCSIDA); if (announce_buf != NULL && ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { struct sbuf sb; sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, DA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ /* increase the refcount */ if (cam_periph_acquire(periph) == CAM_REQ_CMP) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); } else { /* XXX This message is useless! */ xpt_print(periph->path, "fatal error, " "could not acquire reference count\n"); } } /* We already probed the device. */ if (softc->flags & DA_FLAG_PROBED) { daprobedone(periph, done_ccb); return; } /* Ensure re-probe doesn't see old delete. */ softc->delete_available = 0; dadeleteflag(softc, DA_DELETE_ZERO, 1); if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { /* * Based on older SBC-3 spec revisions * any of the UNMAP methods "may" be * available via LBP given this flag so * we flag all of them as available and * then remove those which further * probes confirm aren't available * later. * * We could also check readcap(16) p_type * flag to exclude one or more invalid * write same (X) types here */ dadeleteflag(softc, DA_DELETE_WS16, 1); dadeleteflag(softc, DA_DELETE_WS10, 1); dadeleteflag(softc, DA_DELETE_UNMAP, 1); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_LBP; xpt_schedule(periph, priority); return; } xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { /* * T10/1799-D Revision 31 states at least one of these * must be supported but we don't currently enforce this. 
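 *
 * The flag-to-method mapping applied below is simply:
 *
 *	DA_DELETE_WS16  <- lbp->flags & SVPD_LBP_WS16
 *	DA_DELETE_WS10  <- lbp->flags & SVPD_LBP_WS10
 *	DA_DELETE_UNMAP <- lbp->flags & SVPD_LBP_UNMAP
 *
 * i.e. each VPD bit enables or disables one delete method; nothing
 * is inferred beyond what the device reports.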
*/ dadeleteflag(softc, DA_DELETE_WS16, (lbp->flags & SVPD_LBP_WS16)); dadeleteflag(softc, DA_DELETE_WS10, (lbp->flags & SVPD_LBP_WS10)); dadeleteflag(softc, DA_DELETE_UNMAP, (lbp->flags & SVPD_LBP_UNMAP)); } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure indicates we don't support any SBC-3 * delete methods with UNMAP */ } } free(lbp, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BLK_LIMITS; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t max_txfer_len = scsi_4btoul( block_limits->max_txfer_len); uint32_t max_unmap_lba_cnt = scsi_4btoul( block_limits->max_unmap_lba_cnt); uint32_t max_unmap_blk_cnt = scsi_4btoul( block_limits->max_unmap_blk_cnt); uint32_t unmap_gran = scsi_4btoul( block_limits->opt_unmap_grain); uint32_t unmap_gran_align = scsi_4btoul( block_limits->unmap_grain_align); uint64_t ws_max_blks = scsi_8btou64( block_limits->max_write_same_length); if (max_txfer_len != 0) { softc->disk->d_maxsize = MIN(softc->maxio, (off_t)max_txfer_len * softc->params.secsize); } /* * We should already support UNMAP but we check lba * and block count to be sure */ if (max_unmap_lba_cnt != 0x00L && max_unmap_blk_cnt != 0x00L) { softc->unmap_max_lba = max_unmap_lba_cnt; softc->unmap_max_ranges = min(max_unmap_blk_cnt, UNMAP_MAX_RANGES); if (unmap_gran > 1) { softc->unmap_gran = unmap_gran; if (unmap_gran_align & 0x80000000) { softc->unmap_gran_align = unmap_gran_align & 0x7fffffff; } } } else { /* * Unexpected UNMAP limits which means the * device doesn't actually support UNMAP */ dadeleteflag(softc, DA_DELETE_UNMAP, 0); } if (ws_max_blks != 0x00L) softc->ws_max_blks = ws_max_blks; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure here doesn't mean UNMAP is not * supported as this is an optional page. */ softc->unmap_max_lba = 1; softc->unmap_max_ranges = 1; } } free(block_limits, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BDC: { struct scsi_vpd_block_device_characteristics *bdc; bdc = (struct scsi_vpd_block_device_characteristics *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; /* * Disable queue sorting for non-rotational media * by default. 
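 * In sketch form (SVPD_BDC_RATE_NON_ROTATING is the SBC value
 * 0x0001, a solid state medium; illustration only):
 *
 *	rate = scsi_2btoul(bdc->medium_rotation_rate);
 *	if (rate == SVPD_BDC_RATE_NON_ROTATING)
 *		cam_iosched_set_sort_queue(softc->cam_iosched, 0);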
*/ u_int16_t old_rate = softc->disk->d_rotation_rate; valid_len = csio->dxfer_len - csio->resid; if (SBDC_IS_PRESENT(bdc, valid_len, medium_rotation_rate)) { softc->disk->d_rotation_rate = scsi_2btoul(bdc->medium_rotation_rate); if (softc->disk->d_rotation_rate == SVPD_BDC_RATE_NON_ROTATING) { cam_iosched_set_sort_queue( softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } } if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) && (softc->zone_mode == DA_ZONE_NONE)) { int ata_proto; if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) ata_proto = 1; else ata_proto = 0; /* * The Zoned field will only be set for * Drive Managed and Host Aware drives. If * they are Host Managed, the device type * in the standard INQUIRY data should be * set to T_ZBC_HM (0x14). */ if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_HAW_ZBC) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_DM_ZBC) { softc->zone_mode = DA_ZONE_DRIVE_MANAGED; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) != SVPD_ZBC_NR) { xpt_print(periph->path, "Unknown zoned " "type %#x", bdc->flags & SVPD_ZBC_MASK); } } } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(bdc, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_ATA; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_ATA: { int i; struct ata_params *ata_params; int continue_probe; int error; uint16_t *ptr; ata_params = (struct ata_params *)csio->data_ptr; ptr = (uint16_t *)ata_params; continue_probe = 0; error = 0; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint16_t old_rate; for (i = 0; i < sizeof(*ata_params) / 2; i++) ptr[i] = le16toh(ptr[i]); if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && (softc->quirks & DA_Q_NO_UNMAP) == 0) { dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); if (ata_params->max_dsm_blocks != 0) softc->trim_max_ranges = min( softc->trim_max_ranges, ata_params->max_dsm_blocks * ATA_DSM_BLK_RANGES); } /* * Disable queue sorting for non-rotational media * by default. */ old_rate = softc->disk->d_rotation_rate; softc->disk->d_rotation_rate = ata_params->media_rotation_rate; if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } if (ata_params->capabilities1 & ATA_SUPPORT_DMA) softc->flags |= DA_FLAG_CAN_ATA_DMA; if (ata_params->support.extension & ATA_SUPPORT_GENLOG) softc->flags |= DA_FLAG_CAN_ATA_LOG; /* * At this point, if we have a SATA host aware drive, * we communicate via ATA passthrough unless the * SAT layer supports ZBC -> ZAC translation. In * that case, we prefer to use the translated ZBC * interface. */ /* * XXX KDM figure out how to detect a host managed * SATA drive. */ if (softc->zone_mode == DA_ZONE_NONE) { /* * Note that we don't override the zone * mode or interface if it has already been * set.
This is because it has either been * set as a quirk, or when we probed the * SCSI Block Device Characteristics page, * the zoned field was set. The latter * means that the SAT layer supports ZBC to * ZAC translation, and we would prefer to * use that if it is available. */ if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ata_params, M_SCSIDA); if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * If the ATA IDENTIFY failed, we could be talking * to a SCSI drive, although that seems unlikely, * since the drive did report that it supported the * ATA Information VPD page. If the ATA IDENTIFY * succeeded, and the SAT layer doesn't support * ZBC -> ZAC translation, continue on to get the * directory of ATA logs, and complete the rest of * the ZAC probe. If the SAT layer does support * ZBC -> ZAC translation, we want to use that, * and we'll probe the SCSI Zoned Block Device * Characteristics VPD page next. */ if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_LOG) && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) softc->state = DA_STATE_PROBE_ATA_LOGDIR; else softc->state = DA_STATE_PROBE_ZONE; continue_probe = 1; } if (continue_probe != 0) { xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } else daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_LOGDIR: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = csio->dxfer_len - csio->resid; if (softc->valid_logdir_len > 0) bcopy(csio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. */ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ softc->flags |= DA_FLAG_CAN_ATA_IDLOG; } else { softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data. 
*/ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | DA_FLAG_CAN_ATA_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { softc->state = DA_STATE_PROBE_ATA_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_IDDIR: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | DA_FLAG_CAN_ATA_ZONE); softc->valid_iddir_len = csio->dxfer_len - csio->resid; if (softc->valid_iddir_len > 0) bcopy(csio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages, entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= DA_FLAG_CAN_ATA_SUPCAP; else if (softc->ata_iddir.entries[i] == ATA_IDL_ZDI) softc->flags |= DA_FLAG_CAN_ATA_ZONE; if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) break; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * has a non-zero number of pages present for * this log. */ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { softc->state = DA_STATE_PROBE_ATA_SUP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_SUP: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *) csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data.
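 *
 * (Each qword in these ATA log pages carries its own validity bit;
 * the recurring pattern in this block is, with FIELD_VALID and
 * FIELD_MASK standing in for the per-field macros:
 *
 *	qword = le64dec(log->field);
 *	if (qword & FIELD_VALID)
 *		use(qword & FIELD_MASK);	// else keep defaults
 *
 * which is exactly what happens with zoned_cap just below.)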
*/ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = DA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= DA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= DA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; /* * And clear zone capabilities. */ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { softc->state = DA_STATE_PROBE_ATA_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ATA_ZONE: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar =le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; softc->flags &= ~DA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, 
/*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } case DA_CCB_PROBE_ZONE: { int error; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_len; struct scsi_vpd_zoned_bdc *zoned_bdc; error = 0; zoned_bdc = (struct scsi_vpd_zoned_bdc *) csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_len = __offsetof(struct scsi_vpd_zoned_bdc, max_seq_req_zones) + 1 + sizeof(zoned_bdc->max_seq_req_zones); if ((valid_len >= needed_len) && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; softc->optimal_seq_zones = scsi_4btoul(zoned_bdc->optimal_seq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_nonseq_zones = scsi_4btoul( zoned_bdc->optimal_nonseq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->max_seq_zones = scsi_4btoul(zoned_bdc->max_seq_req_zones); softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; } /* * All of the zone commands are mandatory for SCSI * devices. * * XXX KDM this is valid as of September 2015. * Re-check this assumption once the SAT spec is * updated to support SCSI ZBC to ATA ZAC mapping. * Since ATA allows zone commands to be reported * as supported or not, this may not necessarily * be true for an ATA device behind a SAT (SCSI to * ATA Translation) layer. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } daprobedone(periph, done_ccb); return; } case DA_CCB_DUMP: /* No-op. We're polling */ return; case DA_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static void dareprobe(struct cam_periph *periph) { struct da_softc *softc; cam_status status; softc = (struct da_softc *)periph->softc; /* Probe in progress; don't interfere. */ if (softc->state != DA_STATE_NORMAL) return; status = cam_periph_acquire(periph); KASSERT(status == CAM_REQ_CMP, ("dareprobe: cam_periph_acquire failed")); if (softc->flags & DA_FLAG_CAN_RC16) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, CAM_PRIORITY_DEV); } static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct da_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct da_softc *)periph->softc; /* * Automatically detect devices that do not support * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. 
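/*
 * Editorial sketch (not part of this change): the probe completions above
 * all guard against short transfers the same way, by computing how many
 * bytes must have arrived (valid_len, i.e. dxfer_len - resid in the driver)
 * to cover a given field before reading it. A standalone version of that
 * check, using a hypothetical page layout:
 */
#include <stddef.h>
#include <stdint.h>

struct ex_zoned_page {
	uint8_t page_code;
	uint8_t page_length[2];
	uint8_t flags;
	uint8_t optimal_seq_zones[4];
	uint8_t max_seq_req_zones[4];
};

static int
ex_covers_max_seq(uint32_t valid_len)
{
	/* Bytes needed to include max_seq_req_zones in its entirety. */
	size_t needed = offsetof(struct ex_zoned_page, max_seq_req_zones) +
	    sizeof(((struct ex_zoned_page *)NULL)->max_seq_req_zones);

	return (valid_len >= needed);
}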
*/ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cmd6workaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cmd6workaround(ccb); /* * If the target replied with CAPACITY DATA HAS CHANGED UA, * query the capacity and notify upper layers. */ else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x2A && ascq == 0x09) { xpt_print(periph->path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x3F && ascq == 0x03) { xpt_print(periph->path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { softc->flags |= DA_FLAG_PACK_INVALID; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (ERESTART); #ifdef CAM_IO_STATS switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: softc->errors++; break; default: break; } #endif /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & DA_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static void damediapoll(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && LIST_EMPTY(&softc->pending_ccbs)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* Queue us up again */ if (da_poll_period != 0) callout_schedule(&softc->mediapoll_c, da_poll_period * hz); } static void daprevent(struct cam_periph *periph, int action) { struct da_softc *softc; union ccb *ccb; int error; softc = (struct da_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/1, /*cbcfp*/dadone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, 5000); error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~DA_FLAG_PACK_LOCKED; else softc->flags |= DA_FLAG_PACK_LOCKED; } xpt_release_ccb(ccb); } static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) { struct ccb_calc_geometry ccg; struct da_softc *softc; struct disk_params *dp; u_int lbppbe, lalba; int error; softc = (struct da_softc *)periph->softc; dp = &softc->params; dp->secsize = block_len; dp->sectors = maxsector + 1; if (rcaplong != NULL) { lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; lalba = scsi_2btoul(rcaplong->lalba_lbp); lalba &= SRC16_LALBA_A; } else { lbppbe = 0; lalba = 0; } if (lbppbe > 0) { dp->stripesize = block_len << lbppbe; dp->stripeoffset = 
(dp->stripesize - block_len * lalba) % dp->stripesize; } else if (softc->quirks & DA_Q_4K) { dp->stripesize = 4096; dp->stripeoffset = 0; } else if (softc->unmap_gran != 0) { dp->stripesize = block_len * softc->unmap_gran; dp->stripeoffset = (dp->stripesize - block_len * softc->unmap_gran_align) % dp->stripesize; } else { dp->stripesize = 0; dp->stripeoffset = 0; } /* * Have the controller provide us with a geometry * for this disk. The only time the geometry * matters is when we boot and the controller * is the only one knowledgeable enough to come * up with something that will make this a bootable * device. */ xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; ccg.block_size = dp->secsize; ccg.volume_size = dp->sectors; ccg.heads = 0; ccg.secs_per_track = 0; ccg.cylinders = 0; xpt_action((union ccb*)&ccg); if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * We don't know what went wrong here- but just pick * a geometry so we don't have nasty things like divide * by zero. */ dp->heads = 255; dp->secs_per_track = 255; dp->cylinders = dp->sectors / (255 * 255); if (dp->cylinders == 0) { dp->cylinders = 1; } } else { dp->heads = ccg.heads; dp->secs_per_track = ccg.secs_per_track; dp->cylinders = ccg.cylinders; } /* * If the user supplied a read capacity buffer, and if it is * different than the previous buffer, update the data in the EDT. * If it's the same, we don't bother. This avoids sending an * update every time someone opens this device. */ if ((rcaplong != NULL) && (bcmp(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)) != 0)) { struct ccb_dev_advinfo cdai; xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = rcap_len; cdai.buf = (uint8_t *)rcaplong; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: failed to set read " "capacity advinfo\n", __func__); /* Use cam_error_print() to decode the status */ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, CAM_EPF_ALL); } else { bcopy(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)); } } softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; softc->disk->d_stripesize = softc->params.stripesize; softc->disk->d_stripeoffset = softc->params.stripeoffset; /* XXX: these are not actually "firmware" values, so they may be wrong */ softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; softc->disk->d_devstat->block_size = softc->params.secsize; softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; error = disk_resize(softc->disk, M_NOWAIT); if (error != 0) xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); } static void dasendorderedtag(void *arg) { struct da_softc *softc = arg; if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) softc->flags |= DA_FLAG_NEED_OTAG; softc->flags &= ~DA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); } /* * Step through all DA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. 
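/*
 * Editorial sketch (not part of this change): when XPT_CALC_GEOMETRY fails,
 * dasetgeom() above falls back to a synthetic 255-head, 255-sector-per-track
 * geometry so later arithmetic cannot divide by zero. The same arithmetic,
 * standalone:
 */
#include <stdint.h>

struct ex_chs {
	uint32_t heads;
	uint32_t secs_per_track;
	uint32_t cylinders;
};

static struct ex_chs
ex_fallback_geometry(uint64_t sectors)
{
	struct ex_chs chs;

	/* 255 heads x 255 sectors/track; cylinders take whatever is left. */
	chs.heads = 255;
	chs.secs_per_track = 255;
	chs.cylinders = sectors / (255 * 255);
	if (chs.cylinders == 0)
		chs.cylinders = 1;	/* never report zero cylinders */
	return (chs);
}

/*
 * e.g. a 1 TiB disk with 512-byte sectors has 2147483648 sectors, which
 * maps to 33025 synthetic cylinders.
 */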
*/ static void dashutdown(void *arg, int howto) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &dadriver) { softc = (struct da_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we panicked with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & DA_FLAG_OPEN)) { dadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it. */ if (((softc->flags & DA_FLAG_OPEN) == 0) || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /* whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 60 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } #else /* !_KERNEL */ /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. */ void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_format_unit *scsi_cmd; scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = FORMAT_UNIT; scsi_cmd->byte2 = byte2; scsi_ulto2b(ileave, scsi_cmd->interleave); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { uint8_t cdb_len; /* * These conditions allow using the 10 byte command. Otherwise we * need to use the 12 byte command.
*/ if ((minimum_cmd_size <= 10) && (addr_desc_index == 0) && (dxfer_len <= SRDD10_MAX_LENGTH)) { struct scsi_read_defect_data_10 *cdb10; cdb10 = (struct scsi_read_defect_data_10 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb10); bzero(cdb10, cdb_len); cdb10->opcode = READ_DEFECT_DATA_10; cdb10->format = list_format; scsi_ulto2b(dxfer_len, cdb10->alloc_length); } else { struct scsi_read_defect_data_12 *cdb12; cdb12 = (struct scsi_read_defect_data_12 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb12); bzero(cdb12, cdb_len); cdb12->opcode = READ_DEFECT_DATA_12; cdb12->format = list_format; scsi_ulto4b(dxfer_len, cdb12->alloc_length); scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sanitize *scsi_cmd; scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SANITIZE; scsi_cmd->byte2 = byte2; scsi_cmd->control = control; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } #endif /* _KERNEL */ void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_out *scsi_cmd; scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_OUT; scsi_cmd->service_action = service_action; scsi_u64to8b(zone_id, scsi_cmd->zone_id); scsi_cmd->zone_flags = zone_flags; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_in *scsi_cmd; scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_IN; scsi_cmd->service_action = service_action; scsi_ulto4b(dxfer_len, scsi_cmd->length); scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); scsi_cmd->zone_options = zone_options; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_IN : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol, ata_flags; uint16_t features_out; uint32_t sectors_out, auxiliary; int retval; retval = 0; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_OUT; features_out = (zm_action & 0xf) | (zone_flags << 8); ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { protocol = AP_PROTO_NON_DATA; ata_flags |= AP_FLAG_TLEN_NO_DATA; sectors_out = 0; } else { protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV; sectors_out = ((dxfer_len >> 9) & 0xffff); } auxiliary = 0; } else { ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { command_out = ATA_NCQ_NON_DATA; features_out = ATA_NCQ_ZAC_MGMT_OUT; /* * We're assuming the SCSI to ATA translation layer * will set the NCQ tag number in the tag field. * That isn't clear from the SAT-4 spec (as of rev 05). */ sectors_out = 0; ata_flags |= AP_FLAG_TLEN_NO_DATA; } else { command_out = ATA_SEND_FPDMA_QUEUED; /* * Note that we're defaulting to normal priority, * and assuming that the SCSI to ATA translation * layer will insert the NCQ tag number in the tag * field. That isn't clear in the SAT-4 spec (as * of rev 05). */ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; ata_flags |= AP_FLAG_TLEN_FEAT | AP_FLAG_TDIR_TO_DEV; /* * For SEND FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND;
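/*
 * Editorial sketch (not part of this change): the FEATURE-register length
 * encoding used above for SEND FPDMA QUEUED (and for RECEIVE FPDMA QUEUED
 * below). The field holds the count of 512-byte blocks, 0 encodes 65536
 * blocks, and anything larger cannot be represented. Standalone:
 */
#include <stdint.h>

/* Returns 0 on success, -1 if dxfer_len cannot be encoded. */
static int
ex_fpdma_encode_len(uint32_t dxfer_len, uint16_t *features)
{
	if (dxfer_len == 65536 * 512) {
		*features = 0;			/* 0 means 65536 blocks */
	} else if (dxfer_len <= 65535 * 512) {
		*features = (dxfer_len >> 9) & 0xffff;
	} else {
		return (-1);			/* too big for 16 bits */
	}
	return (0);
}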
retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol; uint16_t features_out, sectors_out; uint32_t auxiliary; int ata_flags; int retval; retval = 0; ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_IN; /* XXX KDM put a macro here */ features_out = (zm_action & 0xf) | (zone_flags << 8); sectors_out = dxfer_len >> 9; /* XXX KDM macro */ protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT; auxiliary = 0; } else { ata_flags |= AP_FLAG_TLEN_FEAT; command_out = ATA_RECV_FPDMA_QUEUED; sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; /* * For RECEIVE FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); }
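/*
 * Editorial sketch (not part of this change): both ZAC MANAGEMENT helpers
 * above pack the zone action into the low nibble and the zone flags into
 * the high byte of a 16-bit field (FEATURES for non-NCQ, AUXILIARY for
 * NCQ). A standalone version of that packing:
 */
#include <stdint.h>

static uint16_t
ex_zac_pack(uint8_t zm_action, uint8_t zone_flags)
{
	/* Low 4 bits: action; bits 15:8: flags (e.g. an "all zones" bit). */
	return ((uint16_t)(zm_action & 0xf) | ((uint16_t)zone_flags << 8));
}

/* e.g. ex_zac_pack(0x3, 0x1) == 0x0103 (hypothetical action/flag values). */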
Index: head/sys/cam/scsi/scsi_dvcfg.h =================================================================== --- head/sys/cam/scsi/scsi_dvcfg.h (revision 326264) +++ head/sys/cam/scsi/scsi_dvcfg.h (revision 326265) @@ -1,58 +1,60 @@ /* $FreeBSD$ */ /* $NecBSD: scsi_dvcfg.h,v 1.4 1998/03/14 07:05:06 kmatsuda Exp $ */ /* $NetBSD$ */ /*- + * SPDX-License-Identifier: BSD-3-Clause + * * [NetBSD for NEC PC-98 series] * Copyright (c) 1994, 1995, 1996, 1997, 1998 * NetBSD/pc98 porting staff. All rights reserved. * Copyright (c) 1994, 1995, 1996, 1997, 1998 * Naofumi HONDA. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _SCSI_DVCFG_H_ #define _SCSI_DVCFG_H_ /* common dvcfg flags definitions for bs, ncv, stg */ #define DVF_SCSI_SYNC 0x01 #define DVF_SCSI_DISC 0x02 #define DVF_SCSI_WAIT 0x04 #define DVF_SCSI_LINK 0x08 #define DVF_SCSI_QTAG 0x10 #define DVF_SCSI_SP0 0x100 #define DVF_SCSI_NOPARITY 0x200 #define DVF_SCSI_SAVESP 0x400 #define DVF_SCSI_SP1 0x800 #define DVF_SCSI_PERIOD(XXX) (((XXX) >> 24) & 0xff) #define DVF_SCSI_OFFSET(XXX) (((XXX) >> 16) & 0xff) #define DVF_SCSI_SYNCMASK 0xffff0000 #define DVF_SCSI_DEFCFG (DVF_SCSI_SYNC | DVF_SCSI_NOPARITY | DVF_SCSI_SYNCMASK) #define DVF_SCSI_BITS "\020\13fssp\12noparity\11nosat\005qtag\004cmdlnk\003wait\002disc\001sync" #endif /* !_SCSI_DVCFG_H_ */ Index: head/sys/cam/scsi/scsi_enc.c =================================================================== --- head/sys/cam/scsi/scsi_enc.c (revision 326264) +++ head/sys/cam/scsi/scsi_enc.c (revision 326265) @@ -1,1044 +1,1046 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2000 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
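/*
 * Editorial sketch (not part of this change): the dvcfg word above packs
 * sync-negotiation parameters, with the period in bits 31:24 and the
 * offset in bits 23:16, exactly as DVF_SCSI_PERIOD()/DVF_SCSI_OFFSET()
 * decode them. A standalone demonstration with a hypothetical value:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t cfg = 0x0c100000 | 0x01 | 0x200;	/* hypothetical */

	/* Same shifts and masks as DVF_SCSI_PERIOD()/DVF_SCSI_OFFSET(). */
	printf("period 0x%02x offset 0x%02x\n",
	    (unsigned)((cfg >> 24) & 0xff), (unsigned)((cfg >> 16) & 0xff));
	return (0);	/* prints: period 0x0c offset 0x10 */
}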
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ses.h" MALLOC_DEFINE(M_SCSIENC, "SCSI ENC", "SCSI ENC buffers"); /* Enclosure type independent driver */ static d_open_t enc_open; static d_close_t enc_close; static d_ioctl_t enc_ioctl; static periph_init_t enc_init; static periph_ctor_t enc_ctor; static periph_oninv_t enc_oninvalidate; static periph_dtor_t enc_dtor; static void enc_async(void *, uint32_t, struct cam_path *, void *); static enctyp enc_type(struct ccb_getdev *); SYSCTL_NODE(_kern_cam, OID_AUTO, enc, CTLFLAG_RD, 0, "CAM Enclosure Services driver"); static struct periph_driver encdriver = { enc_init, "ses", TAILQ_HEAD_INITIALIZER(encdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(enc, encdriver); static struct cdevsw enc_cdevsw = { .d_version = D_VERSION, .d_open = enc_open, .d_close = enc_close, .d_ioctl = enc_ioctl, .d_name = "ses", .d_flags = D_TRACKCLOSE, }; static void enc_init(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, enc_async, NULL, NULL); if (status != CAM_REQ_CMP) { printf("enc: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void enc_devgonecb(void *arg) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = (struct enc_softc *)periph->softc; /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < enc->open_count; i++) cam_periph_release_locked(periph); enc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void enc_oninvalidate(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; enc->enc_flags |= ENC_FLAG_INVALID; /* If the sub-driver has an invalidate routine, call it */ if (enc->enc_vec.softc_invalidate != NULL) enc->enc_vec.softc_invalidate(enc); /* * Unregister any async callbacks. */ xpt_register_async(0, enc_async, periph, periph->path); /* * Shutdown our daemon. */ enc->enc_flags |= ENC_FLAG_SHUTDOWN; if (enc->enc_daemon != NULL) { /* Signal the ses daemon to terminate. 
*/ wakeup(enc->enc_daemon); } callout_drain(&enc->status_updater); destroy_dev_sched_cb(enc->enc_dev, enc_devgonecb, periph); } static void enc_dtor(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; /* If the sub-driver has a cleanup routine, call it */ if (enc->enc_vec.softc_cleanup != NULL) enc->enc_vec.softc_cleanup(enc); if (enc->enc_boot_hold_ch.ich_func != NULL) { config_intrhook_disestablish(&enc->enc_boot_hold_ch); enc->enc_boot_hold_ch.ich_func = NULL; } ENC_FREE(enc); } static void enc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; path_id_t path_id; cgd = (struct ccb_getdev *)arg; if (arg == NULL) { break; } if (enc_type(cgd) == ENC_NONE) { /* * Schedule announcement of the ENC bindings for * this device if it is managed by a SEP. */ path_id = xpt_path_path_id(path); xpt_lock_buses(); TAILQ_FOREACH(periph, &encdriver.units, unit_links) { struct enc_softc *softc; softc = (struct enc_softc *)periph->softc; if (xpt_path_path_id(periph->path) != path_id || softc == NULL || (softc->enc_flags & ENC_FLAG_INITIALIZED) == 0 || softc->enc_vec.device_found == NULL) continue; softc->enc_vec.device_found(softc); } xpt_unlock_buses(); return; } status = cam_periph_alloc(enc_ctor, enc_oninvalidate, enc_dtor, NULL, "ses", CAM_PERIPH_BIO, path, enc_async, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { printf("enc_async: Unable to probe new device due to " "status 0x%x\n", status); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static int enc_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); cam_periph_lock(periph); softc = (struct enc_softc *)periph->softc; if ((softc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { error = ENXIO; goto out; } if (softc->enc_flags & ENC_FLAG_INVALID) { error = ENXIO; goto out; } out: if (error != 0) cam_periph_release_locked(periph); else softc->open_count++; cam_periph_unlock(periph); return (error); } static int enc_close(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = periph->softc; enc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
*/ mtx_unlock(mtx); return (0); } int enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags) { struct enc_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct enc_softc *)periph->softc; return (cam_periph_error(ccb, cflags, sflags, &softc->saved_ccb)); } static int enc_ioctl(struct cdev *dev, u_long cmd, caddr_t arg_addr, int flag, struct thread *td) { struct cam_periph *periph; encioc_enc_status_t tmp; encioc_string_t sstr; encioc_elm_status_t elms; encioc_elm_desc_t elmd; encioc_elm_devnames_t elmdn; encioc_element_t *uelm; enc_softc_t *enc; enc_cache_t *cache; void *addr; int error, i; if (arg_addr) addr = *((caddr_t *) arg_addr); else addr = NULL; periph = (struct cam_periph *)dev->si_drv1; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering encioctl\n")); cam_periph_lock(periph); enc = (struct enc_softc *)periph->softc; cache = &enc->enc_cache; /* * Now check to see whether we're initialized or not. * This actually should never fail as we're not supposed * to get past enc_open w/o successfully initializing * things. */ if ((enc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { cam_periph_unlock(periph); return (ENXIO); } cam_periph_unlock(periph); error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, * we must have the device open for writing. * * For commands that get information about the * device- we don't need to lock the peripheral * if we aren't running a command. The periph * also can't go away while a user process has * it open. */ switch (cmd) { case ENCIOC_GETNELM: case ENCIOC_GETELMMAP: case ENCIOC_GETENCSTAT: case ENCIOC_GETELMSTAT: case ENCIOC_GETELMDESC: case ENCIOC_GETELMDEVNAMES: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: break; default: if ((flag & FWRITE) == 0) { return (EBADF); } } /* * XXX The values read here are only valid for the current * configuration generation. We need these ioctls * to also pass in/out a generation number. 
*/ sx_slock(&enc->enc_cache_lock); switch (cmd) { case ENCIOC_GETNELM: error = copyout(&cache->nelms, addr, sizeof (cache->nelms)); break; case ENCIOC_GETELMMAP: for (uelm = addr, i = 0; i != cache->nelms; i++) { encioc_element_t kelm; kelm.elm_idx = i; kelm.elm_subenc_id = cache->elm_map[i].subenclosure; kelm.elm_type = cache->elm_map[i].enctype; error = copyout(&kelm, &uelm[i], sizeof(kelm)); if (error) break; } break; case ENCIOC_GETENCSTAT: cam_periph_lock(periph); error = enc->enc_vec.get_enc_status(enc, 1); if (error) { cam_periph_unlock(periph); break; } tmp = cache->enc_status; cam_periph_unlock(periph); error = copyout(&tmp, addr, sizeof(tmp)); cache->enc_status = tmp; break; case ENCIOC_SETENCSTAT: error = copyin(addr, &tmp, sizeof(tmp)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.set_enc_status(enc, tmp, 1); cam_periph_unlock(periph); break; case ENCIOC_GETSTRING: case ENCIOC_SETSTRING: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: if (enc->enc_vec.handle_string == NULL) { error = EINVAL; break; } error = copyin(addr, &sstr, sizeof(sstr)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.handle_string(enc, &sstr, cmd); cam_periph_unlock(periph); break; case ENCIOC_GETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.get_elm_status(enc, &elms, 1); cam_periph_unlock(periph); if (error) break; error = copyout(&elms, addr, sizeof(elms)); break; case ENCIOC_GETELMDESC: error = copyin(addr, &elmd, sizeof(elmd)); if (error) break; if (elmd.elm_idx >= cache->nelms) { error = EINVAL; break; } if (enc->enc_vec.get_elm_desc != NULL) { error = enc->enc_vec.get_elm_desc(enc, &elmd); if (error) break; } else elmd.elm_desc_len = 0; error = copyout(&elmd, addr, sizeof(elmd)); break; case ENCIOC_GETELMDEVNAMES: if (enc->enc_vec.get_elm_devnames == NULL) { error = EINVAL; break; } error = copyin(addr, &elmdn, sizeof(elmdn)); if (error) break; if (elmdn.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = (*enc->enc_vec.get_elm_devnames)(enc, &elmdn); cam_periph_unlock(periph); if (error) break; error = copyout(&elmdn, addr, sizeof(elmdn)); break; case ENCIOC_SETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.set_elm_status(enc, &elms, 1); cam_periph_unlock(periph); break; case ENCIOC_INIT: cam_periph_lock(periph); error = enc->enc_vec.init_enc(enc); cam_periph_unlock(periph); break; default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, arg_addr, enc_error); cam_periph_unlock(periph); break; } sx_sunlock(&enc->enc_cache_lock); return (error); } int enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp) { int error, dlen, tdlen; ccb_flags ddf; union ccb *ccb; CAM_DEBUG(enc->periph->path, CAM_DEBUG_TRACE, ("entering enc_runcmd\n")); if (dptr) { if ((dlen = *dlenp) < 0) { dlen = -dlen; ddf = CAM_DIR_OUT; } else { ddf = CAM_DIR_IN; } } else { dlen = 0; ddf = CAM_DIR_NONE; } if (cdbl > IOCDBLEN) { cdbl = IOCDBLEN; } ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) { tdlen = min(dlen, 1020); tdlen = (tdlen + 3) & ~3; cam_fill_ataio(&ccb->ataio, 0, NULL, ddf, 0, dptr, tdlen, 30 * 1000); if (cdb[0] == RECEIVE_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 
0x02, tdlen / 4); else if (cdb[0] == SEND_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x82, tdlen / 4); else if (cdb[0] == READ_BUFFER) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 0x00, tdlen / 4); else ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x80, tdlen / 4); } else { tdlen = dlen; cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG, dptr, dlen, sizeof (struct scsi_sense_data), cdbl, 60 * 1000); bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl); } error = cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS, NULL); if (error) { if (dptr) { *dlenp = dlen; } } else { if (dptr) { if (ccb->ccb_h.func_code == XPT_ATA_IO) *dlenp = ccb->ataio.resid; else *dlenp = ccb->csio.resid; *dlenp += tdlen - dlen; } } xpt_release_ccb(ccb); CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("exiting enc_runcmd: *dlenp = %d\n", *dlenp)); return (error); } void enc_log(struct enc_softc *enc, const char *fmt, ...) { va_list ap; printf("%s%d: ", enc->periph->periph_name, enc->periph->unit_number); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } /* * The code after this point runs on many platforms, * so forgive the slightly awkward and nonconforming * appearance. */ /* * Is this a device that supports enclosure services? * * It's a pretty simple ruleset- if it is device type * 0x0D (13), it's an ENCLOSURE device. */ #define SAFTE_START 44 #define SAFTE_END 50 #define SAFTE_LEN SAFTE_END-SAFTE_START static enctyp enc_type(struct ccb_getdev *cgd) { int buflen; unsigned char *iqd; if (cgd->protocol == PROTO_SEMB) { iqd = (unsigned char *)&cgd->ident_data; if (STRNCMP(iqd + 43, "S-E-S", 5) == 0) return (ENC_SEMB_SES); else if (STRNCMP(iqd + 43, "SAF-TE", 6) == 0) return (ENC_SEMB_SAFT); return (ENC_NONE); } else if (cgd->protocol != PROTO_SCSI) return (ENC_NONE); iqd = (unsigned char *)&cgd->inq_data; buflen = min(sizeof(cgd->inq_data), SID_ADDITIONAL_LENGTH(&cgd->inq_data)); if ((iqd[0] & 0x1f) == T_ENCLOSURE) { if ((iqd[2] & 0x7) > 2) { return (ENC_SES); } else { return (ENC_SES_SCSI2); } return (ENC_NONE); } #ifdef SES_ENABLE_PASSTHROUGH if ((iqd[6] & 0x40) && (iqd[2] & 0x7) >= 2) { /* * PassThrough Device. */ return (ENC_SES_PASSTHROUGH); } #endif /* * The comparison is short for a reason- * some vendors were chopping it short. */ if (buflen < SAFTE_END - 2) { return (ENC_NONE); } if (STRNCMP((char *)&iqd[SAFTE_START], "SAF-TE", SAFTE_LEN - 2) == 0) { return (ENC_SAFT); } return (ENC_NONE); } /*================== Enclosure Monitoring/Processing Daemon ==================*/ /** * \brief Queue an update request for a given action, if needed. * * \param enc SES softc to queue the request for. * \param action Action requested. */ void enc_update_request(enc_softc_t *enc, uint32_t action) { if ((enc->pending_actions & (0x1 << action)) == 0) { enc->pending_actions |= (0x1 << action); ENC_DLOG(enc, "%s: queuing requested action %d\n", __func__, action); if (enc->current_action == ENC_UPDATE_NONE) wakeup(enc->enc_daemon); } else { ENC_DLOG(enc, "%s: ignoring requested action %d - " "Already queued\n", __func__, action); } } /** * \brief Invoke the handler of the highest priority pending * state in the SES state machine. * * \param enc The SES instance invoking the state machine. */
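/*
 * Editorial sketch (not part of this change): the daemon treats
 * pending_actions as a priority bitmap. enc_update_request() above sets a
 * bit, and enc_fsm_step() below services the lowest set bit first via
 * ffs(). A minimal standalone model of that queue:
 */
#include <stdint.h>
#include <strings.h>	/* ffs() */

static uint32_t ex_pending;

static void
ex_queue_action(uint32_t action)
{
	ex_pending |= 1u << action;	/* idempotent: re-queueing is a no-op */
}

static int
ex_next_action(void)
{
	int action;

	if (ex_pending == 0)
		return (-1);		/* nothing to do; daemon would sleep */
	action = ffs(ex_pending) - 1;	/* lowest bit == highest priority */
	ex_pending &= ~(1u << action);
	return (action);
}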
static void enc_fsm_step(enc_softc_t *enc) { union ccb *ccb; uint8_t *buf; struct enc_fsm_state *cur_state; int error; uint32_t xfer_len; ENC_DLOG(enc, "%s enter %p\n", __func__, enc); enc->current_action = ffs(enc->pending_actions) - 1; enc->pending_actions &= ~(0x1 << enc->current_action); cur_state = &enc->enc_fsm_states[enc->current_action]; buf = NULL; if (cur_state->buf_size != 0) { cam_periph_unlock(enc->periph); buf = malloc(cur_state->buf_size, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); } error = 0; ccb = NULL; if (cur_state->fill != NULL) { ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); error = cur_state->fill(enc, cur_state, ccb, buf); if (error != 0) goto done; error = cam_periph_runccb(ccb, cur_state->error, ENC_CFLAGS, ENC_FLAGS|SF_QUIET_IR, NULL); } if (ccb != NULL) { if (ccb->ccb_h.func_code == XPT_ATA_IO) xfer_len = ccb->ataio.dxfer_len - ccb->ataio.resid; else xfer_len = ccb->csio.dxfer_len - ccb->csio.resid; } else xfer_len = 0; cam_periph_unlock(enc->periph); cur_state->done(enc, cur_state, ccb, &buf, error, xfer_len); cam_periph_lock(enc->periph); done: ENC_DLOG(enc, "%s exit - result %d\n", __func__, error); ENC_FREE_AND_NULL(buf); if (ccb != NULL) xpt_release_ccb(ccb); } /** * \invariant Called with cam_periph mutex held. */ static void enc_status_updater(void *arg) { enc_softc_t *enc; enc = arg; if (enc->enc_vec.poll_status != NULL) enc->enc_vec.poll_status(enc); } static void enc_daemon(void *arg) { enc_softc_t *enc; enc = arg; cam_periph_lock(enc->periph); while ((enc->enc_flags & ENC_FLAG_SHUTDOWN) == 0) { if (enc->pending_actions == 0) { struct intr_config_hook *hook; /* * Reset callout and msleep, or * issue timed task completion * status command. */ enc->current_action = ENC_UPDATE_NONE; /* * We've been through our state machine at least * once. Allow the transition to userland. */ hook = &enc->enc_boot_hold_ch; if (hook->ich_func != NULL) { config_intrhook_disestablish(hook); hook->ich_func = NULL; } callout_reset(&enc->status_updater, 60*hz, enc_status_updater, enc); cam_periph_sleep(enc->periph, enc->enc_daemon, PUSER, "idle", 0); } else { enc_fsm_step(enc); } } enc->enc_daemon = NULL; cam_periph_unlock(enc->periph); cam_periph_release(enc->periph); kproc_exit(0); } static int enc_kproc_init(enc_softc_t *enc) { int result; callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0); if (cam_periph_acquire(enc->periph) != CAM_REQ_CMP) return (ENXIO); result = kproc_create(enc_daemon, enc, &enc->enc_daemon, /*flags*/0, /*stackpgs*/0, "enc_daemon%d", enc->periph->unit_number); if (result == 0) { /* Do an initial load of all page data. */ cam_periph_lock(enc->periph); enc->enc_vec.poll_status(enc); cam_periph_unlock(enc->periph); } else cam_periph_release(enc->periph); return (result); } /** * \brief Interrupt configuration hook callback associated with * enc_boot_hold_ch. * * Since interrupts are always functional at the time of enclosure * configuration, there is nothing to be done when the callback occurs. * This hook is only registered to hold up boot processing while initial * enclosure processing occurs. * * \param arg The enclosure softc, but currently unused in this callback.
*/ static void enc_nop_confighook_cb(void *arg __unused) { } static cam_status enc_ctor(struct cam_periph *periph, void *arg) { cam_status status = CAM_REQ_CMP_ERR; int err; enc_softc_t *enc; struct ccb_getdev *cgd; char *tname; struct make_dev_args args; struct sbuf sb; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("enc_ctor: no getdev CCB, can't register device\n"); goto out; } enc = ENC_MALLOCZ(sizeof(*enc)); if (enc == NULL) { printf("enc_ctor: Unable to probe new device. " "Unable to allocate enc\n"); goto out; } enc->periph = periph; enc->current_action = ENC_UPDATE_INVALID; enc->enc_type = enc_type(cgd); sx_init(&enc->enc_cache_lock, "enccache"); switch (enc->enc_type) { case ENC_SES: case ENC_SES_SCSI2: case ENC_SES_PASSTHROUGH: case ENC_SEMB_SES: err = ses_softc_init(enc); break; case ENC_SAFT: case ENC_SEMB_SAFT: err = safte_softc_init(enc); break; case ENC_NONE: default: ENC_FREE(enc); return (CAM_REQ_CMP_ERR); } if (err) { xpt_print(periph->path, "error %d initializing\n", err); goto out; } /* * Hold off userland until we have made at least one pass * through our state machine so that physical path data is * present. */ if (enc->enc_vec.poll_status != NULL) { enc->enc_boot_hold_ch.ich_func = enc_nop_confighook_cb; enc->enc_boot_hold_ch.ich_arg = enc; config_intrhook_establish(&enc->enc_boot_hold_ch); } /* * The softc field is set only once the enc is fully initialized * so that we can rely on this field to detect partially * initialized periph objects in the AC_FOUND_DEVICE handler. */ periph->softc = enc; cam_periph_unlock(periph); if (enc->enc_vec.poll_status != NULL) { err = enc_kproc_init(enc); if (err) { xpt_print(periph->path, "error %d starting enc_daemon\n", err); goto out; } } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } make_dev_args_init(&args); args.mda_devsw = &enc_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; err = make_dev_s(&args, &enc->enc_dev, "%s%d", periph->periph_name, periph->unit_number); cam_periph_lock(periph); if (err != 0) { cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } enc->enc_flags |= ENC_FLAG_INITIALIZED; /* * Add an async callback so that we get notified if this * device goes away. 
*/ xpt_register_async(AC_LOST_DEVICE, enc_async, periph, periph->path); switch (enc->enc_type) { default: case ENC_NONE: tname = "No ENC device"; break; case ENC_SES_SCSI2: tname = "SCSI-2 ENC Device"; break; case ENC_SES: tname = "SCSI-3 ENC Device"; break; case ENC_SES_PASSTHROUGH: tname = "ENC Passthrough Device"; break; case ENC_SAFT: tname = "SAF-TE Compliant Device"; break; case ENC_SEMB_SES: tname = "SEMB SES Device"; break; case ENC_SEMB_SAFT: tname = "SEMB SAF-TE Device"; break; } sbuf_new(&sb, enc->announce_buf, ENC_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, tname); sbuf_finish(&sb); sbuf_putbuf(&sb); status = CAM_REQ_CMP; out: if (status != CAM_REQ_CMP) enc_dtor(periph); return (status); } Index: head/sys/cam/scsi/scsi_enc_internal.h =================================================================== --- head/sys/cam/scsi/scsi_enc_internal.h (revision 326264) +++ head/sys/cam/scsi/scsi_enc_internal.h (revision 326265) @@ -1,233 +1,235 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2000 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * This file contains definitions only intended for use within * sys/cam/scsi/scsi_enc*.c, and not in other kernel components. */ #ifndef __SCSI_ENC_INTERNAL_H__ #define __SCSI_ENC_INTERNAL_H__ typedef struct enc_element { uint32_t enctype : 8, /* enclosure type */ subenclosure : 8, /* subenclosure id */ svalid : 1, /* enclosure information valid */ overall_status_elem: 1,/* * This object represents generic * status about all objects of this * type. */ priv : 14; /* private data, per object */ uint8_t encstat[4]; /* state && stats */ uint8_t *physical_path; /* Device physical path data. */ u_int physical_path_len; /* Length of device path data. */ void *elm_private; /* per-type object data */ } enc_element_t; typedef enum { ENC_NONE, ENC_SES_SCSI2, ENC_SES, ENC_SES_PASSTHROUGH, ENC_SEN, ENC_SAFT, ENC_SEMB_SES, ENC_SEMB_SAFT } enctyp; /* Platform Independent Driver Internal Definitions for enclosure devices. 
*/ typedef struct enc_softc enc_softc_t; struct enc_fsm_state; typedef int fsm_fill_handler_t(enc_softc_t *ssc, struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf); typedef int fsm_error_handler_t(union ccb *ccb, uint32_t cflags, uint32_t sflags); typedef int fsm_done_handler_t(enc_softc_t *ssc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len); struct enc_fsm_state { const char *name; int page_code; size_t buf_size; uint32_t timeout; fsm_fill_handler_t *fill; fsm_done_handler_t *done; fsm_error_handler_t *error; }; typedef int (enc_softc_init_t)(enc_softc_t *); typedef void (enc_softc_invalidate_t)(enc_softc_t *); typedef void (enc_softc_cleanup_t)(enc_softc_t *); typedef int (enc_init_enc_t)(enc_softc_t *); typedef int (enc_get_enc_status_t)(enc_softc_t *, int); typedef int (enc_set_enc_status_t)(enc_softc_t *, encioc_enc_status_t, int); typedef int (enc_get_elm_status_t)(enc_softc_t *, encioc_elm_status_t *, int); typedef int (enc_set_elm_status_t)(enc_softc_t *, encioc_elm_status_t *, int); typedef int (enc_get_elm_desc_t)(enc_softc_t *, encioc_elm_desc_t *); typedef int (enc_get_elm_devnames_t)(enc_softc_t *, encioc_elm_devnames_t *); typedef int (enc_handle_string_t)(enc_softc_t *, encioc_string_t *, int); typedef void (enc_device_found_t)(enc_softc_t *); typedef void (enc_poll_status_t)(enc_softc_t *); struct enc_vec { enc_softc_invalidate_t *softc_invalidate; enc_softc_cleanup_t *softc_cleanup; enc_init_enc_t *init_enc; enc_get_enc_status_t *get_enc_status; enc_set_enc_status_t *set_enc_status; enc_get_elm_status_t *get_elm_status; enc_set_elm_status_t *set_elm_status; enc_get_elm_desc_t *get_elm_desc; enc_get_elm_devnames_t *get_elm_devnames; enc_handle_string_t *handle_string; enc_device_found_t *device_found; enc_poll_status_t *poll_status; }; typedef struct enc_cache { enc_element_t *elm_map; /* objects */ int nelms; /* number of objects */ encioc_enc_status_t enc_status; /* overall status */ void *private; /* per-type private data */ } enc_cache_t; /* Enclosure instance toplevel structure */ struct enc_softc { enctyp enc_type; /* type of enclosure */ struct enc_vec enc_vec; /* vector to handlers */ void *enc_private; /* per-type private data */ /** * "Published" configuration and state data available to * external consumers. */ enc_cache_t enc_cache; /** * Configuration and state data being actively updated * by the enclosure daemon. */ enc_cache_t enc_daemon_cache; struct sx enc_cache_lock; uint8_t enc_flags; #define ENC_FLAG_INVALID 0x01 #define ENC_FLAG_INITIALIZED 0x02 #define ENC_FLAG_SHUTDOWN 0x04 union ccb saved_ccb; struct cdev *enc_dev; struct cam_periph *periph; int open_count; /* Bitmap of pending operations. */ uint32_t pending_actions; /* The action on which the state machine is currently working. */ uint32_t current_action; #define ENC_UPDATE_NONE 0x00 #define ENC_UPDATE_INVALID 0xff /* Callout for auto-updating enclosure status */ struct callout status_updater; struct proc *enc_daemon; struct enc_fsm_state *enc_fsm_states; struct intr_config_hook enc_boot_hold_ch; #define ENC_ANNOUNCE_SZ 400 char announce_buf[ENC_ANNOUNCE_SZ]; }; static inline enc_cache_t * enc_other_cache(enc_softc_t *enc, enc_cache_t *primary) { return (primary == &enc->enc_cache ? 
&enc->enc_daemon_cache : &enc->enc_cache); } /* SES Management mode page - SES2r20 Table 59 */ struct ses_mgmt_mode_page { struct scsi_mode_header_6 header; struct scsi_mode_blk_desc blk_desc; uint8_t byte0; /* ps : 1, spf : 1, page_code : 6 */ #define SES_MGMT_MODE_PAGE_CODE 0x14 uint8_t length; #define SES_MGMT_MODE_PAGE_LEN 6 uint8_t reserved[3]; uint8_t byte5; /* reserved : 7, enbltc : 1 */ #define SES_MGMT_TIMED_COMP_EN 0x1 uint8_t max_comp_time[2]; }; /* Enclosure core interface for sub-drivers */ int enc_runcmd(struct enc_softc *, char *, int, char *, int *); void enc_log(struct enc_softc *, const char *, ...); int enc_error(union ccb *, uint32_t, uint32_t); void enc_update_request(enc_softc_t *, uint32_t); /* SES Native interface */ enc_softc_init_t ses_softc_init; /* SAF-TE interface */ enc_softc_init_t safte_softc_init; /* Helper macros */ MALLOC_DECLARE(M_SCSIENC); #define ENC_CFLAGS CAM_RETRY_SELTO #define ENC_FLAGS SF_NO_PRINT | SF_RETRY_UA #define STRNCMP strncmp #define PRINTF printf #define ENC_LOG enc_log #if defined(DEBUG) || defined(ENC_DEBUG) #define ENC_DLOG enc_log #else #define ENC_DLOG if (0) enc_log #endif #define ENC_VLOG if (bootverbose) enc_log #define ENC_MALLOC(amt) malloc(amt, M_SCSIENC, M_NOWAIT) #define ENC_MALLOCZ(amt) malloc(amt, M_SCSIENC, M_ZERO|M_NOWAIT) /* Cast away const avoiding GCC warnings. */ #define ENC_FREE(ptr) free((void *)((uintptr_t)ptr), M_SCSIENC) #define ENC_FREE_AND_NULL(ptr) do { \ if (ptr != NULL) { \ ENC_FREE(ptr); \ ptr = NULL; \ } \ } while(0) #define MEMZERO bzero #define MEMCPY(dest, src, amt) bcopy(src, dest, amt) #endif /* __SCSI_ENC_INTERNAL_H__ */ Index: head/sys/cam/scsi/scsi_enc_safte.c =================================================================== --- head/sys/cam/scsi/scsi_enc_safte.c (revision 326264) +++ head/sys/cam/scsi/scsi_enc_safte.c (revision 326265) @@ -1,1131 +1,1133 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2000 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * SAF-TE Type Device Emulation */ static int safte_set_enc_status(enc_softc_t *enc, uint8_t encstat, int slpflag); #define ALL_ENC_STAT (SES_ENCSTAT_CRITICAL | SES_ENCSTAT_UNRECOV | \ SES_ENCSTAT_NONCRITICAL | SES_ENCSTAT_INFO) /* * SAF-TE specific defines- Mandatory ones only... */ /* * READ BUFFER ('get' commands) IDs- placed in offset 2 of cdb */ #define SAFTE_RD_RDCFG 0x00 /* read enclosure configuration */ #define SAFTE_RD_RDESTS 0x01 /* read enclosure status */ #define SAFTE_RD_RDDSTS 0x04 /* read drive slot status */ #define SAFTE_RD_RDGFLG 0x05 /* read global flags */ /* * WRITE BUFFER ('set' commands) IDs- placed in offset 0 of databuf */ #define SAFTE_WT_DSTAT 0x10 /* write device slot status */ #define SAFTE_WT_SLTOP 0x12 /* perform slot operation */ #define SAFTE_WT_FANSPD 0x13 /* set fan speed */ #define SAFTE_WT_ACTPWS 0x14 /* turn on/off power supply */ #define SAFTE_WT_GLOBAL 0x15 /* send global command */ #define SAFT_SCRATCH 64 #define SCSZ 0x8000 typedef enum { SAFTE_UPDATE_NONE, SAFTE_UPDATE_READCONFIG, SAFTE_UPDATE_READGFLAGS, SAFTE_UPDATE_READENCSTATUS, SAFTE_UPDATE_READSLOTSTATUS, SAFTE_PROCESS_CONTROL_REQS, SAFTE_NUM_UPDATE_STATES } safte_update_action; static fsm_fill_handler_t safte_fill_read_buf_io; static fsm_fill_handler_t safte_fill_control_request; static fsm_done_handler_t safte_process_config; static fsm_done_handler_t safte_process_gflags; static fsm_done_handler_t safte_process_status; static fsm_done_handler_t safte_process_slotstatus; static fsm_done_handler_t safte_process_control_request; static struct enc_fsm_state enc_fsm_states[SAFTE_NUM_UPDATE_STATES] = { { "SAFTE_UPDATE_NONE", 0, 0, 0, NULL, NULL, NULL }, { "SAFTE_UPDATE_READCONFIG", SAFTE_RD_RDCFG, SAFT_SCRATCH, 60 * 1000, safte_fill_read_buf_io, safte_process_config, enc_error }, { "SAFTE_UPDATE_READGFLAGS", SAFTE_RD_RDGFLG, 16, 60 * 1000, safte_fill_read_buf_io, safte_process_gflags, enc_error }, { "SAFTE_UPDATE_READENCSTATUS", SAFTE_RD_RDESTS, SCSZ, 60 * 1000, safte_fill_read_buf_io, safte_process_status, enc_error }, { "SAFTE_UPDATE_READSLOTSTATUS", SAFTE_RD_RDDSTS, SCSZ, 60 * 1000, safte_fill_read_buf_io, safte_process_slotstatus, enc_error }, { "SAFTE_PROCESS_CONTROL_REQS", 0, SCSZ, 60 * 1000, safte_fill_control_request, safte_process_control_request, enc_error } }; typedef struct safte_control_request { int elm_idx; uint8_t elm_stat[4]; int result; TAILQ_ENTRY(safte_control_request) links; } safte_control_request_t; TAILQ_HEAD(safte_control_reqlist, safte_control_request); typedef struct safte_control_reqlist safte_control_reqlist_t; enum { SES_SETSTATUS_ENC_IDX = -1 }; static void safte_terminate_control_requests(safte_control_reqlist_t *reqlist, int result) { safte_control_request_t *req; while ((req = TAILQ_FIRST(reqlist)) != NULL) { TAILQ_REMOVE(reqlist, req, links); req->result = result; wakeup(req); } } struct scfg { /* * Cached Configuration */ uint8_t Nfans; /* Number of Fans */ uint8_t Npwr; /* Number of Power Supplies */ uint8_t Nslots; /* Number of Device Slots */ uint8_t DoorLock; /* Door Lock Installed */ uint8_t Ntherm; /* Number of Temperature Sensors */ uint8_t Nspkrs; /* Number of Speakers */ uint8_t Ntstats; /* Number of Thermostats */ /* * Cached Flag Bytes for Global Status */ uint8_t flag1; uint8_t flag2; /* * What object index ID is where various slots start. 
*/ uint8_t pwroff; uint8_t slotoff; #define SAFT_ALARM_OFFSET(cc) (cc)->slotoff - 1 encioc_enc_status_t adm_status; encioc_enc_status_t enc_status; encioc_enc_status_t slot_status; safte_control_reqlist_t requests; safte_control_request_t *current_request; int current_request_stage; int current_request_stages; }; #define SAFT_FLG1_ALARM 0x1 #define SAFT_FLG1_GLOBFAIL 0x2 #define SAFT_FLG1_GLOBWARN 0x4 #define SAFT_FLG1_ENCPWROFF 0x8 #define SAFT_FLG1_ENCFANFAIL 0x10 #define SAFT_FLG1_ENCPWRFAIL 0x20 #define SAFT_FLG1_ENCDRVFAIL 0x40 #define SAFT_FLG1_ENCDRVWARN 0x80 #define SAFT_FLG2_LOCKDOOR 0x4 #define SAFT_PRIVATE sizeof (struct scfg) static char *safte_2little = "Too Little Data Returned (%d) at line %d\n"; #define SAFT_BAIL(r, x) \ if ((r) >= (x)) { \ ENC_VLOG(enc, safte_2little, x, __LINE__);\ return (EIO); \ } int emulate_array_devices = 1; SYSCTL_DECL(_kern_cam_enc); SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RWTUN, &emulate_array_devices, 0, "Emulate Array Devices for SAF-TE"); static int safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf) { if (state->page_code != SAFTE_RD_RDCFG && enc->enc_cache.nelms == 0) { enc_update_request(enc, SAFTE_UPDATE_READCONFIG); return (-1); } if (enc->enc_type == ENC_SEMB_SAFT) { semb_read_buffer(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, state->page_code, buf, state->buf_size, state->timeout); } else { scsi_read_buffer(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, 1, state->page_code, 0, buf, state->buf_size, SSD_FULL_SIZE, state->timeout); } return (0); } static int safte_process_config(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct scfg *cfg; uint8_t *buf = *bufp; int i, r; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); if (error != 0) return (error); if (xfer_len < 6) { ENC_VLOG(enc, "too little data (%d) for configuration\n", xfer_len); return (EIO); } cfg->Nfans = buf[0]; cfg->Npwr = buf[1]; cfg->Nslots = buf[2]; cfg->DoorLock = buf[3]; cfg->Ntherm = buf[4]; cfg->Nspkrs = buf[5]; if (xfer_len >= 7) cfg->Ntstats = buf[6] & 0x0f; else cfg->Ntstats = 0; ENC_VLOG(enc, "Nfans %d Npwr %d Nslots %d Lck %d Ntherm %d Nspkrs %d " "Ntstats %d\n", cfg->Nfans, cfg->Npwr, cfg->Nslots, cfg->DoorLock, cfg->Ntherm, cfg->Nspkrs, cfg->Ntstats); enc->enc_cache.nelms = cfg->Nfans + cfg->Npwr + cfg->Nslots + cfg->DoorLock + cfg->Ntherm + cfg->Nspkrs + cfg->Ntstats + 1; ENC_FREE_AND_NULL(enc->enc_cache.elm_map); enc->enc_cache.elm_map = malloc(enc->enc_cache.nelms * sizeof(enc_element_t), M_SCSIENC, M_WAITOK|M_ZERO); r = 0; /* * Note that this is all arranged for the convenience * in later fetches of status. */ for (i = 0; i < cfg->Nfans; i++) enc->enc_cache.elm_map[r++].enctype = ELMTYP_FAN; cfg->pwroff = (uint8_t) r; for (i = 0; i < cfg->Npwr; i++) enc->enc_cache.elm_map[r++].enctype = ELMTYP_POWER; for (i = 0; i < cfg->DoorLock; i++) enc->enc_cache.elm_map[r++].enctype = ELMTYP_DOORLOCK; if (cfg->Nspkrs > 0) enc->enc_cache.elm_map[r++].enctype = ELMTYP_ALARM; for (i = 0; i < cfg->Ntherm; i++) enc->enc_cache.elm_map[r++].enctype = ELMTYP_THERM; for (i = 0; i <= cfg->Ntstats; i++) enc->enc_cache.elm_map[r++].enctype = ELMTYP_THERM; cfg->slotoff = (uint8_t) r; for (i = 0; i < cfg->Nslots; i++) enc->enc_cache.elm_map[r++].enctype = emulate_array_devices ? 
ELMTYP_ARRAY_DEV : ELMTYP_DEVICE; enc_update_request(enc, SAFTE_UPDATE_READGFLAGS); enc_update_request(enc, SAFTE_UPDATE_READENCSTATUS); enc_update_request(enc, SAFTE_UPDATE_READSLOTSTATUS); return (0); } static int safte_process_gflags(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct scfg *cfg; uint8_t *buf = *bufp; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); if (error != 0) return (error); SAFT_BAIL(3, xfer_len); cfg->flag1 = buf[1]; cfg->flag2 = buf[2]; cfg->adm_status = 0; if (cfg->flag1 & SAFT_FLG1_GLOBFAIL) cfg->adm_status |= SES_ENCSTAT_CRITICAL; else if (cfg->flag1 & SAFT_FLG1_GLOBWARN) cfg->adm_status |= SES_ENCSTAT_NONCRITICAL; return (0); } static int safte_process_status(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct scfg *cfg; uint8_t *buf = *bufp; int oid, r, i, nitems; uint16_t tempflags; enc_cache_t *cache = &enc->enc_cache; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); if (error != 0) return (error); oid = r = 0; cfg->enc_status = 0; for (nitems = i = 0; i < cfg->Nfans; i++) { SAFT_BAIL(r, xfer_len); /* * 0 = Fan Operational * 1 = Fan is malfunctioning * 2 = Fan is not present * 0x80 = Unknown or Not Reportable Status */ cache->elm_map[oid].encstat[1] = 0; /* resvd */ cache->elm_map[oid].encstat[2] = 0; /* resvd */ if (cfg->flag1 & SAFT_FLG1_ENCFANFAIL) cache->elm_map[oid].encstat[3] |= 0x40; else cache->elm_map[oid].encstat[3] &= ~0x40; switch ((int)buf[r]) { case 0: nitems++; cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; if ((cache->elm_map[oid].encstat[3] & 0x37) == 0) cache->elm_map[oid].encstat[3] |= 0x27; break; case 1: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_CRIT; /* * FAIL and FAN STOPPED synthesized */ cache->elm_map[oid].encstat[3] |= 0x10; cache->elm_map[oid].encstat[3] &= ~0x07; /* * Enclosure marked with CRITICAL error * if only one fan or no thermometers, * else the NONCRITICAL error is set. */ if (cfg->Nfans == 1 || (cfg->Ntherm + cfg->Ntstats) == 0) cfg->enc_status |= SES_ENCSTAT_CRITICAL; else cfg->enc_status |= SES_ENCSTAT_NONCRITICAL; break; case 2: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_NOTINSTALLED; cache->elm_map[oid].encstat[3] |= 0x10; cache->elm_map[oid].encstat[3] &= ~0x07; /* * Enclosure marked with CRITICAL error * if only one fan or no thermometers, * else the NONCRITICAL error is set. */ if (cfg->Nfans == 1) cfg->enc_status |= SES_ENCSTAT_CRITICAL; else cfg->enc_status |= SES_ENCSTAT_NONCRITICAL; break; case 0x80: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_UNKNOWN; cache->elm_map[oid].encstat[3] = 0; cfg->enc_status |= SES_ENCSTAT_INFO; break; default: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_UNSUPPORTED; ENC_VLOG(enc, "Unknown fan%d status 0x%x\n", i, buf[r] & 0xff); break; } cache->elm_map[oid++].svalid = 1; r++; } /* * No matter how you cut it, no cooling elements when there * should be some there is critical. 
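 *
 * To summarize the decoding above, the SAF-TE per-fan status byte
 * maps onto SES object status as:
 *
 *	0x00 -> SES_OBJSTAT_OK
 *	0x01 -> SES_OBJSTAT_CRIT	(FAIL and FAN STOPPED synthesized)
 *	0x02 -> SES_OBJSTAT_NOTINSTALLED
 *	0x80 -> SES_OBJSTAT_UNKNOWN
 *	else -> SES_OBJSTAT_UNSUPPORTED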
*/ if (cfg->Nfans && nitems == 0) cfg->enc_status |= SES_ENCSTAT_CRITICAL; for (i = 0; i < cfg->Npwr; i++) { SAFT_BAIL(r, xfer_len); cache->elm_map[oid].encstat[0] = SES_OBJSTAT_UNKNOWN; cache->elm_map[oid].encstat[1] = 0; /* resvd */ cache->elm_map[oid].encstat[2] = 0; /* resvd */ cache->elm_map[oid].encstat[3] = 0x20; /* requested on */ switch (buf[r]) { case 0x00: /* pws operational and on */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; break; case 0x01: /* pws operational and off */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; cache->elm_map[oid].encstat[3] = 0x10; cfg->enc_status |= SES_ENCSTAT_INFO; break; case 0x10: /* pws is malfunctioning and commanded on */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_CRIT; cache->elm_map[oid].encstat[3] = 0x61; cfg->enc_status |= SES_ENCSTAT_NONCRITICAL; break; case 0x11: /* pws is malfunctioning and commanded off */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_NONCRIT; cache->elm_map[oid].encstat[3] = 0x51; cfg->enc_status |= SES_ENCSTAT_NONCRITICAL; break; case 0x20: /* pws is not present */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_NOTINSTALLED; cache->elm_map[oid].encstat[3] = 0; cfg->enc_status |= SES_ENCSTAT_INFO; break; case 0x21: /* pws is present */ /* * This is for enclosures that cannot tell whether the * device is on or malfunctioning, but know that it is * present. Just fall through. */ /* FALLTHROUGH */ case 0x80: /* Unknown or Not Reportable Status */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_UNKNOWN; cache->elm_map[oid].encstat[3] = 0; cfg->enc_status |= SES_ENCSTAT_INFO; break; default: ENC_VLOG(enc, "unknown power supply %d status (0x%x)\n", i, buf[r] & 0xff); break; } enc->enc_cache.elm_map[oid++].svalid = 1; r++; } /* * Copy Slot SCSI IDs */ for (i = 0; i < cfg->Nslots; i++) { SAFT_BAIL(r, xfer_len); if (cache->elm_map[cfg->slotoff + i].enctype == ELMTYP_DEVICE) cache->elm_map[cfg->slotoff + i].encstat[1] = buf[r]; r++; } /* * We always have doorlock status, no matter what, * but we only save the status if we have one. */ SAFT_BAIL(r, xfer_len); if (cfg->DoorLock) { /* * 0 = Door Locked * 1 = Door Unlocked, or no Lock Installed * 0x80 = Unknown or Not Reportable Status */ cache->elm_map[oid].encstat[1] = 0; cache->elm_map[oid].encstat[2] = 0; switch (buf[r]) { case 0: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; cache->elm_map[oid].encstat[3] = 0; break; case 1: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; cache->elm_map[oid].encstat[3] = 1; break; case 0x80: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_UNKNOWN; cache->elm_map[oid].encstat[3] = 0; cfg->enc_status |= SES_ENCSTAT_INFO; break; default: cache->elm_map[oid].encstat[0] = SES_OBJSTAT_UNSUPPORTED; ENC_VLOG(enc, "unknown lock status 0x%x\n", buf[r] & 0xff); break; } cache->elm_map[oid++].svalid = 1; } r++; /* * We always have speaker status, no matter what, * but we only save the status if we have one. */ SAFT_BAIL(r, xfer_len); if (cfg->Nspkrs) { cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; cache->elm_map[oid].encstat[1] = 0; cache->elm_map[oid].encstat[2] = 0; if (buf[r] == 0) { cache->elm_map[oid].encstat[0] |= SESCTL_DISABLE; cache->elm_map[oid].encstat[3] |= 0x40; } cache->elm_map[oid++].svalid = 1; } r++; /* * Now, for "pseudo" thermometers, we have two bytes * of information in enclosure status- 16 bits. 
Actually, * the MSB is a single TEMP ALERT flag indicating whether * any other bits are set, but, thanks to fuzzy thinking, * in the SAF-TE spec, this can also be set even if no * other bits are set, thus making this really another * binary temperature sensor. */ SAFT_BAIL(r + cfg->Ntherm, xfer_len); tempflags = buf[r + cfg->Ntherm]; SAFT_BAIL(r + cfg->Ntherm + 1, xfer_len); tempflags |= (tempflags << 8) | buf[r + cfg->Ntherm + 1]; for (i = 0; i < cfg->Ntherm; i++) { SAFT_BAIL(r, xfer_len); /* * Status is a range from -10 to 245 deg Celsius, * which we need to normalize to -20 to -245 according * to the latest SCSI spec, which makes little * sense since this would overflow an 8bit value. * Well, still, the base normalization is -20, * not -10, so we have to adjust. * * So what's over and under temperature? * Hmm- we'll state that 'normal' operating * is 10 to 40 deg Celsius. */ /* * Actually.... All of the units that people out in the world * seem to have do not come even close to setting a value that * complies with this spec. * * The closest explanation I could find was in an * LSI-Logic manual, which seemed to indicate that * this value would be set by whatever the I2C code * would interpolate from the output of an LM75 * temperature sensor. * * This means that it is impossible to use the actual * numeric value to predict anything. But we don't want * to lose the value. So, we'll propagate the *uncorrected* * value and set SES_OBJSTAT_NOTAVAIL. We'll depend on the * temperature flags for warnings. */ if (tempflags & (1 << i)) { cache->elm_map[oid].encstat[0] = SES_OBJSTAT_CRIT; cfg->enc_status |= SES_ENCSTAT_CRITICAL; } else cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; cache->elm_map[oid].encstat[1] = 0; cache->elm_map[oid].encstat[2] = buf[r]; cache->elm_map[oid].encstat[3] = 0; cache->elm_map[oid++].svalid = 1; r++; } for (i = 0; i <= cfg->Ntstats; i++) { cache->elm_map[oid].encstat[1] = 0; if (tempflags & (1 << ((i == cfg->Ntstats) ? 15 : (cfg->Ntherm + i)))) { cache->elm_map[oid].encstat[0] = SES_OBJSTAT_CRIT; cache->elm_map[4].encstat[2] = 0xff; /* * Set 'over temperature' failure. */ cache->elm_map[oid].encstat[3] = 8; cfg->enc_status |= SES_ENCSTAT_CRITICAL; } else { /* * We used to say 'not available' and synthesize a * nominal 30 deg (C)- that was wrong. Actually, * Just say 'OK', and use the reserved value of * zero. 
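 *
 * For illustration, with the hypothetical counts Ntherm = 2 and
 * Ntstats = 1, the flag bits tested by the thermometer loop above
 * and the thermostat loop here are:
 *
 *	bit 0, bit 1	thermometers 0 and 1
 *	bit 2		thermostat 0		(bit Ntherm + i)
 *	bit 15		summary TEMP ALERT, reported as the final
 *			pseudo thermometer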
*/ if ((cfg->Ntherm + cfg->Ntstats) == 0) cache->elm_map[oid].encstat[0] = SES_OBJSTAT_NOTAVAIL; else cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; cache->elm_map[oid].encstat[2] = 0; cache->elm_map[oid].encstat[3] = 0; } cache->elm_map[oid++].svalid = 1; } r += 2; cache->enc_status = cfg->enc_status | cfg->slot_status | cfg->adm_status; return (0); } static int safte_process_slotstatus(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct scfg *cfg; uint8_t *buf = *bufp; enc_cache_t *cache = &enc->enc_cache; int oid, r, i; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); if (error != 0) return (error); cfg->slot_status = 0; oid = cfg->slotoff; for (r = i = 0; i < cfg->Nslots; i++, r += 4) { SAFT_BAIL(r+3, xfer_len); if (cache->elm_map[oid].enctype == ELMTYP_ARRAY_DEV) cache->elm_map[oid].encstat[1] = 0; cache->elm_map[oid].encstat[2] &= SESCTL_RQSID; cache->elm_map[oid].encstat[3] = 0; if ((buf[r+3] & 0x01) == 0) { /* no device */ cache->elm_map[oid].encstat[0] = SES_OBJSTAT_NOTINSTALLED; } else if (buf[r+0] & 0x02) { cache->elm_map[oid].encstat[0] = SES_OBJSTAT_CRIT; cfg->slot_status |= SES_ENCSTAT_CRITICAL; } else if (buf[r+0] & 0x40) { cache->elm_map[oid].encstat[0] = SES_OBJSTAT_NONCRIT; cfg->slot_status |= SES_ENCSTAT_NONCRITICAL; } else { cache->elm_map[oid].encstat[0] = SES_OBJSTAT_OK; } if (buf[r+3] & 0x2) { if (buf[r+3] & 0x01) cache->elm_map[oid].encstat[2] |= SESCTL_RQSRMV; else cache->elm_map[oid].encstat[2] |= SESCTL_RQSINS; } if ((buf[r+3] & 0x04) == 0) cache->elm_map[oid].encstat[3] |= SESCTL_DEVOFF; if (buf[r+0] & 0x02) cache->elm_map[oid].encstat[3] |= SESCTL_RQSFLT; if (buf[r+0] & 0x40) cache->elm_map[oid].encstat[0] |= SESCTL_PRDFAIL; if (cache->elm_map[oid].enctype == ELMTYP_ARRAY_DEV) { if (buf[r+0] & 0x01) cache->elm_map[oid].encstat[1] |= 0x80; if (buf[r+0] & 0x04) cache->elm_map[oid].encstat[1] |= 0x02; if (buf[r+0] & 0x08) cache->elm_map[oid].encstat[1] |= 0x04; if (buf[r+0] & 0x10) cache->elm_map[oid].encstat[1] |= 0x08; if (buf[r+0] & 0x20) cache->elm_map[oid].encstat[1] |= 0x10; if (buf[r+1] & 0x01) cache->elm_map[oid].encstat[1] |= 0x20; if (buf[r+1] & 0x02) cache->elm_map[oid].encstat[1] |= 0x01; } cache->elm_map[oid++].svalid = 1; } cache->enc_status = cfg->enc_status | cfg->slot_status | cfg->adm_status; return (0); } static int safte_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf) { struct scfg *cfg; enc_element_t *ep, *ep1; safte_control_request_t *req; int i, idx, xfer_len; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); if (enc->enc_cache.nelms == 0) { enc_update_request(enc, SAFTE_UPDATE_READCONFIG); return (-1); } if (cfg->current_request == NULL) { cfg->current_request = TAILQ_FIRST(&cfg->requests); TAILQ_REMOVE(&cfg->requests, cfg->current_request, links); cfg->current_request_stage = 0; cfg->current_request_stages = 1; } req = cfg->current_request; idx = (int)req->elm_idx; if (req->elm_idx == SES_SETSTATUS_ENC_IDX) { cfg->adm_status = req->elm_stat[0] & ALL_ENC_STAT; cfg->flag1 &= ~(SAFT_FLG1_GLOBFAIL|SAFT_FLG1_GLOBWARN); if (req->elm_stat[0] & (SES_ENCSTAT_CRITICAL|SES_ENCSTAT_UNRECOV)) cfg->flag1 |= SAFT_FLG1_GLOBFAIL; else if (req->elm_stat[0] & SES_ENCSTAT_NONCRITICAL) cfg->flag1 |= SAFT_FLG1_GLOBWARN; buf[0] = SAFTE_WT_GLOBAL; buf[1] = cfg->flag1; buf[2] = cfg->flag2; buf[3] = 0; xfer_len = 16; } else { ep = &enc->enc_cache.elm_map[idx]; switch (ep->enctype) { case ELMTYP_DEVICE: case ELMTYP_ARRAY_DEV: switch 
(cfg->current_request_stage) { case 0: ep->priv = 0; if (req->elm_stat[0] & SESCTL_PRDFAIL) ep->priv |= 0x40; if (req->elm_stat[3] & SESCTL_RQSFLT) ep->priv |= 0x02; if (ep->enctype == ELMTYP_ARRAY_DEV) { if (req->elm_stat[1] & 0x01) ep->priv |= 0x200; if (req->elm_stat[1] & 0x02) ep->priv |= 0x04; if (req->elm_stat[1] & 0x04) ep->priv |= 0x08; if (req->elm_stat[1] & 0x08) ep->priv |= 0x10; if (req->elm_stat[1] & 0x10) ep->priv |= 0x20; if (req->elm_stat[1] & 0x20) ep->priv |= 0x100; if (req->elm_stat[1] & 0x80) ep->priv |= 0x01; } if (ep->priv == 0) ep->priv |= 0x01; /* no errors */ buf[0] = SAFTE_WT_DSTAT; for (i = 0; i < cfg->Nslots; i++) { ep1 = &enc->enc_cache.elm_map[cfg->slotoff + i]; buf[1 + (3 * i)] = ep1->priv; buf[2 + (3 * i)] = ep1->priv >> 8; } xfer_len = cfg->Nslots * 3 + 1; #define DEVON(x) (!(((x)[2] & SESCTL_RQSINS) | \ ((x)[2] & SESCTL_RQSRMV) | \ ((x)[3] & SESCTL_DEVOFF))) if (DEVON(req->elm_stat) != DEVON(ep->encstat)) cfg->current_request_stages++; #define IDON(x) (!!((x)[2] & SESCTL_RQSID)) if (IDON(req->elm_stat) != IDON(ep->encstat)) cfg->current_request_stages++; break; case 1: case 2: buf[0] = SAFTE_WT_SLTOP; buf[1] = idx - cfg->slotoff; if (cfg->current_request_stage == 1 && DEVON(req->elm_stat) != DEVON(ep->encstat)) { if (DEVON(req->elm_stat)) buf[2] = 0x01; else buf[2] = 0x02; } else { if (IDON(req->elm_stat)) buf[2] = 0x04; else buf[2] = 0x00; ep->encstat[2] &= ~SESCTL_RQSID; ep->encstat[2] |= req->elm_stat[2] & SESCTL_RQSID; } xfer_len = 64; break; default: return (EINVAL); } break; case ELMTYP_POWER: cfg->current_request_stages = 2; switch (cfg->current_request_stage) { case 0: if (req->elm_stat[3] & SESCTL_RQSTFAIL) { cfg->flag1 |= SAFT_FLG1_ENCPWRFAIL; } else { cfg->flag1 &= ~SAFT_FLG1_ENCPWRFAIL; } buf[0] = SAFTE_WT_GLOBAL; buf[1] = cfg->flag1; buf[2] = cfg->flag2; buf[3] = 0; xfer_len = 16; break; case 1: buf[0] = SAFTE_WT_ACTPWS; buf[1] = idx - cfg->pwroff; if (req->elm_stat[3] & SESCTL_RQSTON) buf[2] = 0x01; else buf[2] = 0x00; buf[3] = 0; xfer_len = 16; default: return (EINVAL); } break; case ELMTYP_FAN: if ((req->elm_stat[3] & 0x7) != 0) cfg->current_request_stages = 2; switch (cfg->current_request_stage) { case 0: if (req->elm_stat[3] & SESCTL_RQSTFAIL) cfg->flag1 |= SAFT_FLG1_ENCFANFAIL; else cfg->flag1 &= ~SAFT_FLG1_ENCFANFAIL; buf[0] = SAFTE_WT_GLOBAL; buf[1] = cfg->flag1; buf[2] = cfg->flag2; buf[3] = 0; xfer_len = 16; break; case 1: buf[0] = SAFTE_WT_FANSPD; buf[1] = idx; if (req->elm_stat[3] & SESCTL_RQSTON) { if ((req->elm_stat[3] & 0x7) == 7) buf[2] = 4; else if ((req->elm_stat[3] & 0x7) >= 5) buf[2] = 3; else if ((req->elm_stat[3] & 0x7) >= 3) buf[2] = 2; else buf[2] = 1; } else buf[2] = 0; buf[3] = 0; xfer_len = 16; ep->encstat[3] = req->elm_stat[3] & 0x67; default: return (EINVAL); } break; case ELMTYP_DOORLOCK: if (req->elm_stat[3] & 0x1) cfg->flag2 &= ~SAFT_FLG2_LOCKDOOR; else cfg->flag2 |= SAFT_FLG2_LOCKDOOR; buf[0] = SAFTE_WT_GLOBAL; buf[1] = cfg->flag1; buf[2] = cfg->flag2; buf[3] = 0; xfer_len = 16; break; case ELMTYP_ALARM: if ((req->elm_stat[0] & SESCTL_DISABLE) || (req->elm_stat[3] & 0x40)) { cfg->flag2 &= ~SAFT_FLG1_ALARM; } else if ((req->elm_stat[3] & 0x0f) != 0) { cfg->flag2 |= SAFT_FLG1_ALARM; } else { cfg->flag2 &= ~SAFT_FLG1_ALARM; } buf[0] = SAFTE_WT_GLOBAL; buf[1] = cfg->flag1; buf[2] = cfg->flag2; buf[3] = 0; xfer_len = 16; ep->encstat[3] = req->elm_stat[3]; break; default: return (EINVAL); } } if (enc->enc_type == ENC_SEMB_SAFT) { semb_write_buffer(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, buf, xfer_len, 
state->timeout); } else { scsi_write_buffer(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, 1, 0, 0, buf, xfer_len, SSD_FULL_SIZE, state->timeout); } return (0); } static int safte_process_control_request(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct scfg *cfg; safte_control_request_t *req; int idx, type; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); req = cfg->current_request; if (req->result == 0) req->result = error; if (++cfg->current_request_stage >= cfg->current_request_stages) { idx = req->elm_idx; if (idx == SES_SETSTATUS_ENC_IDX) type = -1; else type = enc->enc_cache.elm_map[idx].enctype; if (type == ELMTYP_DEVICE || type == ELMTYP_ARRAY_DEV) enc_update_request(enc, SAFTE_UPDATE_READSLOTSTATUS); else enc_update_request(enc, SAFTE_UPDATE_READENCSTATUS); cfg->current_request = NULL; wakeup(req); } else { enc_update_request(enc, SAFTE_PROCESS_CONTROL_REQS); } return (0); } static void safte_softc_invalidate(enc_softc_t *enc) { struct scfg *cfg; cfg = enc->enc_private; safte_terminate_control_requests(&cfg->requests, ENXIO); } static void safte_softc_cleanup(enc_softc_t *enc) { ENC_FREE_AND_NULL(enc->enc_cache.elm_map); ENC_FREE_AND_NULL(enc->enc_private); enc->enc_cache.nelms = 0; } static int safte_init_enc(enc_softc_t *enc) { struct scfg *cfg; int err; static char cdb0[6] = { SEND_DIAGNOSTIC }; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); err = enc_runcmd(enc, cdb0, 6, NULL, 0); if (err) { return (err); } DELAY(5000); cfg->flag1 = 0; cfg->flag2 = 0; err = safte_set_enc_status(enc, 0, 1); return (err); } static int safte_get_enc_status(enc_softc_t *enc, int slpflg) { return (0); } static int safte_set_enc_status(enc_softc_t *enc, uint8_t encstat, int slpflag) { struct scfg *cfg; safte_control_request_t req; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); req.elm_idx = SES_SETSTATUS_ENC_IDX; req.elm_stat[0] = encstat & 0xf; req.result = 0; TAILQ_INSERT_TAIL(&cfg->requests, &req, links); enc_update_request(enc, SAFTE_PROCESS_CONTROL_REQS); cam_periph_sleep(enc->periph, &req, PUSER, "encstat", 0); return (req.result); } static int safte_get_elm_status(enc_softc_t *enc, encioc_elm_status_t *elms, int slpflg) { int i = (int)elms->elm_idx; elms->cstat[0] = enc->enc_cache.elm_map[i].encstat[0]; elms->cstat[1] = enc->enc_cache.elm_map[i].encstat[1]; elms->cstat[2] = enc->enc_cache.elm_map[i].encstat[2]; elms->cstat[3] = enc->enc_cache.elm_map[i].encstat[3]; return (0); } static int safte_set_elm_status(enc_softc_t *enc, encioc_elm_status_t *elms, int slpflag) { struct scfg *cfg; safte_control_request_t req; cfg = enc->enc_private; if (cfg == NULL) return (ENXIO); /* If this is clear, we don't do diddly. 
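 *
 * For illustration, a userland caller reaches this path through the
 * ENCIOC_SETELMSTAT ioctl; a minimal sketch (fd and idx are
 * hypothetical) that requests the identify indicator would be:
 *
 *	encioc_elm_status_t elms;
 *
 *	memset(&elms, 0, sizeof(elms));
 *	elms.elm_idx = idx;
 *	elms.cstat[0] = SESCTL_CSEL;	(mark the element selected)
 *	elms.cstat[2] = SESCTL_RQSID;	(request the identify LED)
 *	ioctl(fd, ENCIOC_SETELMSTAT, &elms);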
*/ if ((elms->cstat[0] & SESCTL_CSEL) == 0) return (0); req.elm_idx = elms->elm_idx; memcpy(&req.elm_stat, elms->cstat, sizeof(req.elm_stat)); req.result = 0; TAILQ_INSERT_TAIL(&cfg->requests, &req, links); enc_update_request(enc, SAFTE_PROCESS_CONTROL_REQS); cam_periph_sleep(enc->periph, &req, PUSER, "encstat", 0); return (req.result); } static void safte_poll_status(enc_softc_t *enc) { enc_update_request(enc, SAFTE_UPDATE_READENCSTATUS); enc_update_request(enc, SAFTE_UPDATE_READSLOTSTATUS); } static struct enc_vec safte_enc_vec = { .softc_invalidate = safte_softc_invalidate, .softc_cleanup = safte_softc_cleanup, .init_enc = safte_init_enc, .get_enc_status = safte_get_enc_status, .set_enc_status = safte_set_enc_status, .get_elm_status = safte_get_elm_status, .set_elm_status = safte_set_elm_status, .poll_status = safte_poll_status }; int safte_softc_init(enc_softc_t *enc) { struct scfg *cfg; enc->enc_vec = safte_enc_vec; enc->enc_fsm_states = enc_fsm_states; if (enc->enc_private == NULL) { enc->enc_private = ENC_MALLOCZ(SAFT_PRIVATE); if (enc->enc_private == NULL) return (ENOMEM); } cfg = enc->enc_private; enc->enc_cache.nelms = 0; enc->enc_cache.enc_status = 0; TAILQ_INIT(&cfg->requests); return (0); } Index: head/sys/cam/scsi/scsi_enc_ses.c =================================================================== --- head/sys/cam/scsi/scsi_enc_ses.c (revision 326264) +++ head/sys/cam/scsi/scsi_enc_ses.c (revision 326265) @@ -1,2884 +1,2886 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2000 Matthew Jacob * Copyright (c) 2010 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /** * \file scsi_enc_ses.c * * Structures and routines specific and private to SES only */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* SES Native Type Device Support */ /* SES Diagnostic Page Codes */ typedef enum { SesSupportedPages = 0x0, SesConfigPage = 0x1, SesControlPage = 0x2, SesStatusPage = SesControlPage, SesHelpTxt = 0x3, SesStringOut = 0x4, SesStringIn = SesStringOut, SesThresholdOut = 0x5, SesThresholdIn = SesThresholdOut, SesArrayControl = 0x6, /* Obsolete in SES v2 */ SesArrayStatus = SesArrayControl, SesElementDescriptor = 0x7, SesShortStatus = 0x8, SesEnclosureBusy = 0x9, SesAddlElementStatus = 0xa } SesDiagPageCodes; typedef struct ses_type { const struct ses_elm_type_desc *hdr; const char *text; } ses_type_t; typedef struct ses_comstat { uint8_t comstatus; uint8_t comstat[3]; } ses_comstat_t; typedef union ses_addl_data { struct ses_elm_sas_device_phy *sasdev_phys; struct ses_elm_sas_expander_phy *sasexp_phys; struct ses_elm_sas_port_phy *sasport_phys; struct ses_fcobj_port *fc_ports; } ses_add_data_t; typedef struct ses_addl_status { struct ses_elm_addlstatus_base_hdr *hdr; union { union ses_fcobj_hdr *fc; union ses_elm_sas_hdr *sas; } proto_hdr; union ses_addl_data proto_data; /* array sizes stored in header */ } ses_add_status_t; typedef struct ses_element { uint8_t eip; /* eip bit is set */ uint16_t descr_len; /* length of the descriptor */ char *descr; /* descriptor for this object */ struct ses_addl_status addl; /* additional status info */ } ses_element_t; typedef struct ses_control_request { int elm_idx; ses_comstat_t elm_stat; int result; TAILQ_ENTRY(ses_control_request) links; } ses_control_request_t; TAILQ_HEAD(ses_control_reqlist, ses_control_request); typedef struct ses_control_reqlist ses_control_reqlist_t; enum { SES_SETSTATUS_ENC_IDX = -1 }; static void ses_terminate_control_requests(ses_control_reqlist_t *reqlist, int result) { ses_control_request_t *req; while ((req = TAILQ_FIRST(reqlist)) != NULL) { TAILQ_REMOVE(reqlist, req, links); req->result = result; wakeup(req); } } enum ses_iter_index_values { /** * \brief Value of an initialized but invalid index * in a ses_iterator object. * * This value is used for the individual_element_index of * overall status elements and for all index types when * an iterator is first initialized. */ ITERATOR_INDEX_INVALID = -1, /** * \brief Value of an index in a ses_iterator object * when the iterator has traversed past the last * valid element. */ ITERATOR_INDEX_END = INT_MAX }; /** * \brief Structure encapsulating all data necessary to traverse the * elements of a SES configuration. * * The ses_iterator object simplifies the task of iterating through all * elements detected via the SES configuration page by tracking the numerous * element indexes that, instead of memoizing in the softc, we calculate * on the fly during the traversal of the element objects. The various * indexes are necessary due to the varying needs of matching objects in * the different SES pages. Some pages (e.g. Status/Control) contain all * elements, while others (e.g. Additional Element Status) only contain * individual elements (no overall status elements) of particular types. * * To use an iterator, initialize it with ses_iter_init(), and then * use ses_iter_next() to traverse the elements (including the first) in * the configuration.
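 * A minimal traversal, mirroring the use in ses_process_config()
 * below (error handling elided), looks like:
 *
 *	struct ses_iterator iter;
 *	enc_element_t *element;
 *
 *	ses_iter_init(enc, cache, &iter);
 *	while ((element = ses_iter_next(&iter)) != NULL) {
 *		consume element and iter.global_element_index, etc.
 *	}
 *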
Once an iterator is initialized with ses_iter_init(), * you may also seek to any particular element by either its global or * individual element index via the ses_iter_seek_to() function. You may * also return an iterator to the position just before the first element * (i.e. the same state as after an ses_iter_init()), with ses_iter_reset(). */ struct ses_iterator { /** * \brief Backlink to the overall software configuration structure. * * This is included for convenience so the iteration functions * need only take a single, struct ses_iterator *, argument. */ enc_softc_t *enc; enc_cache_t *cache; /** * \brief Index of the type of the current element within the * ses_cache's ses_types array. */ int type_index; /** * \brief The position (0 based) of this element relative to all other * elements of this type. * * This index resets to zero every time the iterator transitions * to elements of a new type in the configuration. */ int type_element_index; /** * \brief The position (0 based) of this element relative to all * other individual status elements in the configuration. * * This index ranges from 0 through the number of individual * elements in the configuration. When the iterator returns * an overall status element, individual_element_index is * set to ITERATOR_INDEX_INVALID, to indicate that it does * not apply to the current element. */ int individual_element_index; /** * \brief The position (0 based) of this element relative to * all elements in the configuration. * * This index is appropriate for indexing into enc->ses_elm_map. */ int global_element_index; /** * \brief The last valid individual element index of this * iterator. * * When an iterator traverses an overall status element, the * individual element index is reset to ITERATOR_INDEX_INVALID * to prevent unintentional use of the individual_element_index * field. The saved_individual_element_index allows the iterator * to restore its position in the individual elements upon * reaching the next individual element.
*/ int saved_individual_element_index; }; typedef enum { SES_UPDATE_NONE, SES_UPDATE_PAGES, SES_UPDATE_GETCONFIG, SES_UPDATE_GETSTATUS, SES_UPDATE_GETELMDESCS, SES_UPDATE_GETELMADDLSTATUS, SES_PROCESS_CONTROL_REQS, SES_PUBLISH_PHYSPATHS, SES_PUBLISH_CACHE, SES_NUM_UPDATE_STATES } ses_update_action; static enc_softc_cleanup_t ses_softc_cleanup; #define SCSZ 0x8000 static fsm_fill_handler_t ses_fill_rcv_diag_io; static fsm_fill_handler_t ses_fill_control_request; static fsm_done_handler_t ses_process_pages; static fsm_done_handler_t ses_process_config; static fsm_done_handler_t ses_process_status; static fsm_done_handler_t ses_process_elm_descs; static fsm_done_handler_t ses_process_elm_addlstatus; static fsm_done_handler_t ses_process_control_request; static fsm_done_handler_t ses_publish_physpaths; static fsm_done_handler_t ses_publish_cache; static struct enc_fsm_state enc_fsm_states[SES_NUM_UPDATE_STATES] = { { "SES_UPDATE_NONE", 0, 0, 0, NULL, NULL, NULL }, { "SES_UPDATE_PAGES", SesSupportedPages, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_pages, enc_error }, { "SES_UPDATE_GETCONFIG", SesConfigPage, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_config, enc_error }, { "SES_UPDATE_GETSTATUS", SesStatusPage, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_status, enc_error }, { "SES_UPDATE_GETELMDESCS", SesElementDescriptor, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_elm_descs, enc_error }, { "SES_UPDATE_GETELMADDLSTATUS", SesAddlElementStatus, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_elm_addlstatus, enc_error }, { "SES_PROCESS_CONTROL_REQS", SesControlPage, SCSZ, 60 * 1000, ses_fill_control_request, ses_process_control_request, enc_error }, { "SES_PUBLISH_PHYSPATHS", 0, 0, 0, NULL, ses_publish_physpaths, NULL }, { "SES_PUBLISH_CACHE", 0, 0, 0, NULL, ses_publish_cache, NULL } }; typedef struct ses_cache { /* Source for all the configuration data pointers */ const struct ses_cfg_page *cfg_page; /* References into the config page. */ int ses_nsubencs; const struct ses_enc_desc * const *subencs; int ses_ntypes; const ses_type_t *ses_types; /* Source for all the status pointers */ const struct ses_status_page *status_page; /* Source for all the object descriptor pointers */ const struct ses_elem_descr_page *elm_descs_page; /* Source for all the additional object status pointers */ const struct ses_addl_elem_status_page *elm_addlstatus_page; } ses_cache_t; typedef struct ses_softc { uint32_t ses_flags; #define SES_FLAG_TIMEDCOMP 0x01 #define SES_FLAG_ADDLSTATUS 0x02 #define SES_FLAG_DESC 0x04 ses_control_reqlist_t ses_requests; ses_control_reqlist_t ses_pending_requests; } ses_softc_t; /** * \brief Reset a SES iterator to just before the first element * in the configuration. * * \param iter The iterator object to reset. * * The indexes within a reset iterator are invalid and will only * become valid upon completion of a ses_iter_seek_to() or a * ses_iter_next(). */ static void ses_iter_reset(struct ses_iterator *iter) { /* * Set our indexes to just before the first valid element * of the first type (ITERATOR_INDEX_INVALID == -1). This * simplifies the implementation of ses_iter_next(). 
iter->type_index = 0; iter->type_element_index = ITERATOR_INDEX_INVALID; iter->global_element_index = ITERATOR_INDEX_INVALID; iter->individual_element_index = ITERATOR_INDEX_INVALID; iter->saved_individual_element_index = ITERATOR_INDEX_INVALID; } /** * \brief Initialize the storage of a SES iterator and reset it to * the position just before the first element of the * configuration. * * \param enc The SES softc for the SES instance whose configuration * will be enumerated by this iterator. * \param iter The iterator object to initialize. */ static void ses_iter_init(enc_softc_t *enc, enc_cache_t *cache, struct ses_iterator *iter) { iter->enc = enc; iter->cache = cache; ses_iter_reset(iter); } /** * \brief Traverse the provided SES iterator to the next element * within the configuration. * * \param iter The iterator to move. * * \return If a valid next element exists, a pointer to its enc_element_t. * Otherwise NULL. */ static enc_element_t * ses_iter_next(struct ses_iterator *iter) { ses_cache_t *ses_cache; const ses_type_t *element_type; ses_cache = iter->cache->private; /* * Note: Treat nelms as signed, so we will hit this case * and immediately terminate the iteration if the * configuration has 0 objects. */ if (iter->global_element_index >= (int)iter->cache->nelms - 1) { /* Elements exhausted. */ iter->type_index = ITERATOR_INDEX_END; iter->type_element_index = ITERATOR_INDEX_END; iter->global_element_index = ITERATOR_INDEX_END; iter->individual_element_index = ITERATOR_INDEX_END; return (NULL); } KASSERT((iter->type_index < ses_cache->ses_ntypes), ("Corrupted element iterator. %d not less than %d", iter->type_index, ses_cache->ses_ntypes)); element_type = &ses_cache->ses_types[iter->type_index]; iter->global_element_index++; iter->type_element_index++; /* * There is an object for overall type status in addition * to one for each allowed element, but only if the element * count is non-zero. */ if (iter->type_element_index > element_type->hdr->etype_maxelt) { /* * We've exhausted the elements of this type. * This next element belongs to the next type. */ iter->type_index++; iter->type_element_index = 0; iter->saved_individual_element_index = iter->individual_element_index; iter->individual_element_index = ITERATOR_INDEX_INVALID; } if (iter->type_element_index > 0) { if (iter->type_element_index == 1) { iter->individual_element_index = iter->saved_individual_element_index; } iter->individual_element_index++; } return (&iter->cache->elm_map[iter->global_element_index]); } /** * Element index types tracked by a SES iterator. */ typedef enum { /** * Index relative to all elements (overall and individual) * in the system. */ SES_ELEM_INDEX_GLOBAL, /** * \brief Index relative to all individual elements in the system. * * This index counts only individual elements, skipping overall * status elements. This is the index space of the additional * element status page (page 0xa). */ SES_ELEM_INDEX_INDIVIDUAL } ses_elem_index_type_t; /** * \brief Move the provided iterator forwards or backwards to the object * having the given index. * * \param iter The iterator on which to perform the seek. * \param element_index The index of the element to find. * \param index_type The type (global or individual) of element_index. * * \return If the element is found, a pointer to its enc_element_t. * Otherwise NULL.
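 *
 * For example (a sketch; the index value is hypothetical), a caller
 * positioned anywhere may jump straight to the sixth individual
 * element with:
 *
 *	element = ses_iter_seek_to(&iter, 5, SES_ELEM_INDEX_INDIVIDUAL);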
*/ static enc_element_t * ses_iter_seek_to(struct ses_iterator *iter, int element_index, ses_elem_index_type_t index_type) { enc_element_t *element; int *cur_index; if (index_type == SES_ELEM_INDEX_GLOBAL) cur_index = &iter->global_element_index; else cur_index = &iter->individual_element_index; if (*cur_index == element_index) { /* Already there. */ return (&iter->cache->elm_map[iter->global_element_index]); } ses_iter_reset(iter); while ((element = ses_iter_next(iter)) != NULL && *cur_index != element_index) ; if (*cur_index != element_index) return (NULL); return (element); } #if 0 static int ses_encode(enc_softc_t *, uint8_t *, int, int, struct ses_comstat *); #endif static int ses_set_timed_completion(enc_softc_t *, uint8_t); #if 0 static int ses_putstatus(enc_softc_t *, int, struct ses_comstat *); #endif static void ses_poll_status(enc_softc_t *); static void ses_print_addl_data(enc_softc_t *, enc_element_t *); /*=========================== SES cleanup routines ===========================*/ static void ses_cache_free_elm_addlstatus(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *ses_cache; ses_cache_t *other_ses_cache; enc_element_t *cur_elm; enc_element_t *last_elm; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache = cache->private; if (ses_cache->elm_addlstatus_page == NULL) return; for (cur_elm = cache->elm_map, last_elm = &cache->elm_map[cache->nelms]; cur_elm != last_elm; cur_elm++) { ses_element_t *elmpriv; elmpriv = cur_elm->elm_private; /* Clear references to the additional status page. */ bzero(&elmpriv->addl, sizeof(elmpriv->addl)); } other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->elm_addlstatus_page != ses_cache->elm_addlstatus_page) ENC_FREE(ses_cache->elm_addlstatus_page); ses_cache->elm_addlstatus_page = NULL; } static void ses_cache_free_elm_descs(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *ses_cache; ses_cache_t *other_ses_cache; enc_element_t *cur_elm; enc_element_t *last_elm; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache = cache->private; if (ses_cache->elm_descs_page == NULL) return; for (cur_elm = cache->elm_map, last_elm = &cache->elm_map[cache->nelms]; cur_elm != last_elm; cur_elm++) { ses_element_t *elmpriv; elmpriv = cur_elm->elm_private; elmpriv->descr_len = 0; elmpriv->descr = NULL; } other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->elm_descs_page != ses_cache->elm_descs_page) ENC_FREE(ses_cache->elm_descs_page); ses_cache->elm_descs_page = NULL; } static void ses_cache_free_status(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *ses_cache; ses_cache_t *other_ses_cache; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache = cache->private; if (ses_cache->status_page == NULL) return; other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->status_page != ses_cache->status_page) ENC_FREE(ses_cache->status_page); ses_cache->status_page = NULL; } static void ses_cache_free_elm_map(enc_softc_t *enc, enc_cache_t *cache) { enc_element_t *cur_elm; enc_element_t *last_elm; ENC_DLOG(enc, "%s: enter\n", __func__); if (cache->elm_map == NULL) return; ses_cache_free_elm_descs(enc, cache); ses_cache_free_elm_addlstatus(enc, cache); for (cur_elm = cache->elm_map, last_elm = &cache->elm_map[cache->nelms]; cur_elm != last_elm; cur_elm++) { ENC_FREE_AND_NULL(cur_elm->elm_private); } ENC_FREE_AND_NULL(cache->elm_map); cache->nelms = 0; ENC_DLOG(enc, "%s: exit\n", __func__); } static void ses_cache_free(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *other_ses_cache; 
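	/*
	 * Note: the primary (enc_cache) and daemon (enc_daemon_cache)
	 * caches may share the same constant page buffers after
	 * ses_cache_clone(), so each of the free routines used here
	 * releases a buffer only once the other cache no longer
	 * references it.
	 */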
ses_cache_t *ses_cache; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache_free_elm_addlstatus(enc, cache); ses_cache_free_status(enc, cache); ses_cache_free_elm_map(enc, cache); ses_cache = cache->private; ses_cache->ses_ntypes = 0; other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->subencs != ses_cache->subencs) ENC_FREE(ses_cache->subencs); ses_cache->subencs = NULL; if (other_ses_cache->ses_types != ses_cache->ses_types) ENC_FREE(ses_cache->ses_types); ses_cache->ses_types = NULL; if (other_ses_cache->cfg_page != ses_cache->cfg_page) ENC_FREE(ses_cache->cfg_page); ses_cache->cfg_page = NULL; ENC_DLOG(enc, "%s: exit\n", __func__); } static void ses_cache_clone(enc_softc_t *enc, enc_cache_t *src, enc_cache_t *dst) { ses_cache_t *dst_ses_cache; ses_cache_t *src_ses_cache; enc_element_t *src_elm; enc_element_t *dst_elm; enc_element_t *last_elm; ses_cache_free(enc, dst); src_ses_cache = src->private; dst_ses_cache = dst->private; /* * The cloned enclosure cache and ses specific cache are * mostly identical to the source. */ *dst = *src; *dst_ses_cache = *src_ses_cache; /* * But the ses cache storage is still independent. Restore * the pointer that was clobbered by the structure copy above. */ dst->private = dst_ses_cache; /* * The element map is independent even though it starts out * pointing to the same constant page data. */ dst->elm_map = malloc(dst->nelms * sizeof(enc_element_t), M_SCSIENC, M_WAITOK); memcpy(dst->elm_map, src->elm_map, dst->nelms * sizeof(enc_element_t)); for (dst_elm = dst->elm_map, src_elm = src->elm_map, last_elm = &src->elm_map[src->nelms]; src_elm != last_elm; src_elm++, dst_elm++) { dst_elm->elm_private = malloc(sizeof(ses_element_t), M_SCSIENC, M_WAITOK); memcpy(dst_elm->elm_private, src_elm->elm_private, sizeof(ses_element_t)); } } /* Structure accessors. These are strongly typed to avoid errors. */ int ses_elm_sas_descr_type(union ses_elm_sas_hdr *obj) { return ((obj)->base_hdr.byte1 >> 6); } int ses_elm_addlstatus_proto(struct ses_elm_addlstatus_base_hdr *hdr) { return ((hdr)->byte0 & 0xf); } int ses_elm_addlstatus_eip(struct ses_elm_addlstatus_base_hdr *hdr) { return ((hdr)->byte0 >> 4) & 0x1; } int ses_elm_addlstatus_invalid(struct ses_elm_addlstatus_base_hdr *hdr) { return ((hdr)->byte0 >> 7); } int ses_elm_sas_type0_not_all_phys(union ses_elm_sas_hdr *hdr) { return ((hdr)->type0_noneip.byte1 & 0x1); } int ses_elm_sas_dev_phy_sata_dev(struct ses_elm_sas_device_phy *phy) { return ((phy)->target_ports & 0x1); } int ses_elm_sas_dev_phy_sata_port(struct ses_elm_sas_device_phy *phy) { return ((phy)->target_ports >> 7); } int ses_elm_sas_dev_phy_dev_type(struct ses_elm_sas_device_phy *phy) { return (((phy)->byte0 >> 4) & 0x7); } /** * \brief Verify that the cached configuration data in our softc * is valid for processing the page data corresponding to * the provided page header. * * \param ses_cache The SES cache to validate. * \param gen_code The 4 byte generation code from a SES diagnostic * page header. * * \return non-zero if true, 0 if false. */ static int ses_config_cache_valid(ses_cache_t *ses_cache, const uint8_t *gen_code) { uint32_t cache_gc; uint32_t cur_gc; if (ses_cache->cfg_page == NULL) return (0); cache_gc = scsi_4btoul(ses_cache->cfg_page->hdr.gen_code); cur_gc = scsi_4btoul(gen_code); return (cache_gc == cur_gc); } /** * Function signature for consumers of the ses_devids_iter() interface. 
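 *
 * A conforming consumer (the names are hypothetical) would be
 * declared as:
 *
 *	static void
 *	example_devid_cb(enc_softc_t *enc, enc_element_t *elm,
 *	    struct scsi_vpd_id_descriptor *devid, void *arg);
 *
 * and passed to ses_devids_iter() together with its opaque argument.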
*/ typedef void ses_devid_callback_t(enc_softc_t *, enc_element_t *, struct scsi_vpd_id_descriptor *, void *); /** * \brief Iterate over and create vpd device id records from the * additional element status data for elm, passing that data * to the provided callback. * * \param enc SES instance containing elm * \param elm Element for which to extract device ID data. * \param callback The callback function to invoke on each generated * device id descriptor for elm. * \param callback_arg Argument passed through to callback on each invocation. */ static void ses_devids_iter(enc_softc_t *enc, enc_element_t *elm, ses_devid_callback_t *callback, void *callback_arg) { ses_element_t *elmpriv; struct ses_addl_status *addl; u_int i; size_t devid_record_size; elmpriv = elm->elm_private; addl = &(elmpriv->addl); /* * Don't assume this object has additional status information, or * that it is a SAS device, or that it is a device slot device. */ if (addl->hdr == NULL || addl->proto_hdr.sas == NULL || addl->proto_data.sasdev_phys == NULL) return; devid_record_size = SVPD_DEVICE_ID_DESC_HDR_LEN + sizeof(struct scsi_vpd_id_naa_ieee_reg); for (i = 0; i < addl->proto_hdr.sas->base_hdr.num_phys; i++) { uint8_t devid_buf[devid_record_size]; struct scsi_vpd_id_descriptor *devid; uint8_t *phy_addr; devid = (struct scsi_vpd_id_descriptor *)devid_buf; phy_addr = addl->proto_data.sasdev_phys[i].phy_addr; devid->proto_codeset = (SCSI_PROTO_SAS << SVPD_ID_PROTO_SHIFT) | SVPD_ID_CODESET_BINARY; devid->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA; devid->reserved = 0; devid->length = sizeof(struct scsi_vpd_id_naa_ieee_reg); memcpy(devid->identifier, phy_addr, devid->length); callback(enc, elm, devid, callback_arg); } } /** * Function signature for consumers of the ses_paths_iter() interface. */ typedef void ses_path_callback_t(enc_softc_t *, enc_element_t *, struct cam_path *, void *); /** * Argument package passed through ses_devids_iter() by * ses_paths_iter() to ses_path_iter_devid_callback(). */ typedef struct ses_path_iter_args { ses_path_callback_t *callback; void *callback_arg; } ses_path_iter_args_t; /** * ses_devids_iter() callback function used by ses_paths_iter() * to map device ids to peripheral driver instances. * * \param enc SES instance containing elm * \param elm Element on which device ID matching is active. * \param periph A device ID corresponding to elm. * \param arg Argument passed through to callback on each invocation. 
*/ static void ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem, struct scsi_vpd_id_descriptor *devid, void *arg) { struct ccb_dev_match cdm; struct dev_match_pattern match_pattern; struct dev_match_result match_result; struct device_match_result *device_match; struct device_match_pattern *device_pattern; ses_path_iter_args_t *args; args = (ses_path_iter_args_t *)arg; match_pattern.type = DEV_MATCH_DEVICE; device_pattern = &match_pattern.pattern.device_pattern; device_pattern->flags = DEV_MATCH_DEVID; device_pattern->data.devid_pat.id_len = offsetof(struct scsi_vpd_id_descriptor, identifier) + devid->length; memcpy(device_pattern->data.devid_pat.id, devid, device_pattern->data.devid_pat.id_len); memset(&cdm, 0, sizeof(cdm)); if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) return; cdm.ccb_h.func_code = XPT_DEV_MATCH; cdm.num_patterns = 1; cdm.patterns = &match_pattern; cdm.pattern_buf_len = sizeof(match_pattern); cdm.match_buf_len = sizeof(match_result); cdm.matches = &match_result; xpt_action((union ccb *)&cdm); xpt_free_path(cdm.ccb_h.path); if ((cdm.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP || (cdm.status != CAM_DEV_MATCH_LAST && cdm.status != CAM_DEV_MATCH_MORE) || cdm.num_matches == 0) return; device_match = &match_result.result.device_result; if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL, device_match->path_id, device_match->target_id, device_match->target_lun) != CAM_REQ_CMP) return; args->callback(enc, elem, cdm.ccb_h.path, args->callback_arg); xpt_free_path(cdm.ccb_h.path); } /** * \brief Iterate over and find the matching periph objects for the * specified element. * * \param enc SES instance containing elm * \param elm Element for which to perform periph object matching. * \param callback The callback function to invoke with each matching * periph object. * \param callback_arg Argument passed through to callback on each invocation. */ static void ses_paths_iter(enc_softc_t *enc, enc_element_t *elm, ses_path_callback_t *callback, void *callback_arg) { ses_path_iter_args_t args; args.callback = callback; args.callback_arg = callback_arg; ses_devids_iter(enc, elm, ses_path_iter_devid_callback, &args); } /** * ses_paths_iter() callback function used by ses_get_elmdevname() * to record periph driver instance strings corresponding to a SES * element. * * \param enc SES instance containing elm * \param elm Element on which periph matching is active. * \param periph A periph instance that matches elm. * \param arg Argument passed through to callback on each invocation. */ static void ses_elmdevname_callback(enc_softc_t *enc, enc_element_t *elem, struct cam_path *path, void *arg) { struct sbuf *sb; sb = (struct sbuf *)arg; cam_periph_list(path, sb); } /** * Argument package passed through ses_paths_iter() to * ses_getcampath_callback. */ typedef struct ses_setphyspath_callback_args { struct sbuf *physpath; int num_set; } ses_setphyspath_callback_args_t; /** * \brief ses_paths_iter() callback to set the physical path on the * CAM EDT entries corresponding to a given SES element. * * \param enc SES instance containing elm * \param elm Element on which periph matching is active. * \param periph A periph instance that matches elm. * \param arg Argument passed through to callback on each invocation. 
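 *
 * The string stored is the one assembled by ses_set_physpath()
 * below; a representative (hypothetical) value would be:
 *
 *	id1,enc@n500304800029b811/type@0/slot@4/elmdesc@Slot04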
*/ static void ses_setphyspath_callback(enc_softc_t *enc, enc_element_t *elm, struct cam_path *path, void *arg) { struct ccb_dev_advinfo cdai; ses_setphyspath_callback_args_t *args; char *old_physpath; args = (ses_setphyspath_callback_args_t *)arg; old_physpath = malloc(MAXPATHLEN, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_PHYS_PATH; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = MAXPATHLEN; cdai.buf = old_physpath; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (strcmp(old_physpath, sbuf_data(args->physpath)) != 0) { xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_PHYS_PATH; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = sbuf_len(args->physpath); cdai.buf = sbuf_data(args->physpath); xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status == CAM_REQ_CMP) args->num_set++; } cam_periph_unlock(enc->periph); free(old_physpath, M_SCSIENC); } /** * \brief Set a device's physical path string in CAM XPT. * * \param enc SES instance containing elm * \param elm Element to publish physical path string for * \param iter Iterator whose state corresponds to elm * * \return 0 on success, errno otherwise. */ static int ses_set_physpath(enc_softc_t *enc, enc_element_t *elm, struct ses_iterator *iter) { struct ccb_dev_advinfo cdai; ses_setphyspath_callback_args_t args; int i, ret; struct sbuf sb; struct scsi_vpd_id_descriptor *idd; uint8_t *devid; ses_element_t *elmpriv; const char *c; ret = EIO; devid = NULL; /* * Assemble the components of the physical path starting with * the device ID of the enclosure itself. */ xpt_setup_ccb(&cdai.ccb_h, enc->periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.buftype = CDAI_TYPE_SCSI_DEVID; cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; cdai.buf = devid = malloc(cdai.bufsiz, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); cam_periph_unlock(enc->periph); if (cdai.ccb_h.status != CAM_REQ_CMP) goto out; idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_naa_ieee_reg); if (idd == NULL) goto out; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) { ret = ENOMEM; goto out; } /* Next, generate the physical path string */ sbuf_printf(&sb, "id1,enc@n%jx/type@%x/slot@%x", scsi_8btou64(idd->identifier), iter->type_index, iter->type_element_index); /* Append the element descriptor if one exists */ elmpriv = elm->elm_private; if (elmpriv->descr != NULL && elmpriv->descr_len > 0) { sbuf_cat(&sb, "/elmdesc@"); for (i = 0, c = elmpriv->descr; i < elmpriv->descr_len; i++, c++) { if (!isprint(*c) || isspace(*c) || *c == '/') sbuf_putc(&sb, '_'); else sbuf_putc(&sb, *c); } } sbuf_finish(&sb); /* * Set this physical path on any CAM devices with a device ID * descriptor that matches one created from the SES additional * status data for this element. */ args.physpath= &sb; args.num_set = 0; ses_paths_iter(enc, elm, ses_setphyspath_callback, &args); sbuf_delete(&sb); ret = args.num_set == 0 ? 
ENOENT : 0; out: if (devid != NULL) ENC_FREE(devid); return (ret); } /** * \brief Helper to set the CDB fields appropriately. * * \param cdb Buffer containing the cdb. * \param pagenum SES diagnostic page to query for. * \param dir Direction of query. */ static void ses_page_cdb(char *cdb, int bufsiz, SesDiagPageCodes pagenum, int dir) { /* Ref: SPC-4 r25 Section 6.20 Table 223 */ if (dir == CAM_DIR_IN) { cdb[0] = RECEIVE_DIAGNOSTIC; cdb[1] = 1; /* Set page code valid bit */ cdb[2] = pagenum; } else { cdb[0] = SEND_DIAGNOSTIC; cdb[1] = 0x10; cdb[2] = pagenum; } cdb[3] = bufsiz >> 8; /* high bits */ cdb[4] = bufsiz & 0xff; /* low bits */ cdb[5] = 0; } /** * \brief Discover whether this instance supports timed completion of a * RECEIVE DIAGNOSTIC RESULTS command requesting the Enclosure Status * page, and store the result in the softc, updating if necessary. * * \param enc SES instance to query and update. * \param tc_en Value of timed completion to set (see \return). * * \return 1 if timed completion enabled, 0 otherwise. */ static int ses_set_timed_completion(enc_softc_t *enc, uint8_t tc_en) { union ccb *ccb; struct cam_periph *periph; struct ses_mgmt_mode_page *mgmt; uint8_t *mode_buf; size_t mode_buf_len; ses_softc_t *ses; periph = enc->periph; ses = enc->enc_private; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); mode_buf_len = sizeof(struct ses_mgmt_mode_page); mode_buf = ENC_MALLOCZ(mode_buf_len); if (mode_buf == NULL) goto out; scsi_mode_sense(&ccb->csio, /*retries*/4, NULL, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SES_MGMT_MODE_PAGE_CODE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000); /* * Ignore illegal request errors, as they are quite common and we * will print something out in that case anyway. */ cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS|SF_QUIET_IR, NULL); if (ccb->ccb_h.status != CAM_REQ_CMP) { ENC_VLOG(enc, "Timed Completion Unsupported\n"); goto release; } /* Skip the mode select if the desired value is already set */ mgmt = (struct ses_mgmt_mode_page *)mode_buf; if ((mgmt->byte5 & SES_MGMT_TIMED_COMP_EN) == tc_en) goto done; /* Value is not what we wanted, set it */ if (tc_en) mgmt->byte5 |= SES_MGMT_TIMED_COMP_EN; else mgmt->byte5 &= ~SES_MGMT_TIMED_COMP_EN; /* SES2r20: a completion time of zero means as long as possible */ bzero(&mgmt->max_comp_time, sizeof(mgmt->max_comp_time)); scsi_mode_select(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG, /*page_fmt*/FALSE, /*save_pages*/TRUE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000); cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS, NULL); if (ccb->ccb_h.status != CAM_REQ_CMP) { ENC_VLOG(enc, "Timed Completion Set Failed\n"); goto release; } done: if ((mgmt->byte5 & SES_MGMT_TIMED_COMP_EN) != 0) { ENC_LOG(enc, "Timed Completion Enabled\n"); ses->ses_flags |= SES_FLAG_TIMEDCOMP; } else { ENC_LOG(enc, "Timed Completion Disabled\n"); ses->ses_flags &= ~SES_FLAG_TIMEDCOMP; } release: ENC_FREE(mode_buf); xpt_release_ccb(ccb); out: return (ses->ses_flags & SES_FLAG_TIMEDCOMP); } /** * \brief Process the list of supported pages and update flags. * * \param enc SES device to query. * \param buf Buffer containing the config page. * \param xfer_len Length of the config page in the buffer. * * \return 0 on success, errno otherwise. 
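 *
 * For illustration, a (hypothetical) supported pages response of
 *
 *	0x00 0x00 0x00 0x05  0x00 0x01 0x02 0x07 0x0a
 *
 * (a four byte header carrying page code 0 and big-endian length 5)
 * advertises pages 0x0, 0x1, 0x2, 0x7 and 0xa, and would cause both
 * SES_FLAG_DESC and SES_FLAG_ADDLSTATUS to be set below.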
*/ static int ses_process_pages(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { ses_softc_t *ses; struct scsi_diag_page *page; int err, i, length; CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("entering %s(%p, %d)\n", __func__, bufp, xfer_len)); ses = enc->enc_private; err = -1; if (error != 0) { err = error; goto out; } if (xfer_len < sizeof(*page)) { ENC_VLOG(enc, "Unable to parse Diag Pages List Header\n"); err = EIO; goto out; } page = (struct scsi_diag_page *)*bufp; length = scsi_2btoul(page->length); if (length + offsetof(struct scsi_diag_page, params) > xfer_len) { ENC_VLOG(enc, "Diag Pages List Too Long\n"); goto out; } ENC_DLOG(enc, "%s: page length %d, xfer_len %d\n", __func__, length, xfer_len); err = 0; for (i = 0; i < length; i++) { if (page->params[i] == SesElementDescriptor) ses->ses_flags |= SES_FLAG_DESC; else if (page->params[i] == SesAddlElementStatus) ses->ses_flags |= SES_FLAG_ADDLSTATUS; } out: ENC_DLOG(enc, "%s: exiting with err %d\n", __func__, err); return (err); } /** * \brief Process the config page and update associated structures. * * \param enc SES device to query. * \param buf Buffer containing the config page. * \param xfer_len Length of the config page in the buffer. * * \return 0 on success, errno otherwise. */ static int ses_process_config(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct ses_iterator iter; ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; uint8_t *buf; int length; int err; int nelm; int ntype; struct ses_cfg_page *cfg_page; struct ses_enc_desc *buf_subenc; const struct ses_enc_desc **subencs; const struct ses_enc_desc **cur_subenc; const struct ses_enc_desc **last_subenc; ses_type_t *ses_types; ses_type_t *sestype; const struct ses_elm_type_desc *cur_buf_type; const struct ses_elm_type_desc *last_buf_type; uint8_t *last_valid_byte; enc_element_t *element; const char *type_text; CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("entering %s(%p, %d)\n", __func__, bufp, xfer_len)); ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; buf = *bufp; err = -1; if (error != 0) { err = error; goto out; } if (xfer_len < sizeof(cfg_page->hdr)) { ENC_VLOG(enc, "Unable to parse SES Config Header\n"); err = EIO; goto out; } cfg_page = (struct ses_cfg_page *)buf; length = ses_page_length(&cfg_page->hdr); if (length > xfer_len) { ENC_VLOG(enc, "Enclosure Config Page Too Long\n"); goto out; } last_valid_byte = &buf[length - 1]; ENC_DLOG(enc, "%s: total page length %d, xfer_len %d\n", __func__, length, xfer_len); err = 0; if (ses_config_cache_valid(ses_cache, cfg_page->hdr.gen_code)) { /* Our cache is still valid. Proceed to fetching status. */ goto out; } /* Cache is no longer valid. Free old data to make way for new. */ ses_cache_free(enc, enc_cache); ENC_VLOG(enc, "Generation Code 0x%x has %d SubEnclosures\n", scsi_4btoul(cfg_page->hdr.gen_code), ses_cfg_page_get_num_subenc(cfg_page)); /* Take ownership of the buffer. */ ses_cache->cfg_page = cfg_page; *bufp = NULL; /* * Now waltz through all the subenclosures summing the number of * types available in each. */ subencs = malloc(ses_cfg_page_get_num_subenc(cfg_page) * sizeof(*subencs), M_SCSIENC, M_WAITOK|M_ZERO); /* * Sub-enclosure data is const after construction (i.e. when * accessed via our cache object. * * The cast here is not required in C++ but C99 is not so * sophisticated (see C99 6.5.16.1(1)). 
*/ ses_cache->ses_nsubencs = ses_cfg_page_get_num_subenc(cfg_page); ses_cache->subencs = subencs; buf_subenc = cfg_page->subencs; cur_subenc = subencs; last_subenc = &subencs[ses_cache->ses_nsubencs - 1]; ntype = 0; while (cur_subenc <= last_subenc) { if (!ses_enc_desc_is_complete(buf_subenc, last_valid_byte)) { ENC_VLOG(enc, "Enclosure %d Beyond End of " "Descriptors\n", cur_subenc - subencs); err = EIO; goto out; } ENC_VLOG(enc, " SubEnclosure ID %d, %d Types With this ID, " "Descriptor Length %d, offset %d\n", buf_subenc->subenc_id, buf_subenc->num_types, buf_subenc->length, &buf_subenc->byte0 - buf); ENC_VLOG(enc, "WWN: %jx\n", (uintmax_t)scsi_8btou64(buf_subenc->logical_id)); ntype += buf_subenc->num_types; *cur_subenc = buf_subenc; cur_subenc++; buf_subenc = ses_enc_desc_next(buf_subenc); } /* Process the type headers. */ ses_types = malloc(ntype * sizeof(*ses_types), M_SCSIENC, M_WAITOK|M_ZERO); /* * Type data is const after construction (i.e. when accessed via * our cache object. */ ses_cache->ses_ntypes = ntype; ses_cache->ses_types = ses_types; cur_buf_type = (const struct ses_elm_type_desc *) (&(*last_subenc)->length + (*last_subenc)->length + 1); last_buf_type = cur_buf_type + ntype - 1; type_text = (const uint8_t *)(last_buf_type + 1); nelm = 0; sestype = ses_types; while (cur_buf_type <= last_buf_type) { if (&cur_buf_type->etype_txt_len > last_valid_byte) { ENC_VLOG(enc, "Runt Enclosure Type Header %d\n", sestype - ses_types); err = EIO; goto out; } sestype->hdr = cur_buf_type; sestype->text = type_text; type_text += cur_buf_type->etype_txt_len; ENC_VLOG(enc, " Type Desc[%d]: Type 0x%x, MaxElt %d, In Subenc " "%d, Text Length %d: %.*s\n", sestype - ses_types, sestype->hdr->etype_elm_type, sestype->hdr->etype_maxelt, sestype->hdr->etype_subenc, sestype->hdr->etype_txt_len, sestype->hdr->etype_txt_len, sestype->text); nelm += sestype->hdr->etype_maxelt + /*overall status element*/1; sestype++; cur_buf_type++; } /* Create the object map. */ enc_cache->elm_map = malloc(nelm * sizeof(enc_element_t), M_SCSIENC, M_WAITOK|M_ZERO); enc_cache->nelms = nelm; ses_iter_init(enc, enc_cache, &iter); while ((element = ses_iter_next(&iter)) != NULL) { const struct ses_elm_type_desc *thdr; ENC_DLOG(enc, "%s: checking obj %d(%d,%d)\n", __func__, iter.global_element_index, iter.type_index, nelm, iter.type_element_index); thdr = ses_cache->ses_types[iter.type_index].hdr; element->subenclosure = thdr->etype_subenc; element->enctype = thdr->etype_elm_type; element->overall_status_elem = iter.type_element_index == 0; element->elm_private = malloc(sizeof(ses_element_t), M_SCSIENC, M_WAITOK|M_ZERO); ENC_DLOG(enc, "%s: creating elmpriv %d(%d,%d) subenc %d " "type 0x%x\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index, thdr->etype_subenc, thdr->etype_elm_type); } err = 0; out: if (err) ses_cache_free(enc, enc_cache); else { ses_poll_status(enc); enc_update_request(enc, SES_PUBLISH_CACHE); } ENC_DLOG(enc, "%s: exiting with err %d\n", __func__, err); return (err); } /** * \brief Update the status page and associated structures. * * \param enc SES softc to update for. * \param buf Buffer containing the status page. * \param bufsz Amount of data in the buffer. * * \return 0 on success, errno otherwise. 
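 *
 * \note Sketch of the page consumed here (SES2; illustrative): an
 *       8-byte header (page code, status flags, length, generation
 *       code) followed by one 4-byte status element per element, in
 *       configuration-page order, with each type's overall status
 *       element preceding its individual elements.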
 */
static int
ses_process_status(enc_softc_t *enc, struct enc_fsm_state *state,
    union ccb *ccb, uint8_t **bufp, int error, int xfer_len)
{
	struct ses_iterator iter;
	enc_element_t *element;
	ses_softc_t *ses;
	enc_cache_t *enc_cache;
	ses_cache_t *ses_cache;
	uint8_t *buf;
	int err = -1;
	int length;
	struct ses_status_page *page;
	union ses_status_element *cur_stat;
	union ses_status_element *last_stat;

	ses = enc->enc_private;
	enc_cache = &enc->enc_daemon_cache;
	ses_cache = enc_cache->private;
	buf = *bufp;

	ENC_DLOG(enc, "%s: enter (%p, %p, %d)\n", __func__, enc, buf, xfer_len);
	page = (struct ses_status_page *)buf;
	length = ses_page_length(&page->hdr);

	if (error != 0) {
		err = error;
		goto out;
	}
	/*
	 * Make sure the length fits in the buffer.
	 *
	 * XXX all this means is that the page is larger than the space
	 * we allocated.  Since we use a statically sized buffer, this
	 * could happen... Need to use dynamic discovery of the size.
	 */
	if (length > xfer_len) {
		ENC_VLOG(enc, "Enclosure Status Page Too Long\n");
		goto out;
	}
	/* Check for simple enclosure reporting short enclosure status. */
	if (length >= 4 && page->hdr.page_code == SesShortStatus) {
		ENC_DLOG(enc, "Got Short Enclosure Status page\n");
		ses->ses_flags &= ~(SES_FLAG_ADDLSTATUS | SES_FLAG_DESC);
		ses_cache_free(enc, enc_cache);
		enc_cache->enc_status = page->hdr.page_specific_flags;
		enc_update_request(enc, SES_PUBLISH_CACHE);
		err = 0;
		goto out;
	}
	/* Make sure the length contains at least one header and status */
	if (length < (sizeof(*page) + sizeof(*page->elements))) {
		ENC_VLOG(enc, "Enclosure Status Page Too Short\n");
		goto out;
	}

	if (!ses_config_cache_valid(ses_cache, page->hdr.gen_code)) {
		ENC_DLOG(enc, "%s: Generation count change detected\n",
		    __func__);
		enc_update_request(enc, SES_UPDATE_GETCONFIG);
		goto out;
	}

	ses_cache_free_status(enc, enc_cache);
	ses_cache->status_page = page;
	*bufp = NULL;

	enc_cache->enc_status = page->hdr.page_specific_flags;

	/*
	 * Read in individual element status.  The element order
	 * matches the order reported in the config page (i.e. the
	 * order of an unfiltered iteration of the config objects).
	 */
	ses_iter_init(enc, enc_cache, &iter);
	cur_stat = page->elements;
	last_stat = (union ses_status_element *)
	    &buf[length - sizeof(*last_stat)];
	ENC_DLOG(enc, "%s: total page length %d, xfer_len %d\n",
	    __func__, length, xfer_len);
	while (cur_stat <= last_stat
	    && (element = ses_iter_next(&iter)) != NULL) {
		ENC_DLOG(enc, "%s: obj %d(%d,%d) off=0x%tx status=%jx\n",
		    __func__, iter.global_element_index, iter.type_index,
		    iter.type_element_index, (uint8_t *)cur_stat - buf,
		    scsi_4btoul(cur_stat->bytes));

		memcpy(&element->encstat, cur_stat, sizeof(element->encstat));
		element->svalid = 1;
		cur_stat++;
	}

	if (ses_iter_next(&iter) != NULL) {
		ENC_VLOG(enc, "Status page, length insufficient for "
		    "expected number of objects\n");
	} else {
		if (cur_stat <= last_stat)
			ENC_VLOG(enc, "Status page, exhausted objects before "
			    "exhausting page\n");
		enc_update_request(enc, SES_PUBLISH_CACHE);
		err = 0;
	}
out:
	ENC_DLOG(enc, "%s: exiting with error %d\n", __func__, err);
	return (err);
}

typedef enum {
	/**
	 * The enclosure should not provide additional element
	 * status for this element type in page 0x0A.
	 *
	 * \note This status is returned for any types not
	 *       listed in SES3r02.  Further types added in a
	 *       future specification will be incorrectly
	 *       classified.
	 */
	TYPE_ADDLSTATUS_NONE,

	/**
	 * The element type provides additional element status
	 * in page 0x0A.
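	 *
	 * (For this driver those are the Device Slot, Array Device
	 * Slot, and SAS Expander element types; see
	 * ses_typehasaddlstatus() below.)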
	 */
	TYPE_ADDLSTATUS_MANDATORY,

	/**
	 * The element type may provide additional element status
	 * in page 0x0A, but is not required to do so.
	 */
	TYPE_ADDLSTATUS_OPTIONAL
} ses_addlstatus_avail_t;

/**
 * \brief Check to see whether a given type (as obtained via type headers) is
 *	  supported by the additional status command.
 *
 * \param enc     SES softc to check.
 * \param typidx  Type index to check for.
 *
 * \return  An enumeration indicating if additional status is mandatory,
 *          optional, or not required for this type.
 */
static ses_addlstatus_avail_t
ses_typehasaddlstatus(enc_softc_t *enc, uint8_t typidx)
{
	enc_cache_t *enc_cache;
	ses_cache_t *ses_cache;

	enc_cache = &enc->enc_daemon_cache;
	ses_cache = enc_cache->private;
	switch(ses_cache->ses_types[typidx].hdr->etype_elm_type) {
	case ELMTYP_DEVICE:
	case ELMTYP_ARRAY_DEV:
	case ELMTYP_SAS_EXP:
		return (TYPE_ADDLSTATUS_MANDATORY);
	case ELMTYP_SCSI_INI:
	case ELMTYP_SCSI_TGT:
	case ELMTYP_ESCC:
		return (TYPE_ADDLSTATUS_OPTIONAL);
	default:
		/* No additional status information available. */
		break;
	}
	return (TYPE_ADDLSTATUS_NONE);
}

static int ses_get_elm_addlstatus_fc(enc_softc_t *, enc_cache_t *,
				     uint8_t *, int);
static int ses_get_elm_addlstatus_sas(enc_softc_t *, enc_cache_t *, uint8_t *,
				      int, int, int, int);

/**
 * \brief Parse the additional status element data for each object.
 *
 * \param enc       The SES softc to update.
 * \param buf       The buffer containing the additional status
 *                  element response.
 * \param xfer_len  Size of the buffer.
 *
 * \return  0 on success, errno otherwise.
 */
static int
ses_process_elm_addlstatus(enc_softc_t *enc, struct enc_fsm_state *state,
    union ccb *ccb, uint8_t **bufp, int error, int xfer_len)
{
	struct ses_iterator iter, titer;
	int eip;
	int err;
	int ignore_index = 0;
	int length;
	int offset;
	enc_cache_t *enc_cache;
	ses_cache_t *ses_cache;
	uint8_t *buf;
	ses_element_t *elmpriv;
	const struct ses_page_hdr *hdr;
	enc_element_t *element, *telement;

	enc_cache = &enc->enc_daemon_cache;
	ses_cache = enc_cache->private;
	buf = *bufp;
	err = -1;

	if (error != 0) {
		err = error;
		goto out;
	}
	ses_cache_free_elm_addlstatus(enc, enc_cache);
	ses_cache->elm_addlstatus_page =
	    (struct ses_addl_elem_status_page *)buf;
	*bufp = NULL;

	/*
	 * The objects appear in the same order here as in Enclosure Status,
	 * which itself is ordered by the Type Descriptors from the Config
	 * page.  However, it is necessary to skip elements that are not
	 * supported by this page when counting them.
	 */
	hdr = &ses_cache->elm_addlstatus_page->hdr;
	length = ses_page_length(hdr);
	ENC_DLOG(enc, "Additional Element Status Page Length 0x%x\n", length);
	/* Make sure the length includes at least one header. */
	if (length < sizeof(*hdr)+sizeof(struct ses_elm_addlstatus_base_hdr)) {
		ENC_VLOG(enc, "Runt Additional Element Status Page\n");
		goto out;
	}
	if (length > xfer_len) {
		ENC_VLOG(enc, "Additional Element Status Page Too Long\n");
		goto out;
	}

	if (!ses_config_cache_valid(ses_cache, hdr->gen_code)) {
		ENC_DLOG(enc, "%s: Generation count change detected\n",
		    __func__);
		enc_update_request(enc, SES_UPDATE_GETCONFIG);
		goto out;
	}

	offset = sizeof(struct ses_page_hdr);
	ses_iter_init(enc, enc_cache, &iter);
	while (offset < length
	    && (element = ses_iter_next(&iter)) != NULL) {
		struct ses_elm_addlstatus_base_hdr *elm_hdr;
		int proto_info_len;
		ses_addlstatus_avail_t status_type;

		/*
		 * Additional element status is only provided for
		 * individual elements (i.e. overall status elements
		 * are excluded) and those of the types specified
		 * in the SES spec.
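		 *
		 * Worked example (hypothetical numbers): with elements
		 * [overall, slot0..slot3], slot2 has global index 3 but
		 * individual index 2.  When EIP is set the descriptor
		 * carries an element index, and the EIIOE flag selects
		 * which of those numberings it uses, hence the two
		 * cases below.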
		 */
		status_type = ses_typehasaddlstatus(enc, iter.type_index);
		if (iter.individual_element_index == ITERATOR_INDEX_INVALID
		 || status_type == TYPE_ADDLSTATUS_NONE)
			continue;

		elm_hdr = (struct ses_elm_addlstatus_base_hdr *)&buf[offset];
		eip = ses_elm_addlstatus_eip(elm_hdr);
		if (eip && !ignore_index) {
			struct ses_elm_addlstatus_eip_hdr *eip_hdr;
			int expected_index, index;
			ses_elem_index_type_t index_type;

			eip_hdr = (struct ses_elm_addlstatus_eip_hdr *)elm_hdr;
			if (eip_hdr->byte2 & SES_ADDL_EIP_EIIOE) {
				index_type = SES_ELEM_INDEX_GLOBAL;
				expected_index = iter.global_element_index;
			} else {
				index_type = SES_ELEM_INDEX_INDIVIDUAL;
				expected_index = iter.individual_element_index;
			}
			titer = iter;
			telement = ses_iter_seek_to(&titer,
			    eip_hdr->element_index, index_type);
			if (telement != NULL &&
			    (ses_typehasaddlstatus(enc, titer.type_index) !=
			     TYPE_ADDLSTATUS_NONE ||
			     titer.type_index > ELMTYP_SAS_CONN)) {
				iter = titer;
				element = telement;
			} else
				ignore_index = 1;

			if (eip_hdr->byte2 & SES_ADDL_EIP_EIIOE)
				index = iter.global_element_index;
			else
				index = iter.individual_element_index;
			if (index > expected_index
			 && status_type == TYPE_ADDLSTATUS_MANDATORY) {
				ENC_VLOG(enc, "%s: provided %s element "
				    "index %d skips mandatory status "
				    "element at index %d\n", __func__,
				    (eip_hdr->byte2 & SES_ADDL_EIP_EIIOE) ?
				    "global " : "", index, expected_index);
			}
		}
		elmpriv = element->elm_private;
		elmpriv->addl.hdr = elm_hdr;
		ENC_DLOG(enc, "%s: global element index=%d, type index=%d "
		    "type element index=%d, offset=0x%x, "
		    "byte0=0x%x, length=0x%x\n", __func__,
		    iter.global_element_index, iter.type_index,
		    iter.type_element_index, offset, elmpriv->addl.hdr->byte0,
		    elmpriv->addl.hdr->length);

		/* Skip to after the length field */
		offset += sizeof(struct ses_elm_addlstatus_base_hdr);

		/* Make sure the descriptor is within bounds */
		if ((offset + elmpriv->addl.hdr->length) > length) {
			ENC_VLOG(enc, "Element %d Beyond End "
			    "of Additional Element Status Descriptors\n",
			    iter.global_element_index);
			break;
		}

		/* Advance to the protocol data, skipping eip bytes if needed */
		offset += (eip * SES_EIP_HDR_EXTRA_LEN);
		proto_info_len = elmpriv->addl.hdr->length
			       - (eip * SES_EIP_HDR_EXTRA_LEN);

		/* Errors in this block are ignored as they are non-fatal */
		switch(ses_elm_addlstatus_proto(elmpriv->addl.hdr)) {
		case SPSP_PROTO_FC:
			if (elmpriv->addl.hdr->length == 0)
				break;
			ses_get_elm_addlstatus_fc(enc, enc_cache,
			    &buf[offset], proto_info_len);
			break;
		case SPSP_PROTO_SAS:
			if (elmpriv->addl.hdr->length <= 2)
				break;
			ses_get_elm_addlstatus_sas(enc, enc_cache,
			    &buf[offset], proto_info_len, eip,
			    iter.type_index, iter.global_element_index);
			break;
		default:
			ENC_VLOG(enc, "Element %d: Unknown Additional Element "
			    "Protocol 0x%x\n", iter.global_element_index,
			    ses_elm_addlstatus_proto(elmpriv->addl.hdr));
			break;
		}

		offset += proto_info_len;
	}
	err = 0;
out:
	if (err)
		ses_cache_free_elm_addlstatus(enc, enc_cache);
	enc_update_request(enc, SES_PUBLISH_PHYSPATHS);
	enc_update_request(enc, SES_PUBLISH_CACHE);
	return (err);
}

static int
ses_process_control_request(enc_softc_t *enc, struct enc_fsm_state *state,
    union ccb *ccb, uint8_t **bufp, int error, int xfer_len)
{
	ses_softc_t *ses;

	ses = enc->enc_private;

	/*
	 * Possible errors:
	 *  o Generation count wrong.
	 *  o Some SCSI status error.
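	 *
	 * Either way, every pending control request is completed with
	 * the same error and a status refresh is scheduled.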
*/ ses_terminate_control_requests(&ses->ses_pending_requests, error); ses_poll_status(enc); return (0); } static int ses_publish_physpaths(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct ses_iterator iter; enc_cache_t *enc_cache; enc_element_t *element; enc_cache = &enc->enc_daemon_cache; ses_iter_init(enc, enc_cache, &iter); while ((element = ses_iter_next(&iter)) != NULL) { /* * ses_set_physpath() returns success if we changed * the physpath of any element. This allows us to * only announce devices once regardless of how * many times we process additional element status. */ if (ses_set_physpath(enc, element, &iter) == 0) ses_print_addl_data(enc, element); } return (0); } static int ses_publish_cache(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { sx_xlock(&enc->enc_cache_lock); ses_cache_clone(enc, /*src*/&enc->enc_daemon_cache, /*dst*/&enc->enc_cache); sx_xunlock(&enc->enc_cache_lock); return (0); } /** * \brief Parse the descriptors for each object. * * \param enc The SES softc to update. * \param buf The buffer containing the descriptor list response. * \param xfer_len Size of the buffer. * * \return 0 on success, errno otherwise. */ static int ses_process_elm_descs(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { ses_softc_t *ses; struct ses_iterator iter; enc_element_t *element; int err; int offset; u_long length, plength; enc_cache_t *enc_cache; ses_cache_t *ses_cache; uint8_t *buf; ses_element_t *elmpriv; const struct ses_page_hdr *phdr; const struct ses_elm_desc_hdr *hdr; ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; buf = *bufp; err = -1; if (error != 0) { err = error; goto out; } ses_cache_free_elm_descs(enc, enc_cache); ses_cache->elm_descs_page = (struct ses_elem_descr_page *)buf; *bufp = NULL; phdr = &ses_cache->elm_descs_page->hdr; plength = ses_page_length(phdr); if (xfer_len < sizeof(struct ses_page_hdr)) { ENC_VLOG(enc, "Runt Element Descriptor Page\n"); goto out; } if (plength > xfer_len) { ENC_VLOG(enc, "Element Descriptor Page Too Long\n"); goto out; } if (!ses_config_cache_valid(ses_cache, phdr->gen_code)) { ENC_VLOG(enc, "%s: Generation count change detected\n", __func__); enc_update_request(enc, SES_UPDATE_GETCONFIG); goto out; } offset = sizeof(struct ses_page_hdr); ses_iter_init(enc, enc_cache, &iter); while (offset < plength && (element = ses_iter_next(&iter)) != NULL) { if ((offset + sizeof(struct ses_elm_desc_hdr)) > plength) { ENC_VLOG(enc, "Element %d Descriptor Header Past " "End of Buffer\n", iter.global_element_index); goto out; } hdr = (struct ses_elm_desc_hdr *)&buf[offset]; length = scsi_2btoul(hdr->length); ENC_DLOG(enc, "%s: obj %d(%d,%d) length=%d off=%d\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index, length, offset); if ((offset + sizeof(*hdr) + length) > plength) { ENC_VLOG(enc, "Element%d Descriptor Past " "End of Buffer\n", iter.global_element_index); goto out; } offset += sizeof(*hdr); if (length > 0) { elmpriv = element->elm_private; elmpriv->descr_len = length; elmpriv->descr = &buf[offset]; } /* skip over the descriptor itself */ offset += length; } err = 0; out: if (err == 0) { if (ses->ses_flags & SES_FLAG_ADDLSTATUS) enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS); } enc_update_request(enc, SES_PUBLISH_CACHE); return (err); } static int ses_fill_rcv_diag_io(enc_softc_t *enc, 
struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf) { if (enc->enc_type == ENC_SEMB_SES) { semb_receive_diagnostic_results(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1, state->page_code, buf, state->buf_size, state->timeout); } else { scsi_receive_diagnostic_results(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1, state->page_code, buf, state->buf_size, SSD_FULL_SIZE, state->timeout); } return (0); } /** * \brief Encode the object status into the response buffer, which is * expected to contain the current enclosure status. This function * turns off all the 'select' bits for the objects except for the * object specified, then sends it back to the enclosure. * * \param enc SES enclosure the change is being applied to. * \param buf Buffer containing the current enclosure status response. * \param amt Length of the response in the buffer. * \param req The control request to be applied to buf. * * \return 0 on success, errno otherwise. */ static int ses_encode(enc_softc_t *enc, uint8_t *buf, int amt, ses_control_request_t *req) { struct ses_iterator iter; enc_element_t *element; int offset; struct ses_control_page_hdr *hdr; ses_iter_init(enc, &enc->enc_cache, &iter); hdr = (struct ses_control_page_hdr *)buf; if (req->elm_idx == -1) { /* for enclosure status, at least 2 bytes are needed */ if (amt < 2) return EIO; hdr->control_flags = req->elm_stat.comstatus & SES_SET_STATUS_MASK; ENC_DLOG(enc, "Set EncStat %x\n", hdr->control_flags); return (0); } element = ses_iter_seek_to(&iter, req->elm_idx, SES_ELEM_INDEX_GLOBAL); if (element == NULL) return (ENXIO); /* * Seek to the type set that corresponds to the requested object. * The +1 is for the overall status element for the type. */ offset = sizeof(struct ses_control_page_hdr) + (iter.global_element_index * sizeof(struct ses_comstat)); /* Check for buffer overflow. */ if (offset + sizeof(struct ses_comstat) > amt) return (EIO); /* Set the status. */ memcpy(&buf[offset], &req->elm_stat, sizeof(struct ses_comstat)); ENC_DLOG(enc, "Set Type 0x%x Obj 0x%x (offset %d) with %x %x %x %x\n", iter.type_index, iter.global_element_index, offset, req->elm_stat.comstatus, req->elm_stat.comstat[0], req->elm_stat.comstat[1], req->elm_stat.comstat[2]); return (0); } static int ses_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf) { ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; struct ses_control_page_hdr *hdr; ses_control_request_t *req; size_t plength; size_t offset; ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; hdr = (struct ses_control_page_hdr *)buf; if (ses_cache->status_page == NULL) { ses_terminate_control_requests(&ses->ses_requests, EIO); return (EIO); } plength = ses_page_length(&ses_cache->status_page->hdr); memcpy(buf, ses_cache->status_page, plength); /* Disable the select bits in all status entries. */ offset = sizeof(struct ses_control_page_hdr); for (offset = sizeof(struct ses_control_page_hdr); offset < plength; offset += sizeof(struct ses_comstat)) { buf[offset] &= ~SESCTL_CSEL; } /* And make sure the INVOP bit is clear. */ hdr->control_flags &= ~SES_ENCSTAT_INVOP; /* Apply incoming requests. 
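	 *
	 * Requests that fail to encode are completed immediately (their
	 * issuers are woken with the error in req->result); the rest
	 * move to the pending queue until the SEND DIAGNOSTIC below
	 * completes.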
*/ while ((req = TAILQ_FIRST(&ses->ses_requests)) != NULL) { TAILQ_REMOVE(&ses->ses_requests, req, links); req->result = ses_encode(enc, buf, plength, req); if (req->result != 0) { wakeup(req); continue; } TAILQ_INSERT_TAIL(&ses->ses_pending_requests, req, links); } if (TAILQ_EMPTY(&ses->ses_pending_requests) != 0) return (ENOENT); /* Fill out the ccb */ if (enc->enc_type == ENC_SEMB_SES) { semb_send_diagnostic(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, buf, ses_page_length(&ses_cache->status_page->hdr), state->timeout); } else { scsi_send_diagnostic(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*unit_offline*/0, /*device_offline*/0, /*self_test*/0, /*page_format*/1, /*self_test_code*/0, buf, ses_page_length(&ses_cache->status_page->hdr), SSD_FULL_SIZE, state->timeout); } return (0); } static int ses_get_elm_addlstatus_fc(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz) { ENC_VLOG(enc, "FC Device Support Stubbed in Additional Status Page\n"); return (ENODEV); } #define SES_PRINT_PORTS(p, type) do { \ sbuf_printf(sbp, " %s(", type); \ if (((p) & SES_SASOBJ_DEV_PHY_PROTOMASK) == 0) \ sbuf_printf(sbp, " None"); \ else { \ if ((p) & SES_SASOBJ_DEV_PHY_SMP) \ sbuf_printf(sbp, " SMP"); \ if ((p) & SES_SASOBJ_DEV_PHY_STP) \ sbuf_printf(sbp, " STP"); \ if ((p) & SES_SASOBJ_DEV_PHY_SSP) \ sbuf_printf(sbp, " SSP"); \ } \ sbuf_printf(sbp, " )"); \ } while(0) /** * \brief Print the additional element status data for this object, for SAS * type 0 objects. See SES2 r20 Section 6.1.13.3.2. * * \param sesname SES device name associated with the object. * \param sbp Sbuf to print to. * \param obj The object to print the data for. * \param periph_name Peripheral string associated with the object. */ static void ses_print_addl_data_sas_type0(char *sesname, struct sbuf *sbp, enc_element_t *obj, char *periph_name) { int i; ses_element_t *elmpriv; struct ses_addl_status *addl; struct ses_elm_sas_device_phy *phy; elmpriv = obj->elm_private; addl = &(elmpriv->addl); if (addl->proto_hdr.sas == NULL) return; sbuf_printf(sbp, "%s: %s: SAS Device Slot Element:", sesname, periph_name); sbuf_printf(sbp, " %d Phys", addl->proto_hdr.sas->base_hdr.num_phys); if (ses_elm_addlstatus_eip(addl->hdr)) sbuf_printf(sbp, " at Slot %d", addl->proto_hdr.sas->type0_eip.dev_slot_num); if (ses_elm_sas_type0_not_all_phys(addl->proto_hdr.sas)) sbuf_printf(sbp, ", Not All Phys"); sbuf_printf(sbp, "\n"); if (addl->proto_data.sasdev_phys == NULL) return; for (i = 0;i < addl->proto_hdr.sas->base_hdr.num_phys;i++) { phy = &addl->proto_data.sasdev_phys[i]; sbuf_printf(sbp, "%s: phy %d:", sesname, i); if (ses_elm_sas_dev_phy_sata_dev(phy)) /* Spec says all other fields are specific values */ sbuf_printf(sbp, " SATA device\n"); else { sbuf_printf(sbp, " SAS device type %d id %d\n", ses_elm_sas_dev_phy_dev_type(phy), phy->phy_id); sbuf_printf(sbp, "%s: phy %d: protocols:", sesname, i); SES_PRINT_PORTS(phy->initiator_ports, "Initiator"); SES_PRINT_PORTS(phy->target_ports, "Target"); sbuf_printf(sbp, "\n"); } sbuf_printf(sbp, "%s: phy %d: parent %jx addr %jx\n", sesname, i, (uintmax_t)scsi_8btou64(phy->parent_addr), (uintmax_t)scsi_8btou64(phy->phy_addr)); } } #undef SES_PRINT_PORTS /** * \brief Report whether a given enclosure object is an expander. * * \param enc SES softc associated with object. * \param obj Enclosure object to report for. * * \return 1 if true, 0 otherwise. 
 */
static int
ses_obj_is_expander(enc_softc_t *enc, enc_element_t *obj)
{
	return (obj->enctype == ELMTYP_SAS_EXP);
}

/**
 * \brief Print the additional element status data for this object, for SAS
 *	  type 1 objects.  See SES2 r20 Sections 6.1.13.3.3 and 6.1.13.3.4.
 *
 * \param enc          SES enclosure, needed for type identification.
 * \param sesname      SES device name associated with the object.
 * \param sbp          Sbuf to print to.
 * \param obj          The object to print the data for.
 * \param periph_name  Peripheral string associated with the object.
 */
static void
ses_print_addl_data_sas_type1(enc_softc_t *enc, char *sesname,
    struct sbuf *sbp, enc_element_t *obj, char *periph_name)
{
	int i, num_phys;
	ses_element_t *elmpriv;
	struct ses_addl_status *addl;
	struct ses_elm_sas_expander_phy *exp_phy;
	struct ses_elm_sas_port_phy *port_phy;

	elmpriv = obj->elm_private;
	addl = &(elmpriv->addl);
	if (addl->proto_hdr.sas == NULL)
		return;
	sbuf_printf(sbp, "%s: %s: SAS ", sesname, periph_name);
	if (ses_obj_is_expander(enc, obj)) {
		num_phys = addl->proto_hdr.sas->base_hdr.num_phys;
		sbuf_printf(sbp, "Expander: %d Phys\n", num_phys);
		if (addl->proto_data.sasexp_phys == NULL)
			return;
		for (i = 0; i < num_phys; i++) {
			exp_phy = &addl->proto_data.sasexp_phys[i];
			sbuf_printf(sbp, "%s: phy %d: connector %d other %d\n",
			    sesname, i, exp_phy->connector_index,
			    exp_phy->other_index);
		}
	} else {
		num_phys = addl->proto_hdr.sas->base_hdr.num_phys;
		sbuf_printf(sbp, "Port: %d Phys\n", num_phys);
		if (addl->proto_data.sasport_phys == NULL)
			return;
		for (i = 0; i < num_phys; i++) {
			port_phy = &addl->proto_data.sasport_phys[i];
			sbuf_printf(sbp,
			    "%s: phy %d: id %d connector %d other %d\n",
			    sesname, i, port_phy->phy_id,
			    port_phy->connector_index, port_phy->other_index);
			sbuf_printf(sbp, "%s: phy %d: addr %jx\n", sesname, i,
			    (uintmax_t)scsi_8btou64(port_phy->phy_addr));
		}
	}
}

/**
 * \brief Print the additional element status data for this object.
 *
 * \param enc  SES softc associated with the object.
 * \param obj  The object to print the data for.
 */
static void
ses_print_addl_data(enc_softc_t *enc, enc_element_t *obj)
{
	ses_element_t *elmpriv;
	struct ses_addl_status *addl;
	struct sbuf sesname, name, out;

	elmpriv = obj->elm_private;
	if (elmpriv == NULL)
		return;
	addl = &(elmpriv->addl);
	if (addl->hdr == NULL)
		return;
	sbuf_new(&sesname, NULL, 16, SBUF_AUTOEXTEND);
	sbuf_new(&name, NULL, 16, SBUF_AUTOEXTEND);
	sbuf_new(&out, NULL, 512, SBUF_AUTOEXTEND);
	ses_paths_iter(enc, obj, ses_elmdevname_callback, &name);
	if (sbuf_len(&name) == 0)
		sbuf_printf(&name, "(none)");
	sbuf_finish(&name);
	sbuf_printf(&sesname, "%s%d", enc->periph->periph_name,
	    enc->periph->unit_number);
	sbuf_finish(&sesname);
	if (elmpriv->descr != NULL)
		sbuf_printf(&out, "%s: %s: Element descriptor: '%s'\n",
		    sbuf_data(&sesname), sbuf_data(&name), elmpriv->descr);
	switch(ses_elm_addlstatus_proto(addl->hdr)) {
	case SPSP_PROTO_SAS:
		switch(ses_elm_sas_descr_type(addl->proto_hdr.sas)) {
		case SES_SASOBJ_TYPE_SLOT:
			ses_print_addl_data_sas_type0(sbuf_data(&sesname),
			    &out, obj, sbuf_data(&name));
			break;
		case SES_SASOBJ_TYPE_OTHER:
			ses_print_addl_data_sas_type1(enc,
			    sbuf_data(&sesname), &out, obj, sbuf_data(&name));
			break;
		default:
			break;
		}
		break;
	case SPSP_PROTO_FC:	/* stubbed for now */
		break;
	default:
		break;
	}
	sbuf_finish(&out);
	printf("%s", sbuf_data(&out));
	sbuf_delete(&out);
	sbuf_delete(&name);
	sbuf_delete(&sesname);
}

/**
 * \brief Update the softc with the additional element status data for this
 *	  object, for SAS type 0 objects.
 *
 * \param enc     SES softc to be updated.
* \param buf The additional element status response buffer. * \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_sas_type0(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int nobj) { int err, offset, physz; enc_element_t *obj; ses_element_t *elmpriv; struct ses_addl_status *addl; err = offset = 0; /* basic object setup */ obj = &(enc_cache->elm_map[nobj]); elmpriv = obj->elm_private; addl = &(elmpriv->addl); addl->proto_hdr.sas = (union ses_elm_sas_hdr *)&buf[offset]; /* Don't assume this object has any phys */ bzero(&addl->proto_data, sizeof(addl->proto_data)); if (addl->proto_hdr.sas->base_hdr.num_phys == 0) goto out; /* Skip forward to the phy list */ if (eip) offset += sizeof(struct ses_elm_sas_type0_eip_hdr); else offset += sizeof(struct ses_elm_sas_type0_base_hdr); /* Make sure the phy list fits in the buffer */ physz = addl->proto_hdr.sas->base_hdr.num_phys; physz *= sizeof(struct ses_elm_sas_device_phy); if (physz > (bufsiz - offset + 4)) { ENC_VLOG(enc, "Element %d Device Phy List Beyond End Of Buffer\n", nobj); err = EIO; goto out; } /* Point to the phy list */ addl->proto_data.sasdev_phys = (struct ses_elm_sas_device_phy *)&buf[offset]; out: return (err); } /** * \brief Update the softc with the additional element status data for this * object, for SAS type 1 objects. * * \param enc SES softc to be updated. * \param buf The additional element status response buffer. * \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_sas_type1(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int nobj) { int err, offset, physz; enc_element_t *obj; ses_element_t *elmpriv; struct ses_addl_status *addl; err = offset = 0; /* basic object setup */ obj = &(enc_cache->elm_map[nobj]); elmpriv = obj->elm_private; addl = &(elmpriv->addl); addl->proto_hdr.sas = (union ses_elm_sas_hdr *)&buf[offset]; /* Don't assume this object has any phys */ bzero(&addl->proto_data, sizeof(addl->proto_data)); if (addl->proto_hdr.sas->base_hdr.num_phys == 0) goto out; /* Process expanders differently from other type1 cases */ if (ses_obj_is_expander(enc, obj)) { offset += sizeof(struct ses_elm_sas_type1_expander_hdr); physz = addl->proto_hdr.sas->base_hdr.num_phys * sizeof(struct ses_elm_sas_expander_phy); if (physz > (bufsiz - offset)) { ENC_VLOG(enc, "Element %d: Expander Phy List Beyond " "End Of Buffer\n", nobj); err = EIO; goto out; } addl->proto_data.sasexp_phys = (struct ses_elm_sas_expander_phy *)&buf[offset]; } else { offset += sizeof(struct ses_elm_sas_type1_nonexpander_hdr); physz = addl->proto_hdr.sas->base_hdr.num_phys * sizeof(struct ses_elm_sas_port_phy); if (physz > (bufsiz - offset + 4)) { ENC_VLOG(enc, "Element %d: Port Phy List Beyond End " "Of Buffer\n", nobj); err = EIO; goto out; } addl->proto_data.sasport_phys = (struct ses_elm_sas_port_phy *)&buf[offset]; } out: return (err); } /** * \brief Update the softc with the additional element status data for this * object, for SAS objects. * * \param enc SES softc to be updated. * \param buf The additional element status response buffer. * \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param tidx Type index for this object. 
* \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_sas(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int tidx, int nobj) { int dtype, err; ses_cache_t *ses_cache; union ses_elm_sas_hdr *hdr; /* Need to be able to read the descriptor type! */ if (bufsiz < sizeof(union ses_elm_sas_hdr)) { err = EIO; goto out; } ses_cache = enc_cache->private; hdr = (union ses_elm_sas_hdr *)buf; dtype = ses_elm_sas_descr_type(hdr); switch(dtype) { case SES_SASOBJ_TYPE_SLOT: switch(ses_cache->ses_types[tidx].hdr->etype_elm_type) { case ELMTYP_DEVICE: case ELMTYP_ARRAY_DEV: break; default: ENC_VLOG(enc, "Element %d has Additional Status type 0, " "invalid for SES element type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type); err = ENODEV; goto out; } err = ses_get_elm_addlstatus_sas_type0(enc, enc_cache, buf, bufsiz, eip, nobj); break; case SES_SASOBJ_TYPE_OTHER: switch(ses_cache->ses_types[tidx].hdr->etype_elm_type) { case ELMTYP_SAS_EXP: case ELMTYP_SCSI_INI: case ELMTYP_SCSI_TGT: case ELMTYP_ESCC: break; default: ENC_VLOG(enc, "Element %d has Additional Status type 1, " "invalid for SES element type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type); err = ENODEV; goto out; } err = ses_get_elm_addlstatus_sas_type1(enc, enc_cache, buf, bufsiz, eip, nobj); break; default: ENC_VLOG(enc, "Element %d of type 0x%x has Additional Status " "of unknown type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type, dtype); err = ENODEV; break; } out: return (err); } static void ses_softc_invalidate(enc_softc_t *enc) { ses_softc_t *ses; ses = enc->enc_private; ses_terminate_control_requests(&ses->ses_requests, ENXIO); } static void ses_softc_cleanup(enc_softc_t *enc) { ses_cache_free(enc, &enc->enc_cache); ses_cache_free(enc, &enc->enc_daemon_cache); ENC_FREE_AND_NULL(enc->enc_private); ENC_FREE_AND_NULL(enc->enc_cache.private); ENC_FREE_AND_NULL(enc->enc_daemon_cache.private); } static int ses_init_enc(enc_softc_t *enc) { return (0); } static int ses_get_enc_status(enc_softc_t *enc, int slpflag) { /* Automatically updated, caller checks enc_cache->encstat itself */ return (0); } static int ses_set_enc_status(enc_softc_t *enc, uint8_t encstat, int slpflag) { ses_control_request_t req; ses_softc_t *ses; ses = enc->enc_private; req.elm_idx = SES_SETSTATUS_ENC_IDX; req.elm_stat.comstatus = encstat & 0xf; TAILQ_INSERT_TAIL(&ses->ses_requests, &req, links); enc_update_request(enc, SES_PROCESS_CONTROL_REQS); cam_periph_sleep(enc->periph, &req, PUSER, "encstat", 0); return (req.result); } static int ses_get_elm_status(enc_softc_t *enc, encioc_elm_status_t *elms, int slpflag) { unsigned int i = elms->elm_idx; memcpy(elms->cstat, &enc->enc_cache.elm_map[i].encstat, 4); return (0); } static int ses_set_elm_status(enc_softc_t *enc, encioc_elm_status_t *elms, int slpflag) { ses_control_request_t req; ses_softc_t *ses; /* If this is clear, we don't do diddly. 
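	 * (SESCTL_CSEL is the select bit a caller sets to mark this
	 * element's control bytes as valid; without it there is
	 * nothing to apply.)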
*/ if ((elms->cstat[0] & SESCTL_CSEL) == 0) return (0); ses = enc->enc_private; req.elm_idx = elms->elm_idx; memcpy(&req.elm_stat, elms->cstat, sizeof(req.elm_stat)); TAILQ_INSERT_TAIL(&ses->ses_requests, &req, links); enc_update_request(enc, SES_PROCESS_CONTROL_REQS); cam_periph_sleep(enc->periph, &req, PUSER, "encstat", 0); return (req.result); } static int ses_get_elm_desc(enc_softc_t *enc, encioc_elm_desc_t *elmd) { int i = (int)elmd->elm_idx; ses_element_t *elmpriv; /* Assume caller has already checked obj_id validity */ elmpriv = enc->enc_cache.elm_map[i].elm_private; /* object might not have a descriptor */ if (elmpriv == NULL || elmpriv->descr == NULL) { elmd->elm_desc_len = 0; return (0); } if (elmd->elm_desc_len > elmpriv->descr_len) elmd->elm_desc_len = elmpriv->descr_len; copyout(elmpriv->descr, elmd->elm_desc_str, elmd->elm_desc_len); return (0); } /** * \brief Respond to ENCIOC_GETELMDEVNAME, providing a device name for the * given object id if one is available. * * \param enc SES softc to examine. * \param objdn ioctl structure to read/write device name info. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_devnames(enc_softc_t *enc, encioc_elm_devnames_t *elmdn) { struct sbuf sb; int len; len = elmdn->elm_names_size; if (len < 0) return (EINVAL); cam_periph_unlock(enc->periph); sbuf_new(&sb, NULL, len, SBUF_FIXEDLEN); ses_paths_iter(enc, &enc->enc_cache.elm_map[elmdn->elm_idx], ses_elmdevname_callback, &sb); sbuf_finish(&sb); elmdn->elm_names_len = sbuf_len(&sb); copyout(sbuf_data(&sb), elmdn->elm_devnames, elmdn->elm_names_len + 1); sbuf_delete(&sb); cam_periph_lock(enc->periph); return (elmdn->elm_names_len > 0 ? 0 : ENODEV); } /** * \brief Send a string to the primary subenclosure using the String Out * SES diagnostic page. * * \param enc SES enclosure to run the command on. * \param sstr SES string structure to operate on * \param ioc Ioctl being performed * * \return 0 on success, errno otherwise. 
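 *
 * \note For ENCIOC_SETSTRING the payload built below is a String Out
 *       page (sketch, per SES2r20 6.1.6): byte 0 page code, byte 1
 *       reserved, bytes 2-3 page length (big endian), then the
 *       caller's data; hence the four header bytes added to
 *       sstr->bufsiz.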
*/ static int ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc) { ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; const struct ses_enc_desc *enc_desc; int amt, payload, ret; char cdb[6]; char str[32]; char vendor[9]; char product[17]; char rev[5]; uint8_t *buf; size_t size, rsize; ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; /* Implement SES2r20 6.1.6 */ if (sstr->bufsiz > 0xffff) return (EINVAL); /* buffer size too large */ if (ioc == ENCIOC_SETSTRING) { payload = sstr->bufsiz + 4; /* header for SEND DIAGNOSTIC */ amt = 0 - payload; buf = ENC_MALLOC(payload); if (buf == NULL) return ENOMEM; ses_page_cdb(cdb, payload, 0, CAM_DIR_OUT); /* Construct the page request */ buf[0] = SesStringOut; buf[1] = 0; buf[2] = sstr->bufsiz >> 8; buf[3] = sstr->bufsiz & 0xff; memcpy(&buf[4], sstr->buf, sstr->bufsiz); } else if (ioc == ENCIOC_GETSTRING) { payload = sstr->bufsiz; amt = payload; ses_page_cdb(cdb, payload, SesStringIn, CAM_DIR_IN); buf = sstr->buf; } else if (ioc == ENCIOC_GETENCNAME) { if (ses_cache->ses_nsubencs < 1) return (ENODEV); enc_desc = ses_cache->subencs[0]; cam_strvis(vendor, enc_desc->vendor_id, sizeof(enc_desc->vendor_id), sizeof(vendor)); cam_strvis(product, enc_desc->product_id, sizeof(enc_desc->product_id), sizeof(product)); cam_strvis(rev, enc_desc->product_rev, sizeof(enc_desc->product_rev), sizeof(rev)); rsize = snprintf(str, sizeof(str), "%s %s %s", vendor, product, rev) + 1; if (rsize > sizeof(str)) rsize = sizeof(str); copyout(&rsize, &sstr->bufsiz, sizeof(rsize)); size = rsize; if (size > sstr->bufsiz) size = sstr->bufsiz; copyout(str, sstr->buf, size); return (size == rsize ? 0 : ENOMEM); } else if (ioc == ENCIOC_GETENCID) { if (ses_cache->ses_nsubencs < 1) return (ENODEV); enc_desc = ses_cache->subencs[0]; rsize = snprintf(str, sizeof(str), "%16jx", scsi_8btou64(enc_desc->logical_id)) + 1; if (rsize > sizeof(str)) rsize = sizeof(str); copyout(&rsize, &sstr->bufsiz, sizeof(rsize)); size = rsize; if (size > sstr->bufsiz) size = sstr->bufsiz; copyout(str, sstr->buf, size); return (size == rsize ? 0 : ENOMEM); } else return EINVAL; ret = enc_runcmd(enc, cdb, 6, buf, &amt); if (ioc == ENCIOC_SETSTRING) ENC_FREE(buf); return ret; } /** * \invariant Called with cam_periph mutex held. */ static void ses_poll_status(enc_softc_t *enc) { ses_softc_t *ses; ses = enc->enc_private; enc_update_request(enc, SES_UPDATE_GETSTATUS); if (ses->ses_flags & SES_FLAG_DESC) enc_update_request(enc, SES_UPDATE_GETELMDESCS); if (ses->ses_flags & SES_FLAG_ADDLSTATUS) enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS); } /** * \brief Notification received when CAM detects a new device in the * SCSI domain in which this SEP resides. * * \param enc SES enclosure instance. */ static void ses_device_found(enc_softc_t *enc) { ses_poll_status(enc); enc_update_request(enc, SES_PUBLISH_PHYSPATHS); } static struct enc_vec ses_enc_vec = { .softc_invalidate = ses_softc_invalidate, .softc_cleanup = ses_softc_cleanup, .init_enc = ses_init_enc, .get_enc_status = ses_get_enc_status, .set_enc_status = ses_set_enc_status, .get_elm_status = ses_get_elm_status, .set_elm_status = ses_set_elm_status, .get_elm_desc = ses_get_elm_desc, .get_elm_devnames = ses_get_elm_devnames, .handle_string = ses_handle_string, .device_found = ses_device_found, .poll_status = ses_poll_status }; /** * \brief Initialize a new SES instance. * * \param enc SES softc structure to set up the instance in. 
 *
 * \return  0 on success, errno otherwise.
 */
int
ses_softc_init(enc_softc_t *enc)
{
	ses_softc_t *ses_softc;

	CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE,
	    ("entering ses_softc_init(%p)\n", enc));

	enc->enc_vec = ses_enc_vec;
	enc->enc_fsm_states = enc_fsm_states;

	if (enc->enc_private == NULL)
		enc->enc_private = ENC_MALLOCZ(sizeof(ses_softc_t));
	if (enc->enc_cache.private == NULL)
		enc->enc_cache.private = ENC_MALLOCZ(sizeof(ses_cache_t));
	if (enc->enc_daemon_cache.private == NULL)
		enc->enc_daemon_cache.private =
		    ENC_MALLOCZ(sizeof(ses_cache_t));

	if (enc->enc_private == NULL
	 || enc->enc_cache.private == NULL
	 || enc->enc_daemon_cache.private == NULL) {
		ENC_FREE_AND_NULL(enc->enc_private);
		ENC_FREE_AND_NULL(enc->enc_cache.private);
		ENC_FREE_AND_NULL(enc->enc_daemon_cache.private);
		return (ENOMEM);
	}

	ses_softc = enc->enc_private;
	TAILQ_INIT(&ses_softc->ses_requests);
	TAILQ_INIT(&ses_softc->ses_pending_requests);

	enc_update_request(enc, SES_UPDATE_PAGES);

	// XXX: Move this to the FSM so it doesn't hang init
	if (0)
		(void) ses_set_timed_completion(enc, 1);

	return (0);
}
Index: head/sys/cam/scsi/scsi_low.c
===================================================================
--- head/sys/cam/scsi/scsi_low.c	(revision 326264)
+++ head/sys/cam/scsi/scsi_low.c	(revision 326265)
@@ -1,4200 +1,4202 @@
/* $NecBSD: scsi_low.c,v 1.24.10.8 2001/06/26 07:39:44 honda Exp $ */
/* $NetBSD$ */

#include 
__FBSDID("$FreeBSD$");

#define	SCSI_LOW_STATICS
#define	SCSI_LOW_DEBUG
#define	SCSI_LOW_NEGOTIATE_BEFORE_SENSE
#define	SCSI_LOW_START_UP_CHECK

/* #define	SCSI_LOW_INFO_DETAIL */

/* #define	SCSI_LOW_QCLEAR_AFTER_CA */
/* #define	SCSI_LOW_FLAGS_QUIRKS_OK */
#define	SCSI_LOW_FLAGS_QUIRKS_OK

/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
 * [NetBSD for NEC PC-98 series]
 *  Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000, 2001
 *	NetBSD/pc98 porting staff. All rights reserved.
 *  Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000, 2001
 *	Naofumi HONDA. All rights reserved.
 *
 *  [Ported for FreeBSD CAM]
 *  Copyright (c) 2000, 2001
 *	MITSUNAGA Noriaki, NOKUBI Hirotaka and TAKAHASHI Yoshihiro.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
*/ /* * When our host is reselected, * nexus establish processes are little complicated. * Normal steps are followings: * 1) Our host selected by target => target nexus (slp->sl_Tnexus) * 2) Identify msgin => lun nexus (slp->sl_Lnexus) * 3) Qtag msg => ccb nexus (slp->sl_Qnexus) */ #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /************************************************************** * Constants **************************************************************/ #define SCSI_LOW_POLL_HZ 1000 /* functions return values */ #define SCSI_LOW_START_NO_QTAG 0 #define SCSI_LOW_START_QTAG 1 #define SCSI_LOW_DONE_COMPLETE 0 #define SCSI_LOW_DONE_RETRY 1 /* internal disk flags */ #define SCSI_LOW_DISK_DISC 0x00000001 #define SCSI_LOW_DISK_QTAG 0x00000002 #define SCSI_LOW_DISK_LINK 0x00000004 #define SCSI_LOW_DISK_PARITY 0x00000008 #define SCSI_LOW_DISK_SYNC 0x00010000 #define SCSI_LOW_DISK_WIDE_16 0x00020000 #define SCSI_LOW_DISK_WIDE_32 0x00040000 #define SCSI_LOW_DISK_WIDE (SCSI_LOW_DISK_WIDE_16 | SCSI_LOW_DISK_WIDE_32) #define SCSI_LOW_DISK_LFLAGS 0x0000ffff #define SCSI_LOW_DISK_TFLAGS 0xffff0000 static MALLOC_DEFINE(M_SCSILOW, "SCSI low", "SCSI low buffers"); /************************************************************** * Declarations **************************************************************/ /* static */ void scsi_low_info(struct scsi_low_softc *, struct targ_info *, u_char *); static void scsi_low_engage(void *); static struct slccb *scsi_low_establish_ccb(struct targ_info *, struct lun_info *, scsi_low_tag_t); static int scsi_low_done(struct scsi_low_softc *, struct slccb *); static int scsi_low_setup_done(struct scsi_low_softc *, struct slccb *); static void scsi_low_bus_release(struct scsi_low_softc *, struct targ_info *); static void scsi_low_twiddle_wait(void); static struct lun_info *scsi_low_alloc_li(struct targ_info *, int, int); static struct targ_info *scsi_low_alloc_ti(struct scsi_low_softc *, int); static void scsi_low_calcf_lun(struct lun_info *); static void scsi_low_calcf_target(struct targ_info *); static void scsi_low_calcf_show(struct lun_info *); static void scsi_low_reset_nexus(struct scsi_low_softc *, int); static void scsi_low_reset_nexus_target(struct scsi_low_softc *, struct targ_info *, int); static void scsi_low_reset_nexus_lun(struct scsi_low_softc *, struct lun_info *, int); static int scsi_low_init(struct scsi_low_softc *, u_int); static void scsi_low_start(struct scsi_low_softc *); static void scsi_low_free_ti(struct scsi_low_softc *); static int scsi_low_alloc_qtag(struct slccb *); static int scsi_low_dealloc_qtag(struct slccb *); static int scsi_low_enqueue(struct scsi_low_softc *, struct targ_info *, struct lun_info *, struct slccb *, u_int, u_int); static int scsi_low_message_enqueue(struct scsi_low_softc *, struct targ_info *, struct lun_info *, u_int); static void scsi_low_unit_ready_cmd(struct slccb *); static void scsi_low_timeout(void *); static int scsi_low_timeout_check(struct scsi_low_softc *); #ifdef SCSI_LOW_START_UP_CHECK static int scsi_low_start_up(struct scsi_low_softc *); #endif /* SCSI_LOW_START_UP_CHECK */ static int scsi_low_abort_ccb(struct scsi_low_softc *, struct slccb *); static struct slccb *scsi_low_revoke_ccb(struct scsi_low_softc *, struct slccb *, int); int scsi_low_version_major = 2; int scsi_low_version_minor = 17; static struct scsi_low_softc_tab sl_tab = LIST_HEAD_INITIALIZER(sl_tab); 
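
/*
 * Illustrative only (not driver code): the reselection nexus chain
 * described at the top of this file is re-established in this order
 * as the reselecting target's messages arrive:
 *
 *	ti = slp->sl_Tnexus;	-- 1) bus reselection: target nexus
 *	li = slp->sl_Lnexus;	-- 2) IDENTIFY message: lun nexus
 *	cb = slp->sl_Qnexus;	-- 3) queue tag message: ccb nexus
 *
 * Each pointer remains NULL until the corresponding step completes.
 */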
static struct mtx sl_tab_lock; MTX_SYSINIT(sl_tab_lock, &sl_tab_lock, "scsi low table", MTX_DEF); /************************************************************** * Debug, Run test and Statics **************************************************************/ #ifdef SCSI_LOW_INFO_DETAIL #define SCSI_LOW_INFO(slp, ti, s) scsi_low_info((slp), (ti), (s)) #else /* !SCSI_LOW_INFO_DETAIL */ #define SCSI_LOW_INFO(slp, ti, s) device_printf((slp)->sl_dev, "%s\n", (s)) #endif /* !SCSI_LOW_INFO_DETAIL */ #ifdef SCSI_LOW_STATICS static struct scsi_low_statics { int nexus_win; int nexus_fail; int nexus_disconnected; int nexus_reselected; int nexus_conflict; } scsi_low_statics; #endif /* SCSI_LOW_STATICS */ #ifdef SCSI_LOW_DEBUG #define SCSI_LOW_DEBUG_DONE 0x00001 #define SCSI_LOW_DEBUG_DISC 0x00002 #define SCSI_LOW_DEBUG_SENSE 0x00004 #define SCSI_LOW_DEBUG_CALCF 0x00008 #define SCSI_LOW_DEBUG_ACTION 0x10000 int scsi_low_debug = 0; #define SCSI_LOW_MAX_ATTEN_CHECK 32 #define SCSI_LOW_ATTEN_CHECK 0x0001 #define SCSI_LOW_CMDLNK_CHECK 0x0002 #define SCSI_LOW_ABORT_CHECK 0x0004 #define SCSI_LOW_NEXUS_CHECK 0x0008 int scsi_low_test = 0; int scsi_low_test_id = 0; static void scsi_low_test_abort(struct scsi_low_softc *, struct targ_info *, struct lun_info *); static void scsi_low_test_cmdlnk(struct scsi_low_softc *, struct slccb *); static void scsi_low_test_atten(struct scsi_low_softc *, struct targ_info *, u_int); #define SCSI_LOW_DEBUG_TEST_GO(fl, id) \ ((scsi_low_test & (fl)) != 0 && (scsi_low_test_id & (1 << (id))) == 0) #define SCSI_LOW_DEBUG_GO(fl, id) \ ((scsi_low_debug & (fl)) != 0 && (scsi_low_test_id & (1 << (id))) == 0) #endif /* SCSI_LOW_DEBUG */ /************************************************************** * CCB **************************************************************/ GENERIC_CCB_STATIC_ALLOC(scsi_low, slccb) GENERIC_CCB(scsi_low, slccb, ccb_chain) /************************************************************** * Inline functions **************************************************************/ #define SCSI_LOW_INLINE static __inline SCSI_LOW_INLINE void scsi_low_activate_qtag(struct slccb *); SCSI_LOW_INLINE void scsi_low_deactivate_qtag(struct slccb *); SCSI_LOW_INLINE void scsi_low_ccb_message_assert(struct slccb *, u_int); SCSI_LOW_INLINE void scsi_low_ccb_message_exec(struct scsi_low_softc *, struct slccb *); SCSI_LOW_INLINE void scsi_low_ccb_message_retry(struct slccb *); SCSI_LOW_INLINE void scsi_low_ccb_message_clear(struct slccb *); SCSI_LOW_INLINE void scsi_low_init_msgsys(struct scsi_low_softc *, struct targ_info *); SCSI_LOW_INLINE void scsi_low_activate_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; if (cb->ccb_tag != SCSI_LOW_UNKTAG) return; li->li_nqio ++; cb->ccb_tag = cb->ccb_otag; } SCSI_LOW_INLINE void scsi_low_deactivate_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; if (cb->ccb_tag == SCSI_LOW_UNKTAG) return; li->li_nqio --; cb->ccb_tag = SCSI_LOW_UNKTAG; } SCSI_LOW_INLINE void scsi_low_ccb_message_exec(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { scsi_low_assert_msg(slp, cb->ti, cb->ccb_msgoutflag, 0); cb->ccb_msgoutflag = 0; } SCSI_LOW_INLINE void scsi_low_ccb_message_assert(cb, msg) struct slccb *cb; u_int msg; { cb->ccb_msgoutflag = cb->ccb_omsgoutflag = msg; } SCSI_LOW_INLINE void scsi_low_ccb_message_retry(cb) struct slccb *cb; { cb->ccb_msgoutflag = cb->ccb_omsgoutflag; } SCSI_LOW_INLINE void scsi_low_ccb_message_clear(cb) struct slccb *cb; { cb->ccb_msgoutflag = 0; } SCSI_LOW_INLINE void scsi_low_init_msgsys(slp, ti) struct 
scsi_low_softc *slp;
	struct targ_info *ti;
{

	ti->ti_msginptr = 0;
	ti->ti_emsgflags = ti->ti_msgflags = ti->ti_omsgflags = 0;
	SCSI_LOW_DEASSERT_ATN(slp);
	SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_NULL);
}

/*=============================================================
 * START OF OS switch  (All OS dependent functions should be here)
 =============================================================*/
/* common OS dependent utilities */
#define	SCSI_LOW_CMD_RESIDUAL_CHK	0x0001
#define	SCSI_LOW_CMD_ORDERED_QTAG	0x0002
#define	SCSI_LOW_CMD_ABORT_WARNING	0x0004

static u_int8_t scsi_low_cmd_flags[256] = {
/*	0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
/*0*/	0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 5, 0, 0, 0, 0, 0,
/*1*/	0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0,
/*2*/	0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 5, 0, 0, 0, 5, 5,
/*3*/	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5,
};

struct scsi_low_error_code {
	int error_bits;
	int error_code;
};

static struct slccb *scsi_low_find_ccb(struct scsi_low_softc *, u_int, u_int,
    void *);
static int scsi_low_translate_error_code(struct slccb *,
    struct scsi_low_error_code *);

static struct slccb *
scsi_low_find_ccb(slp, target, lun, osdep)
	struct scsi_low_softc *slp;
	u_int target, lun;
	void *osdep;
{
	struct targ_info *ti;
	struct lun_info *li;
	struct slccb *cb;

	ti = slp->sl_ti[target];
	li = scsi_low_alloc_li(ti, lun, 0);
	if (li == NULL)
		return NULL;

	if ((cb = slp->sl_Qnexus) != NULL && cb->osdep == osdep)
		return cb;

	TAILQ_FOREACH(cb, &slp->sl_start, ccb_chain) {
		if (cb->osdep == osdep)
			return cb;
	}

	TAILQ_FOREACH(cb, &li->li_discq, ccb_chain) {
		if (cb->osdep == osdep)
			return cb;
	}
	return NULL;
}

static int
scsi_low_translate_error_code(cb, tp)
	struct slccb *cb;
	struct scsi_low_error_code *tp;
{

	if (cb->ccb_error == 0)
		return tp->error_code;

	for (tp ++; (cb->ccb_error & tp->error_bits) == 0; tp ++)
		;
	return tp->error_code;
}

/**************************************************************
 * SCSI INTERFACE (CAM)
 **************************************************************/
#define	SCSI_LOW_MALLOC(size)		malloc((size), M_SCSILOW, M_NOWAIT)
#define	SCSI_LOW_FREE(pt)		free((pt), M_SCSILOW)
#define	SCSI_LOW_ALLOC_CCB(flags)	scsi_low_get_ccb()

static void scsi_low_poll_cam(struct cam_sim *);
void scsi_low_scsi_action_cam(struct cam_sim *, union ccb *);

static int scsi_low_attach_cam(struct scsi_low_softc *);
static int scsi_low_detach_cam(struct scsi_low_softc *);
static int scsi_low_ccb_setup_cam(struct scsi_low_softc *, struct slccb *);
static int scsi_low_done_cam(struct scsi_low_softc *, struct slccb *);

struct scsi_low_error_code scsi_low_error_code_cam[] = {
	{0,			CAM_REQ_CMP},
	{SENSEIO,		CAM_AUTOSNS_VALID | CAM_REQ_CMP_ERR},
	{SENSEERR,		CAM_AUTOSENSE_FAIL},
	{UACAERR,		CAM_SCSI_STATUS_ERROR},
	{BUSYERR | STATERR,	CAM_SCSI_STATUS_ERROR},
	{SELTIMEOUTIO,		CAM_SEL_TIMEOUT},
	{TIMEOUTIO,		CAM_CMD_TIMEOUT},
	{PDMAERR,		CAM_DATA_RUN_ERR},
	{PARITYERR,		CAM_UNCOR_PARITY},
	{UBFERR,		CAM_UNEXP_BUSFREE},
	{ABORTIO,		CAM_REQ_ABORTED},
	{-1,			CAM_UNREC_HBA_ERROR}
};

#define	SIM2SLP(sim)	((struct scsi_low_softc *) cam_sim_softc((sim)))

/* XXX:
 * Please check the polling hz; currently we assume scsi_low_poll() is
 * called every 1 ms.
 */
#define	SCSI_LOW_CAM_POLL_HZ	1000	/* OK ?
*/ static void scsi_low_poll_cam(sim) struct cam_sim *sim; { struct scsi_low_softc *slp = SIM2SLP(sim); SCSI_LOW_ASSERT_LOCKED(slp); (*slp->sl_funcs->scsi_low_poll) (slp); if (slp->sl_poll_count ++ >= SCSI_LOW_CAM_POLL_HZ / SCSI_LOW_TIMEOUT_HZ) { slp->sl_poll_count = 0; scsi_low_timeout_check(slp); } } void scsi_low_scsi_action_cam(sim, ccb) struct cam_sim *sim; union ccb *ccb; { struct scsi_low_softc *slp = SIM2SLP(sim); struct targ_info *ti; struct lun_info *li; struct slccb *cb; u_int lun, flags, msg, target; int rv; SCSI_LOW_ASSERT_LOCKED(slp); target = (u_int) (ccb->ccb_h.target_id); lun = (u_int) ccb->ccb_h.target_lun; #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_ACTION, target) != 0) { device_printf(slp->sl_dev, "cam_action: func code 0x%x target: %d, lun: %d\n", ccb->ccb_h.func_code, target, lun); } #endif /* SCSI_LOW_DEBUG */ switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD || lun == CAM_LUN_WILDCARD) { device_printf(slp->sl_dev, "invalid target/lun\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ if (((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL)) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } ti = slp->sl_ti[target]; cb->osdep = ccb; cb->bp = NULL; if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) flags = CCB_AUTOSENSE | CCB_SCSIIO; else flags = CCB_SCSIIO; li = scsi_low_alloc_li(ti, lun, 1); if (ti->ti_setup_msg != 0) { scsi_low_message_enqueue(slp, ti, li, CCB_AUTOSENSE); } scsi_low_enqueue(slp, ti, li, cb, flags, 0); #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ABORT_CHECK, target) != 0) { scsi_low_test_abort(slp, ti, li); } #endif /* SCSI_LOW_DEBUG */ break; case XPT_ABORT: /* Abort the specified CCB */ #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD || lun == CAM_LUN_WILDCARD) { device_printf(slp->sl_dev, "invalid target/lun\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ cb = scsi_low_find_ccb(slp, target, lun, ccb->cab.abort_ccb); rv = scsi_low_abort_ccb(slp, cb); if (rv == 0) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; u_int val; #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD) { device_printf(slp->sl_dev, "invalid target\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ cts = &ccb->cts; ti = slp->sl_ti[target]; if (lun == CAM_LUN_WILDCARD) lun = 0; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((spi->valid & (CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET)) != 0) { if (spi->valid & CTS_SPI_VALID_BUS_WIDTH) { val = spi->bus_width; if (val < ti->ti_width) ti->ti_width = val; } if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { val = spi->sync_period; if (val == 0 || val > ti->ti_maxsynch.period) ti->ti_maxsynch.period = val; } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { val = spi->sync_offset; if (val < ti->ti_maxsynch.offset) ti->ti_maxsynch.offset = val; } ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_QUIRKS_VALID; scsi_low_calcf_target(ti); } if ((spi->valid & CTS_SPI_FLAGS_DISC_ENB) != 0 || (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) { li = scsi_low_alloc_li(ti, lun, 1); if (spi->valid & CTS_SPI_FLAGS_DISC_ENB) { 
li->li_quirks |= SCSI_LOW_DISK_DISC; } else { li->li_quirks &= ~SCSI_LOW_DISK_DISC; } if (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) { li->li_quirks |= SCSI_LOW_DISK_QTAG; } else { li->li_quirks &= ~SCSI_LOW_DISK_QTAG; } li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_QUIRKS_VALID; scsi_low_calcf_target(ti); scsi_low_calcf_lun(li); if ((slp->sl_show_result & SHOW_CALCF_RES) != 0) scsi_low_calcf_show(li); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; u_int diskflags; cts = &ccb->cts; #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD) { device_printf(slp->sl_dev, "invalid target\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ ti = slp->sl_ti[target]; if (lun == CAM_LUN_WILDCARD) lun = 0; li = scsi_low_alloc_li(ti, lun, 1); if (li != NULL && cts->type == CTS_TYPE_CURRENT_SETTINGS) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; #ifdef SCSI_LOW_DIAGNOSTIC if (li->li_flags_valid != SCSI_LOW_LUN_FLAGS_ALL_VALID) { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; device_printf(slp->sl_dev, "invalid GET_TRANS_CURRENT_SETTINGS call\n"); goto settings_out; } #endif /* SCSI_LOW_DIAGNOSTIC */ cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; diskflags = li->li_diskflags & li->li_cfgflags; if (diskflags & SCSI_LOW_DISK_DISC) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if (diskflags & SCSI_LOW_DISK_QTAG) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; spi->sync_period = ti->ti_maxsynch.period; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->sync_offset = ti->ti_maxsynch.offset; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = ti->ti_width; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; } else ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; settings_out: xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* not yet HN2 */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD) { device_printf(slp->sl_dev, "invalid target\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ msg = SCSI_LOW_MSG_RESET; if (((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL)) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } ti = slp->sl_ti[target]; if (lun == CAM_LUN_WILDCARD) lun = 0; cb->osdep = ccb; cb->bp = NULL; if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) flags = CCB_AUTOSENSE | CCB_NORETRY | CCB_URGENT; else flags = CCB_NORETRY | CCB_URGENT; li = scsi_low_alloc_li(ti, lun, 1); scsi_low_enqueue(slp, ti, li, cb, flags, msg); break; case XPT_PATH_INQ: { /* Path routing inquiry */ struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = scsi_low_version_major; cpi->hba_inquiry = PI_TAG_ABLE | PI_LINKED_CDB; ti = slp->sl_ti[slp->sl_hostid]; /* host id */ if (ti->ti_width > SCSI_LOW_BUS_WIDTH_8) cpi->hba_inquiry 
|= PI_WIDE_16;
        if (ti->ti_width > SCSI_LOW_BUS_WIDTH_16)
            cpi->hba_inquiry |= PI_WIDE_32;
        if (ti->ti_maxsynch.offset > 0)
            cpi->hba_inquiry |= PI_SDTR_ABLE;
        cpi->target_sprt = 0;
        cpi->hba_misc = 0;
        cpi->hba_eng_cnt = 0;
        cpi->max_target = slp->sl_ntargs - 1;
        cpi->max_lun = slp->sl_nluns - 1;
        cpi->initiator_id = slp->sl_hostid;
        cpi->bus_id = cam_sim_bus(sim);
        cpi->base_transfer_speed = 3300;
        cpi->transport = XPORT_SPI;
        cpi->transport_version = 2;
        cpi->protocol = PROTO_SCSI;
        cpi->protocol_version = SCSI_REV_2;
        strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strlcpy(cpi->hba_vid, "SCSI_LOW", HBA_IDLEN);
        strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->unit_number = cam_sim_unit(sim);
        cpi->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
        break;
    }

    default:
        printf("scsi_low: unsupported func_code = %d\n",
            ccb->ccb_h.func_code);
        ccb->ccb_h.status = CAM_REQ_INVALID;
        xpt_done(ccb);
        break;
    }
}

static int
scsi_low_attach_cam(slp)
    struct scsi_low_softc *slp;
{
    struct cam_devq *devq;
    int tagged_openings;

    devq = cam_simq_alloc(SCSI_LOW_NCCB);
    if (devq == NULL)
        return (ENOMEM);

    /*
     * ask the adapter what subunits are present
     */
    tagged_openings = min(slp->sl_openings, SCSI_LOW_MAXNEXUS);
    slp->sl_sim = cam_sim_alloc(scsi_low_scsi_action_cam,
        scsi_low_poll_cam, device_get_name(slp->sl_dev), slp,
        device_get_unit(slp->sl_dev), &slp->sl_lock,
        slp->sl_openings, tagged_openings, devq);
    if (slp->sl_sim == NULL) {
        cam_simq_free(devq);
        return ENODEV;
    }

    SCSI_LOW_LOCK(slp);
    if (xpt_bus_register(slp->sl_sim, slp->sl_dev, 0) != CAM_SUCCESS) {
        cam_sim_free(slp->sl_sim, TRUE);
        SCSI_LOW_UNLOCK(slp);
        return ENODEV;
    }

    if (xpt_create_path(&slp->sl_path, /*periph*/NULL,
        cam_sim_path(slp->sl_sim), CAM_TARGET_WILDCARD,
        CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(slp->sl_sim));
        cam_sim_free(slp->sl_sim, /*free_simq*/TRUE);
        SCSI_LOW_UNLOCK(slp);
        return ENODEV;
    }

    slp->sl_show_result = SHOW_CALCF_RES;   /* OK ?
*/ SCSI_LOW_UNLOCK(slp); return 0; } static int scsi_low_detach_cam(slp) struct scsi_low_softc *slp; { xpt_async(AC_LOST_DEVICE, slp->sl_path, NULL); xpt_free_path(slp->sl_path); xpt_bus_deregister(cam_sim_path(slp->sl_sim)); cam_sim_free(slp->sl_sim, /* free_devq */ TRUE); return 0; } static int scsi_low_ccb_setup_cam(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { union ccb *ccb = (union ccb *) cb->osdep; if ((cb->ccb_flags & CCB_SCSIIO) != 0) { cb->ccb_scp.scp_cmd = ccb->csio.cdb_io.cdb_bytes; cb->ccb_scp.scp_cmdlen = (int) ccb->csio.cdb_len; cb->ccb_scp.scp_data = ccb->csio.data_ptr; cb->ccb_scp.scp_datalen = (int) ccb->csio.dxfer_len; if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) cb->ccb_scp.scp_direction = SCSI_LOW_WRITE; else /* if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) */ cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = ccb->ccb_h.timeout / 1000; } else { scsi_low_unit_ready_cmd(cb); } return SCSI_LOW_START_QTAG; } static int scsi_low_done_cam(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { union ccb *ccb; ccb = (union ccb *) cb->osdep; if (cb->ccb_error == 0) { ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.resid = 0; } else { if (cb->ccb_rcnt >= slp->sl_max_retry) cb->ccb_error |= ABORTIO; if ((cb->ccb_flags & CCB_NORETRY) == 0 && (cb->ccb_error & ABORTIO) == 0) return EJUSTRETURN; if ((cb->ccb_error & SENSEIO) != 0) { memcpy(&ccb->csio.sense_data, &cb->ccb_sense, sizeof(ccb->csio.sense_data)); } ccb->ccb_h.status = scsi_low_translate_error_code(cb, &scsi_low_error_code_cam[0]); #ifdef SCSI_LOW_DIAGNOSTIC if ((cb->ccb_flags & CCB_SILENT) == 0 && cb->ccb_scp.scp_cmdlen > 0 && (scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] & SCSI_LOW_CMD_ABORT_WARNING) != 0) { device_printf(slp->sl_dev, "WARNING: scsi_low IO abort\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DIAGNOSTIC */ } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == 0) ccb->ccb_h.status |= CAM_REQ_CMP_ERR; if (cb->ccb_scp.scp_status == ST_UNKNOWN) ccb->csio.scsi_status = 0; /* XXX */ else ccb->csio.scsi_status = cb->ccb_scp.scp_status; if ((cb->ccb_flags & CCB_NOSDONE) == 0) xpt_done(ccb); return 0; } /************************************************************** * scsi low deactivate and activate **************************************************************/ int scsi_low_is_busy(slp) struct scsi_low_softc *slp; { if (slp->sl_nio > 0) return EBUSY; return 0; } int scsi_low_deactivate(slp) struct scsi_low_softc *slp; { slp->sl_flags |= HW_INACTIVE; callout_stop(&slp->sl_timeout_timer); callout_stop(&slp->sl_engage_timer); return 0; } int scsi_low_activate(slp) struct scsi_low_softc *slp; { int error; slp->sl_flags &= ~HW_INACTIVE; if ((error = scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL)) != 0) { slp->sl_flags |= HW_INACTIVE; return error; } slp->sl_timeout_count = 0; callout_reset(&slp->sl_timeout_timer, hz / SCSI_LOW_TIMEOUT_HZ, scsi_low_timeout, slp); return 0; } /************************************************************** * scsi low log **************************************************************/ #ifdef SCSI_LOW_DIAGNOSTIC static void scsi_low_msg_log_init(struct scsi_low_msg_log *); static void scsi_low_msg_log_write(struct scsi_low_msg_log *, u_int8_t *, int); static void scsi_low_msg_log_show(struct scsi_low_msg_log *, char *, int); static void scsi_low_msg_log_init(slmlp) struct scsi_low_msg_log *slmlp; { slmlp->slml_ptr = 0; } static void scsi_low_msg_log_write(slmlp, datap, len) struct scsi_low_msg_log *slmlp; u_int8_t *datap; int len; { int ptr, ind; if 
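/*
 * Editor's note: the SCSI_LOW_DIAGNOSTIC message log below is a
 * bounded array that silently drops entries once full and zero-pads
 * short messages.  A standalone sketch of that bookkeeping; the sizes
 * here are illustrative, not the driver's constants.
 */
#if 0
#include <stdio.h>

#define LOG_ENTRIES 16
#define MSG_BYTES   4

struct msg_log {
    int ptr;
    unsigned char msg[LOG_ENTRIES][MSG_BYTES];
};

static void
log_write(struct msg_log *lp, const unsigned char *data, int len)
{
    int i;

    if (lp->ptr >= LOG_ENTRIES)     /* full: drop, as the driver does */
        return;
    for (i = 0; i < MSG_BYTES && i < len; i++)
        lp->msg[lp->ptr][i] = data[i];
    for (; i < MSG_BYTES; i++)      /* zero-pad short messages */
        lp->msg[lp->ptr][i] = 0;
    lp->ptr++;
}

int
main(void)
{
    struct msg_log lg = { 0 };
    unsigned char m[2] = { 0x01, 0x03 };

    log_write(&lg, m, 2);
    printf("entry 0: %02x %02x %02x %02x\n",
        lg.msg[0][0], lg.msg[0][1], lg.msg[0][2], lg.msg[0][3]);
    return 0;
}
#endif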
(slmlp->slml_ptr >= SCSI_LOW_MSG_LOG_DATALEN) return; ptr = slmlp->slml_ptr ++; for (ind = 0; ind < sizeof(slmlp->slml_msg[0]) && ind < len; ind ++) slmlp->slml_msg[ptr].msg[ind] = datap[ind]; for ( ; ind < sizeof(slmlp->slml_msg[0]); ind ++) slmlp->slml_msg[ptr].msg[ind] = 0; } static void scsi_low_msg_log_show(slmlp, s, len) struct scsi_low_msg_log *slmlp; char *s; int len; { int ptr, ind; printf("%s: (%d) ", s, slmlp->slml_ptr); for (ptr = 0; ptr < slmlp->slml_ptr; ptr ++) { for (ind = 0; ind < len && ind < sizeof(slmlp->slml_msg[0]); ind ++) { printf("[%x]", (u_int) slmlp->slml_msg[ptr].msg[ind]); } printf(">"); } printf("\n"); } #endif /* SCSI_LOW_DIAGNOSTIC */ /************************************************************** * power control **************************************************************/ static void scsi_low_engage(arg) void *arg; { struct scsi_low_softc *slp = arg; SCSI_LOW_ASSERT_LOCKED(slp); switch (slp->sl_rstep) { case 0: slp->sl_rstep ++; (*slp->sl_funcs->scsi_low_power) (slp, SCSI_LOW_ENGAGE); callout_reset(&slp->sl_engage_timer, hz / 1000, scsi_low_engage, slp); break; case 1: slp->sl_rstep ++; slp->sl_flags &= ~HW_RESUME; scsi_low_start(slp); break; case 2: break; } } static int scsi_low_init(slp, flags) struct scsi_low_softc *slp; u_int flags; { int rv = 0; slp->sl_flags |= HW_INITIALIZING; /* clear power control timeout */ if ((slp->sl_flags & HW_POWERCTRL) != 0) { callout_stop(&slp->sl_engage_timer); slp->sl_flags &= ~(HW_POWDOWN | HW_RESUME); slp->sl_active = 1; slp->sl_powc = SCSI_LOW_POWDOWN_TC; } /* reset current nexus */ scsi_low_reset_nexus(slp, flags); if ((slp->sl_flags & HW_INACTIVE) != 0) { rv = EBUSY; goto out; } if (flags != SCSI_LOW_RESTART_SOFT) { rv = ((*slp->sl_funcs->scsi_low_init) (slp, flags)); } out: slp->sl_flags &= ~HW_INITIALIZING; return rv; } /************************************************************** * allocate lun_info **************************************************************/ static struct lun_info * scsi_low_alloc_li(ti, lun, alloc) struct targ_info *ti; int lun; int alloc; { struct scsi_low_softc *slp = ti->ti_sc; struct lun_info *li; li = LIST_FIRST(&ti->ti_litab); if (li != NULL) { if (li->li_lun == lun) return li; while ((li = LIST_NEXT(li, lun_chain)) != NULL) { if (li->li_lun == lun) { LIST_REMOVE(li, lun_chain); LIST_INSERT_HEAD(&ti->ti_litab, li, lun_chain); return li; } } } if (alloc == 0) return li; li = SCSI_LOW_MALLOC(ti->ti_lunsize); if (li == NULL) panic("no lun info mem"); bzero(li, ti->ti_lunsize); li->li_lun = lun; li->li_ti = ti; li->li_cfgflags = SCSI_LOW_SYNC | SCSI_LOW_LINK | SCSI_LOW_DISC | SCSI_LOW_QTAG; li->li_quirks = li->li_diskflags = SCSI_LOW_DISK_LFLAGS; li->li_flags_valid = SCSI_LOW_LUN_FLAGS_USER_VALID; #ifdef SCSI_LOW_FLAGS_QUIRKS_OK li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_QUIRKS_VALID; #endif /* SCSI_LOW_FLAGS_QUIRKS_OK */ li->li_qtagbits = (u_int) -1; TAILQ_INIT(&li->li_discq); LIST_INSERT_HEAD(&ti->ti_litab, li, lun_chain); /* host specific structure initialization per lun */ if (slp->sl_funcs->scsi_low_lun_init != NULL) (*slp->sl_funcs->scsi_low_lun_init) (slp, ti, li, SCSI_LOW_INFO_ALLOC); scsi_low_calcf_lun(li); return li; } /************************************************************** * allocate targ_info **************************************************************/ static struct targ_info * scsi_low_alloc_ti(slp, targ) struct scsi_low_softc *slp; int targ; { struct targ_info *ti; if (TAILQ_FIRST(&slp->sl_titab) == NULL) TAILQ_INIT(&slp->sl_titab); ti = 
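/*
 * Editor's note: scsi_low_alloc_li() above keeps the LUN list in
 * most-recently-used order: a hit anywhere in the list is unlinked and
 * reinserted at the head so the common repeat lookup is O(1).  A
 * standalone sketch of that move-to-front lookup (illustrative types,
 * not the driver's LIST_* macros):
 */
#if 0
#include <stddef.h>

struct lun {
    int lun;
    struct lun *next;
};

static struct lun *
lookup_mtf(struct lun **head, int lun)
{
    struct lun *p, *prev;

    for (prev = NULL, p = *head; p != NULL; prev = p, p = p->next) {
        if (p->lun != lun)
            continue;
        if (prev != NULL) {     /* unlink, reinsert at head */
            prev->next = p->next;
            p->next = *head;
            *head = p;
        }
        return p;
    }
    return NULL;                /* miss: caller may allocate */
}
#endif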
SCSI_LOW_MALLOC(slp->sl_targsize); if (ti == NULL) panic("%s short of memory", device_get_nameunit(slp->sl_dev)); bzero(ti, slp->sl_targsize); ti->ti_id = targ; ti->ti_sc = slp; slp->sl_ti[targ] = ti; TAILQ_INSERT_TAIL(&slp->sl_titab, ti, ti_chain); LIST_INIT(&ti->ti_litab); ti->ti_quirks = ti->ti_diskflags = SCSI_LOW_DISK_TFLAGS; ti->ti_owidth = SCSI_LOW_BUS_WIDTH_8; ti->ti_flags_valid = SCSI_LOW_TARG_FLAGS_USER_VALID; #ifdef SCSI_LOW_FLAGS_QUIRKS_OK ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_QUIRKS_VALID; #endif /* SCSI_LOW_FLAGS_QUIRKS_OK */ if (slp->sl_funcs->scsi_low_targ_init != NULL) { (*slp->sl_funcs->scsi_low_targ_init) (slp, ti, SCSI_LOW_INFO_ALLOC); } scsi_low_calcf_target(ti); return ti; } static void scsi_low_free_ti(slp) struct scsi_low_softc *slp; { struct targ_info *ti, *tib; struct lun_info *li, *nli; for (ti = TAILQ_FIRST(&slp->sl_titab); ti; ti = tib) { for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = nli) { if (slp->sl_funcs->scsi_low_lun_init != NULL) { (*slp->sl_funcs->scsi_low_lun_init) (slp, ti, li, SCSI_LOW_INFO_DEALLOC); } nli = LIST_NEXT(li, lun_chain); SCSI_LOW_FREE(li); } if (slp->sl_funcs->scsi_low_targ_init != NULL) { (*slp->sl_funcs->scsi_low_targ_init) (slp, ti, SCSI_LOW_INFO_DEALLOC); } tib = TAILQ_NEXT(ti, ti_chain); SCSI_LOW_FREE(ti); } } /************************************************************** * timeout **************************************************************/ void scsi_low_bus_idle(slp) struct scsi_low_softc *slp; { slp->sl_retry_sel = 0; if (slp->sl_Tnexus == NULL) scsi_low_start(slp); } static void scsi_low_timeout(arg) void *arg; { struct scsi_low_softc *slp = arg; SCSI_LOW_ASSERT_LOCKED(slp); (void) scsi_low_timeout_check(slp); callout_schedule(&slp->sl_timeout_timer, hz / SCSI_LOW_TIMEOUT_HZ); } static int scsi_low_timeout_check(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *cb = NULL; /* XXX */ /* selection restart */ if (slp->sl_retry_sel != 0) { slp->sl_retry_sel = 0; if (slp->sl_Tnexus != NULL) goto step1; cb = TAILQ_FIRST(&slp->sl_start); if (cb == NULL) goto step1; if (cb->ccb_selrcnt >= SCSI_LOW_MAX_SELECTION_RETRY) { cb->ccb_flags |= CCB_NORETRY; cb->ccb_error |= SELTIMEOUTIO; if (scsi_low_revoke_ccb(slp, cb, 1) != NULL) panic("%s: ccb not finished", device_get_nameunit(slp->sl_dev)); } if (slp->sl_Tnexus == NULL) scsi_low_start(slp); } /* call hardware timeout */ step1: if (slp->sl_funcs->scsi_low_timeout != NULL) { (*slp->sl_funcs->scsi_low_timeout) (slp); } if (slp->sl_timeout_count ++ < SCSI_LOW_TIMEOUT_CHECK_INTERVAL * SCSI_LOW_TIMEOUT_HZ) return 0; slp->sl_timeout_count = 0; if (slp->sl_nio > 0) { if ((cb = slp->sl_Qnexus) != NULL) { cb->ccb_tc -= SCSI_LOW_TIMEOUT_CHECK_INTERVAL; if (cb->ccb_tc < 0) goto bus_reset; } else if (slp->sl_disc == 0) { if ((cb = TAILQ_FIRST(&slp->sl_start)) == NULL) return 0; cb->ccb_tc -= SCSI_LOW_TIMEOUT_CHECK_INTERVAL; if (cb->ccb_tc < 0) goto bus_reset; } else for (ti = TAILQ_FIRST(&slp->sl_titab); ti != NULL; ti = TAILQ_NEXT(ti, ti_chain)) { if (ti->ti_disc == 0) continue; for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = LIST_NEXT(li, lun_chain)) { for (cb = TAILQ_FIRST(&li->li_discq); cb != NULL; cb = TAILQ_NEXT(cb, ccb_chain)) { cb->ccb_tc -= SCSI_LOW_TIMEOUT_CHECK_INTERVAL; if (cb->ccb_tc < 0) goto bus_reset; } } } } else if ((slp->sl_flags & HW_POWERCTRL) != 0) { if ((slp->sl_flags & (HW_POWDOWN | HW_RESUME)) != 0) return 0; if (slp->sl_active != 0) { slp->sl_powc = SCSI_LOW_POWDOWN_TC; slp->sl_active = 0; return 0; } slp->sl_powc 
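/*
 * Editor's note: scsi_low_timeout_check() above charges a fixed
 * interval against the time budget (ccb_tc) of whichever request is
 * active and declares a bus hang once the budget goes negative.  A
 * standalone sketch of that bookkeeping (values illustrative):
 */
#if 0
#include <stdio.h>

#define CHECK_INTERVAL 2        /* seconds per watchdog sweep */

struct req {
    int tc;                     /* remaining budget in seconds */
};

static int
watchdog_sweep(struct req *active)
{
    if (active == NULL)
        return 0;
    active->tc -= CHECK_INTERVAL;
    return active->tc < 0;      /* nonzero: treat the bus as hung */
}

int
main(void)
{
    struct req r = { 3 };

    printf("%d %d\n", watchdog_sweep(&r), watchdog_sweep(&r)); /* 0 1 */
    return 0;
}
#endif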
--;
        if (slp->sl_powc < 0) {
            slp->sl_powc = SCSI_LOW_POWDOWN_TC;
            slp->sl_flags |= HW_POWDOWN;
            (*slp->sl_funcs->scsi_low_power) (slp, SCSI_LOW_POWDOWN);
        }
    }
    return 0;

bus_reset:
    cb->ccb_error |= TIMEOUTIO;
    device_printf(slp->sl_dev, "slccb (0x%lx) timeout!\n", (u_long) cb);
    scsi_low_info(slp, NULL, "scsi bus hangup, trying to recover");
    scsi_low_init(slp, SCSI_LOW_RESTART_HARD);
    scsi_low_start(slp);
    return ERESTART;
}

static int
scsi_low_abort_ccb(slp, cb)
    struct scsi_low_softc *slp;
    struct slccb *cb;
{
    struct targ_info *ti;
    struct lun_info *li;
    u_int msg;

    if (cb == NULL)
        return EINVAL;
    if ((cb->ccb_omsgoutflag &
        (SCSI_LOW_MSG_ABORT | SCSI_LOW_MSG_ABORT_QTAG)) != 0)
        return EBUSY;

    ti = cb->ti;
    li = cb->li;
    if (cb->ccb_tag == SCSI_LOW_UNKTAG)
        msg = SCSI_LOW_MSG_ABORT;
    else
        msg = SCSI_LOW_MSG_ABORT_QTAG;

    cb->ccb_error |= ABORTIO;
    cb->ccb_flags |= CCB_NORETRY;
    scsi_low_ccb_message_assert(cb, msg);

    if (cb == slp->sl_Qnexus) {
        scsi_low_assert_msg(slp, ti, msg, 1);
    } else if ((cb->ccb_flags & CCB_DISCQ) != 0) {
        if (scsi_low_revoke_ccb(slp, cb, 0) == NULL)
            panic("%s: revoked ccb done",
                device_get_nameunit(slp->sl_dev));
        cb->ccb_flags |= CCB_STARTQ;
        TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain);
        if (slp->sl_Tnexus == NULL)
            scsi_low_start(slp);
    } else {
        if (scsi_low_revoke_ccb(slp, cb, 1) != NULL)
            panic("%s: revoked ccb retried",
                device_get_nameunit(slp->sl_dev));
    }
    return 0;
}

/**************************************************************
 * Generic SCSI INTERFACE
 **************************************************************/
int
scsi_low_attach(slp, openings, ntargs, nluns, targsize, lunsize)
    struct scsi_low_softc *slp;
    int openings, ntargs, nluns, targsize, lunsize;
{
    struct targ_info *ti;
    struct lun_info *li;
    int i, nccb, rv;

    if (ntargs > SCSI_LOW_NTARGETS) {
        printf("scsi_low: too many targets (%d)\n", ntargs);
        printf("increase the kernel option SCSI_LOW_NTARGETS\n");
        return EINVAL;
    }

    if (openings <= 0)
        slp->sl_openings = (SCSI_LOW_NCCB / ntargs);
    else
        slp->sl_openings = openings;
    slp->sl_ntargs = ntargs;
    slp->sl_nluns = nluns;
    slp->sl_max_retry = SCSI_LOW_MAX_RETRY;

    if (lunsize < sizeof(struct lun_info))
        lunsize = sizeof(struct lun_info);

    if (targsize < sizeof(struct targ_info))
        targsize = sizeof(struct targ_info);

    slp->sl_targsize = targsize;
    for (i = 0; i < ntargs; i ++) {
        ti = scsi_low_alloc_ti(slp, i);
        ti->ti_lunsize = lunsize;
        li = scsi_low_alloc_li(ti, 0, 1);
    }

    /* initialize queue */
    nccb = openings * ntargs;
    if (nccb >= SCSI_LOW_NCCB || nccb <= 0)
        nccb = SCSI_LOW_NCCB;
    scsi_low_init_ccbque(nccb);
    TAILQ_INIT(&slp->sl_start);

    /* call the OS-dependent attach */
    rv = scsi_low_attach_cam(slp);
    if (rv != 0) {
        device_printf(slp->sl_dev,
            "scsi_low_attach: osdep attach failed\n");
        return (rv);
    }

    /* check hardware */
    DELAY(1000);    /* wait for 1ms */
    SCSI_LOW_LOCK(slp);
    if (scsi_low_init(slp, SCSI_LOW_RESTART_HARD) != 0) {
        device_printf(slp->sl_dev,
            "scsi_low_attach: initialization failed\n");
        SCSI_LOW_UNLOCK(slp);
        return EINVAL;
    }

    /* start the watchdog */
    slp->sl_timeout_count = 0;
    callout_reset(&slp->sl_timeout_timer, hz / SCSI_LOW_TIMEOUT_HZ,
        scsi_low_timeout, slp);
    mtx_lock(&sl_tab_lock);
    LIST_INSERT_HEAD(&sl_tab, slp, sl_chain);
    mtx_unlock(&sl_tab_lock);

    /* fake call */
    scsi_low_abort_ccb(slp, scsi_low_find_ccb(slp, 0, 0, NULL));

#ifdef SCSI_LOW_START_UP_CHECK
    /* probing devices */
    scsi_low_start_up(slp);
#endif /* SCSI_LOW_START_UP_CHECK */
    SCSI_LOW_UNLOCK(slp);

    return 0;
}

int
scsi_low_detach(slp)
    struct scsi_low_softc *slp;
{
    int rv;

    SCSI_LOW_LOCK(slp);
    if (scsi_low_is_busy(slp) != 0) {
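/*
 * Editor's note: scsi_low_abort_ccb() above chooses the abort message
 * by whether the ccb carries a queue tag: untagged nexuses get ABORT,
 * tagged ones get ABORT TAG so only the one command dies.  Standalone
 * sketch; the message codes are the SCSI-2 values:
 */
#if 0
#include <stdio.h>

#define MSG_ABORT       0x06    /* abort the whole I_T_L nexus */
#define MSG_ABORT_QTAG  0x0d    /* ABORT TAG: abort this tag only */
#define UNKTAG          (-1)

static int
abort_msg_for(int tag)
{
    return tag == UNKTAG ? MSG_ABORT : MSG_ABORT_QTAG;
}

int
main(void)
{
    printf("0x%02x 0x%02x\n", abort_msg_for(UNKTAG), abort_msg_for(7));
    return 0;
}
#endif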
SCSI_LOW_UNLOCK(slp); return EBUSY; } scsi_low_deactivate(slp); rv = scsi_low_detach_cam(slp); if (rv != 0) { SCSI_LOW_UNLOCK(slp); return EBUSY; } scsi_low_free_ti(slp); SCSI_LOW_UNLOCK(slp); callout_drain(&slp->sl_timeout_timer); callout_drain(&slp->sl_engage_timer); mtx_lock(&sl_tab_lock); LIST_REMOVE(slp, sl_chain); mtx_unlock(&sl_tab_lock); return 0; } /************************************************************** * Generic enqueue **************************************************************/ static int scsi_low_enqueue(slp, ti, li, cb, flags, msg) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; struct slccb *cb; u_int flags, msg; { cb->ti = ti; cb->li = li; scsi_low_ccb_message_assert(cb, msg); cb->ccb_otag = cb->ccb_tag = SCSI_LOW_UNKTAG; scsi_low_alloc_qtag(cb); cb->ccb_flags = flags | CCB_STARTQ; cb->ccb_tc = cb->ccb_tcmax = SCSI_LOW_MIN_TOUT; cb->ccb_error |= PENDINGIO; if ((flags & CCB_URGENT) != 0) { TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain); } else { TAILQ_INSERT_TAIL(&slp->sl_start, cb, ccb_chain); } slp->sl_nio ++; if (slp->sl_Tnexus == NULL) scsi_low_start(slp); return 0; } static int scsi_low_message_enqueue(slp, ti, li, flags) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; u_int flags; { struct slccb *cb; u_int tmsgflags; tmsgflags = ti->ti_setup_msg; ti->ti_setup_msg = 0; flags |= CCB_NORETRY; if ((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL) return ENOMEM; cb->osdep = NULL; cb->bp = NULL; scsi_low_enqueue(slp, ti, li, cb, flags, tmsgflags); return 0; } /************************************************************** * Generic Start & Done **************************************************************/ #define SLSC_MODE_SENSE_SHORT 0x1a static u_int8_t ss_cmd[6] = {START_STOP, 0, 0, 0, SSS_START, 0}; static u_int8_t sms_cmd[6] = {SLSC_MODE_SENSE_SHORT, 0x08, 0x0a, 0, sizeof(struct scsi_low_mode_sense_data), 0}; static u_int8_t inq_cmd[6] = {INQUIRY, 0, 0, 0, sizeof(struct scsi_low_inq_data), 0}; static u_int8_t unit_ready_cmd[6]; static int scsi_low_setup_start(struct scsi_low_softc *, struct targ_info *, struct lun_info *, struct slccb *); static int scsi_low_sense_abort_start(struct scsi_low_softc *, struct targ_info *, struct lun_info *, struct slccb *); static int scsi_low_resume(struct scsi_low_softc *); static void scsi_low_unit_ready_cmd(cb) struct slccb *cb; { cb->ccb_scp.scp_cmd = unit_ready_cmd; cb->ccb_scp.scp_cmdlen = sizeof(unit_ready_cmd); cb->ccb_scp.scp_datalen = 0; cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 15; } static int scsi_low_sense_abort_start(slp, ti, li, cb) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; struct slccb *cb; { cb->ccb_scp.scp_cmdlen = 6; bzero(cb->ccb_scsi_cmd, cb->ccb_scp.scp_cmdlen); cb->ccb_scsi_cmd[0] = REQUEST_SENSE; cb->ccb_scsi_cmd[4] = sizeof(cb->ccb_sense); cb->ccb_scp.scp_cmd = cb->ccb_scsi_cmd; cb->ccb_scp.scp_data = (u_int8_t *) &cb->ccb_sense; cb->ccb_scp.scp_datalen = sizeof(cb->ccb_sense); cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 15; scsi_low_ccb_message_clear(cb); if ((cb->ccb_flags & CCB_CLEARQ) != 0) { scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); } else { bzero(&cb->ccb_sense, sizeof(cb->ccb_sense)); #ifdef SCSI_LOW_NEGOTIATE_BEFORE_SENSE scsi_low_assert_msg(slp, ti, ti->ti_setup_msg_done, 0); #endif /* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */ } return SCSI_LOW_START_NO_QTAG; } static int scsi_low_setup_start(slp, ti, li, cb) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; struct slccb 
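/*
 * Editor's note: scsi_low_sense_abort_start() above builds a 6-byte
 * REQUEST SENSE CDB in place: zero the block, set the opcode, and put
 * the allocation length in byte 4.  A standalone sketch; the length
 * value is illustrative.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define REQUEST_SENSE 0x03
#define SENSE_LEN     18

static void
build_sense_cdb(unsigned char cdb[6])
{
    memset(cdb, 0, 6);
    cdb[0] = REQUEST_SENSE;
    cdb[4] = SENSE_LEN;     /* allocation length */
}

int
main(void)
{
    unsigned char cdb[6];
    int i;

    build_sense_cdb(cdb);
    for (i = 0; i < 6; i++)
        printf("%02x ", cdb[i]);
    printf("\n");
    return 0;
}
#endif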
*cb;
{

    switch (li->li_state) {
    case SCSI_LOW_LUN_SLEEP:
        scsi_low_unit_ready_cmd(cb);
        break;

    case SCSI_LOW_LUN_START:
        cb->ccb_scp.scp_cmd = ss_cmd;
        cb->ccb_scp.scp_cmdlen = sizeof(ss_cmd);
        cb->ccb_scp.scp_datalen = 0;
        cb->ccb_scp.scp_direction = SCSI_LOW_READ;
        cb->ccb_tcmax = 30;
        break;

    case SCSI_LOW_LUN_INQ:
        cb->ccb_scp.scp_cmd = inq_cmd;
        cb->ccb_scp.scp_cmdlen = sizeof(inq_cmd);
        cb->ccb_scp.scp_data = (u_int8_t *)&li->li_inq;
        cb->ccb_scp.scp_datalen = sizeof(li->li_inq);
        cb->ccb_scp.scp_direction = SCSI_LOW_READ;
        cb->ccb_tcmax = 15;
        break;

    case SCSI_LOW_LUN_MODEQ:
        cb->ccb_scp.scp_cmd = sms_cmd;
        cb->ccb_scp.scp_cmdlen = sizeof(sms_cmd);
        cb->ccb_scp.scp_data = (u_int8_t *)&li->li_sms;
        cb->ccb_scp.scp_datalen = sizeof(li->li_sms);
        cb->ccb_scp.scp_direction = SCSI_LOW_READ;
        cb->ccb_tcmax = 15;
        return SCSI_LOW_START_QTAG;

    default:
        panic("%s: no setup phase", device_get_nameunit(slp->sl_dev));
    }

    return SCSI_LOW_START_NO_QTAG;
}

static int
scsi_low_resume(slp)
    struct scsi_low_softc *slp;
{

    if (slp->sl_flags & HW_RESUME)
        return EJUSTRETURN;
    slp->sl_flags &= ~HW_POWDOWN;
    if (slp->sl_funcs->scsi_low_power != NULL) {
        slp->sl_flags |= HW_RESUME;
        slp->sl_rstep = 0;
        (*slp->sl_funcs->scsi_low_power) (slp, SCSI_LOW_ENGAGE);
        callout_reset(&slp->sl_engage_timer, hz / 1000,
            scsi_low_engage, slp);
        return EJUSTRETURN;
    }
    return 0;
}

static void
scsi_low_start(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti;
    struct lun_info *li;
    struct slccb *cb;
    int rv;

    /* is the hardware inactive or still initializing? */
    if ((slp->sl_flags & (HW_INACTIVE | HW_INITIALIZING)) != 0)
        return;

    /* is the hardware powered up? */
    if ((slp->sl_flags & HW_POWERCTRL) != 0) {
        slp->sl_active ++;
        if (slp->sl_flags & (HW_POWDOWN | HW_RESUME)) {
            if (scsi_low_resume(slp) == EJUSTRETURN)
                return;
        }
    }

    /* setup nexus */
#ifdef SCSI_LOW_DIAGNOSTIC
    if (slp->sl_Tnexus || slp->sl_Lnexus || slp->sl_Qnexus) {
        scsi_low_info(slp, NULL, "NEXUS INCONSISTENT");
        panic("%s: inconsistent", device_get_nameunit(slp->sl_dev));
    }
#endif /* SCSI_LOW_DIAGNOSTIC */

    for (cb = TAILQ_FIRST(&slp->sl_start); cb != NULL;
        cb = TAILQ_NEXT(cb, ccb_chain)) {
        li = cb->li;
        if (li->li_disc == 0) {
            goto scsi_low_cmd_start;
        } else if (li->li_nqio > 0) {
            if (li->li_nqio < li->li_maxnqio ||
                (cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) != 0)
                goto scsi_low_cmd_start;
        }
    }
    return;

scsi_low_cmd_start:
    cb->ccb_flags &= ~CCB_STARTQ;
    TAILQ_REMOVE(&slp->sl_start, cb, ccb_chain);
    ti = cb->ti;

    /* clear all error flag bits (for restart) */
    cb->ccb_error = 0;
    cb->ccb_datalen = -1;
    cb->ccb_scp.scp_status = ST_UNKNOWN;

    /* setup nexus pointers */
    slp->sl_Qnexus = cb;
    slp->sl_Lnexus = li;
    slp->sl_Tnexus = ti;

    /* initialize msgsys */
    scsi_low_init_msgsys(slp, ti);

    /* exec cmd */
    if ((cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) != 0) {
        /* CA state or forced abort */
        rv = scsi_low_sense_abort_start(slp, ti, li, cb);
    } else if (li->li_state >= SCSI_LOW_LUN_OK) {
        cb->ccb_flags &= ~CCB_INTERNAL;
        rv = scsi_low_ccb_setup_cam(slp, cb);
        if (cb->ccb_msgoutflag != 0) {
            scsi_low_ccb_message_exec(slp, cb);
        }
    } else {
        cb->ccb_flags |= CCB_INTERNAL;
        rv = scsi_low_setup_start(slp, ti, li, cb);
    }

    /* allocate qtag */
#define SCSI_LOW_QTAG_OK (SCSI_LOW_QTAG | SCSI_LOW_DISC)
    if (rv == SCSI_LOW_START_QTAG &&
        (li->li_flags & SCSI_LOW_QTAG_OK) == SCSI_LOW_QTAG_OK &&
        li->li_maxnqio > 0) {
        u_int qmsg;

        scsi_low_activate_qtag(cb);
        if ((scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] &
            SCSI_LOW_CMD_ORDERED_QTAG) != 0)
            qmsg = SCSI_LOW_MSG_ORDERED_QTAG;
        else if ((cb->ccb_flags & CCB_URGENT) != 0)
            qmsg = SCSI_LOW_MSG_HEAD_QTAG;
        else
            qmsg = SCSI_LOW_MSG_SIMPLE_QTAG;
        scsi_low_assert_msg(slp, ti, qmsg, 0);
    }

    /* timeout */
    if (cb->ccb_tcmax < SCSI_LOW_MIN_TOUT)
        cb->ccb_tcmax = SCSI_LOW_MIN_TOUT;
    cb->ccb_tc = cb->ccb_tcmax;

    /* setup saved scsi data pointer */
    cb->ccb_sscp = cb->ccb_scp;

    /* setup current scsi pointer */
    slp->sl_scp = cb->ccb_sscp;
    slp->sl_error = cb->ccb_error;

    /* always assert an identify msg */
    scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_IDENTIFY, 0);

    /* debug section */
#ifdef SCSI_LOW_DIAGNOSTIC
    scsi_low_msg_log_init(&ti->ti_log_msgin);
    scsi_low_msg_log_init(&ti->ti_log_msgout);
#endif /* SCSI_LOW_DIAGNOSTIC */

    /* selection start */
    slp->sl_selid = cb;
    rv = ((*slp->sl_funcs->scsi_low_start_bus) (slp, cb));
    if (rv == SCSI_LOW_START_OK) {
#ifdef SCSI_LOW_STATICS
        scsi_low_statics.nexus_win ++;
#endif /* SCSI_LOW_STATICS */
        return;
    }

    scsi_low_arbit_fail(slp, cb);
#ifdef SCSI_LOW_STATICS
    scsi_low_statics.nexus_fail ++;
#endif /* SCSI_LOW_STATICS */
}

void
scsi_low_arbit_fail(slp, cb)
    struct scsi_low_softc *slp;
    struct slccb *cb;
{
    struct targ_info *ti = cb->ti;

    scsi_low_deactivate_qtag(cb);
    scsi_low_ccb_message_retry(cb);
    cb->ccb_flags |= CCB_STARTQ;
    TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain);

    scsi_low_bus_release(slp, ti);

    cb->ccb_selrcnt ++;
    if (slp->sl_disc == 0) {
#ifdef SCSI_LOW_DIAGNOSTIC
        device_printf(slp->sl_dev, "try selection again\n");
#endif /* SCSI_LOW_DIAGNOSTIC */
        slp->sl_retry_sel = 1;
    }
}

static void
scsi_low_bus_release(slp, ti)
    struct scsi_low_softc *slp;
    struct targ_info *ti;
{

    if (ti->ti_disc > 0) {
        SCSI_LOW_SETUP_PHASE(ti, PH_DISC);
    } else {
        SCSI_LOW_SETUP_PHASE(ti, PH_NULL);
    }

    /* clear all nexus pointers */
    slp->sl_Qnexus = NULL;
    slp->sl_Lnexus = NULL;
    slp->sl_Tnexus = NULL;

    /* clear selection assert */
    slp->sl_selid = NULL;

    /* clear nexus data */
    slp->sl_scp.scp_direction = SCSI_LOW_RWUNK;

    /* clear phase change counter */
    slp->sl_ph_count = 0;
}

static int
scsi_low_setup_done(slp, cb)
    struct scsi_low_softc *slp;
    struct slccb *cb;
{
    struct targ_info *ti;
    struct lun_info *li;

    ti = cb->ti;
    li = cb->li;

    if (cb->ccb_rcnt >= slp->sl_max_retry) {
        cb->ccb_error |= ABORTIO;
        return SCSI_LOW_DONE_COMPLETE;
    }

    /* XXX: special hack for selection timeout */
    if (li->li_state == SCSI_LOW_LUN_SLEEP &&
        (cb->ccb_error & SELTIMEOUTIO) != 0) {
        cb->ccb_error |= ABORTIO;
        return SCSI_LOW_DONE_COMPLETE;
    }

    switch (li->li_state) {
    case SCSI_LOW_LUN_INQ:
        if (cb->ccb_error != 0) {
            li->li_diskflags &=
                ~(SCSI_LOW_DISK_LINK | SCSI_LOW_DISK_QTAG);
            if (li->li_lun > 0)
                goto resume;
            ti->ti_diskflags &=
                ~(SCSI_LOW_DISK_SYNC | SCSI_LOW_DISK_WIDE);
        } else if ((li->li_inq.sd_version & 7) >= 2 ||
            (li->li_inq.sd_len >= 4)) {
            if ((li->li_inq.sd_support & 0x2) == 0)
                li->li_diskflags &= ~SCSI_LOW_DISK_QTAG;
            if ((li->li_inq.sd_support & 0x8) == 0)
                li->li_diskflags &= ~SCSI_LOW_DISK_LINK;
            if (li->li_lun > 0)
                goto resume;
            if ((li->li_inq.sd_support & 0x10) == 0)
                ti->ti_diskflags &= ~SCSI_LOW_DISK_SYNC;
            if ((li->li_inq.sd_support & 0x20) == 0)
                ti->ti_diskflags &= ~SCSI_LOW_DISK_WIDE_16;
            if ((li->li_inq.sd_support & 0x40) == 0)
                ti->ti_diskflags &= ~SCSI_LOW_DISK_WIDE_32;
        } else {
            li->li_diskflags &=
                ~(SCSI_LOW_DISK_QTAG | SCSI_LOW_DISK_LINK);
            if (li->li_lun > 0)
                goto resume;
            ti->ti_diskflags &= ~SCSI_LOW_DISK_WIDE;
        }
        ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_DISK_VALID;
resume:
        scsi_low_calcf_target(ti);
        scsi_low_calcf_lun(li);
        break;

    case SCSI_LOW_LUN_MODEQ:
        if (cb->ccb_error != 0) {
            if (cb->ccb_error & SENSEIO) {
#ifdef SCSI_LOW_DEBUG
                if (scsi_low_debug & SCSI_LOW_DEBUG_SENSE) {
                    int error_code, sense_key, asc, ascq;
                    scsi_extract_sense(&cb->ccb_sense,
                        &error_code, &sense_key, &asc, &ascq);
                    printf("SENSE: [%x][%x][%x][%x]\n",
                        error_code, sense_key, asc, ascq);
                }
#endif /* SCSI_LOW_DEBUG */
            } else {
                li->li_diskflags &= ~SCSI_LOW_DISK_QTAG;
            }
        } else if ((li->li_sms.sms_cmp.cmp_page & 0x3f) == 0x0a) {
            if (li->li_sms.sms_cmp.cmp_qc & 0x02)
                li->li_qflags |= SCSI_LOW_QFLAG_CA_QCLEAR;
            else
                li->li_qflags &= ~SCSI_LOW_QFLAG_CA_QCLEAR;
            if ((li->li_sms.sms_cmp.cmp_qc & 0x01) != 0)
                li->li_diskflags &= ~SCSI_LOW_DISK_QTAG;
        }
        li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_DISK_VALID;
        scsi_low_calcf_lun(li);
        break;

    default:
        break;
    }

    li->li_state ++;
    if (li->li_state == SCSI_LOW_LUN_OK) {
        scsi_low_calcf_target(ti);
        scsi_low_calcf_lun(li);
        if (li->li_flags_valid == SCSI_LOW_LUN_FLAGS_ALL_VALID &&
            (slp->sl_show_result & SHOW_CALCF_RES) != 0) {
            scsi_low_calcf_show(li);
        }
    }

    cb->ccb_rcnt --;
    return SCSI_LOW_DONE_RETRY;
}

static int
scsi_low_done(slp, cb)
    struct scsi_low_softc *slp;
    struct slccb *cb;
{
    int rv;

    if (cb->ccb_error == 0) {
        if ((cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) != 0) {
#ifdef SCSI_LOW_QCLEAR_AFTER_CA
            /* XXX:
             * The SCSI-2 draft suggests that the QErr bit of
             * mode page 0x0a determines whether the target
             * aborts or continues queued I/Os once the CA
             * state is resolved.  However, many targets do
             * not seem to support page 0x0a, so we manually
             * clear the queued I/Os after the CA state.
             */
            if ((cb->ccb_flags & CCB_CLEARQ) == 0) {
                cb->ccb_rcnt --;
                cb->ccb_flags |= CCB_CLEARQ;
                goto retry;
            }
#endif /* SCSI_LOW_QCLEAR_AFTER_CA */
            if ((cb->ccb_flags & CCB_SENSE) != 0)
                cb->ccb_error |= (SENSEIO | ABORTIO);
            cb->ccb_flags &= ~(CCB_SENSE | CCB_CLEARQ);
        } else switch (cb->ccb_sscp.scp_status) {
        case ST_GOOD:
        case ST_MET:
        case ST_INTERGOOD:
        case ST_INTERMET:
            if (cb->ccb_datalen == 0 ||
                cb->ccb_scp.scp_datalen == 0)
                break;

            if (cb->ccb_scp.scp_cmdlen > 0 &&
                (scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] &
                SCSI_LOW_CMD_RESIDUAL_CHK) == 0)
                break;

            cb->ccb_error |= PDMAERR;
            break;

        case ST_BUSY:
        case ST_QUEFULL:
            cb->ccb_error |= (BUSYERR | STATERR);
            break;

        case ST_CONFLICT:
            cb->ccb_error |= (STATERR | ABORTIO);
            break;

        case ST_CHKCOND:
        case ST_CMDTERM:
            if (cb->ccb_flags & (CCB_AUTOSENSE | CCB_INTERNAL)) {
                cb->ccb_rcnt --;
                cb->ccb_flags |= CCB_SENSE;
                goto retry;
            }
            cb->ccb_error |= (UACAERR | STATERR | ABORTIO);
            break;

        case ST_UNKNOWN:
        default:
            cb->ccb_error |= FATALIO;
            break;
        }
    } else {
        if (cb->ccb_flags & CCB_SENSE) {
            cb->ccb_error |= (SENSEERR | ABORTIO);
        }
        cb->ccb_flags &= ~(CCB_CLEARQ | CCB_SENSE);
    }

    /* internal ccb */
    if ((cb->ccb_flags & CCB_INTERNAL) != 0) {
        if (scsi_low_setup_done(slp, cb) == SCSI_LOW_DONE_RETRY)
            goto retry;
    }

    /* check the ccb msgout flag */
    if (cb->ccb_omsgoutflag != 0) {
#define SCSI_LOW_MSG_ABORT_OK   (SCSI_LOW_MSG_ABORT | \
                 SCSI_LOW_MSG_ABORT_QTAG | \
                 SCSI_LOW_MSG_CLEAR_QTAG | \
                 SCSI_LOW_MSG_TERMIO)
        if ((cb->ccb_omsgoutflag & SCSI_LOW_MSG_ABORT_OK) != 0) {
            cb->ccb_error |= ABORTIO;
        }
    }

    /* call the OS-dependent done handler */
    if (cb->osdep != NULL) {
        rv = scsi_low_done_cam(slp, cb);
        if (rv == EJUSTRETURN)
            goto retry;
    } else if (cb->ccb_error != 0) {
        if (cb->ccb_rcnt >= slp->sl_max_retry)
            cb->ccb_error |= ABORTIO;
        if ((cb->ccb_flags & CCB_NORETRY) == 0 &&
            (cb->ccb_error & ABORTIO) == 0)
            goto retry;
    }

    /* free our target */
#ifdef SCSI_LOW_DEBUG
    if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_DONE, cb->ti->ti_id) != 0) {
        printf(">> SCSI_LOW_DONE_COMPLETE ===============\n");
        scsi_low_print(slp, NULL);
    }
#endif /* SCSI_LOW_DEBUG */

    scsi_low_deactivate_qtag(cb);
    scsi_low_dealloc_qtag(cb);
    scsi_low_free_ccb(cb);
    slp->sl_nio --;
    return SCSI_LOW_DONE_COMPLETE;
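/*
 * Editor's note: the status dispatch in scsi_low_done() above boils
 * down to three outcomes: GOOD completes, CHECK CONDITION converts the
 * ccb into an autosense retry, and BUSY-class statuses become
 * retryable errors.  A condensed standalone sketch; the status codes
 * are the SCSI-2 values, the action names are illustrative:
 */
#if 0
#define ST_GOOD    0x00
#define ST_CHKCOND 0x02
#define ST_BUSY    0x08

enum action { COMPLETE, SENSE_RETRY, ERROR_RETRY };

static enum action
classify_status(int st, int autosense)
{
    switch (st) {
    case ST_GOOD:
        return COMPLETE;
    case ST_CHKCOND:
        return autosense ? SENSE_RETRY : ERROR_RETRY;
    case ST_BUSY:
    default:
        return ERROR_RETRY;
    }
}
#endif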
retry: #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_DONE, cb->ti->ti_id) != 0) { printf("** SCSI_LOW_DONE_RETRY ===============\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DEBUG */ cb->ccb_rcnt ++; scsi_low_deactivate_qtag(cb); scsi_low_ccb_message_retry(cb); return SCSI_LOW_DONE_RETRY; } /************************************************************** * Reset **************************************************************/ static void scsi_low_reset_nexus_target(slp, ti, fdone) struct scsi_low_softc *slp; struct targ_info *ti; int fdone; { struct lun_info *li; for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = LIST_NEXT(li, lun_chain)) { scsi_low_reset_nexus_lun(slp, li, fdone); li->li_state = SCSI_LOW_LUN_SLEEP; li->li_maxnqio = 0; } ti->ti_disc = 0; ti->ti_setup_msg = 0; ti->ti_setup_msg_done = 0; ti->ti_osynch.offset = ti->ti_osynch.period = 0; ti->ti_owidth = SCSI_LOW_BUS_WIDTH_8; ti->ti_diskflags = SCSI_LOW_DISK_TFLAGS; ti->ti_flags_valid &= ~SCSI_LOW_TARG_FLAGS_DISK_VALID; if (slp->sl_funcs->scsi_low_targ_init != NULL) { ((*slp->sl_funcs->scsi_low_targ_init) (slp, ti, SCSI_LOW_INFO_REVOKE)); } scsi_low_calcf_target(ti); for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = LIST_NEXT(li, lun_chain)) { li->li_flags = 0; li->li_diskflags = SCSI_LOW_DISK_LFLAGS; li->li_flags_valid &= ~SCSI_LOW_LUN_FLAGS_DISK_VALID; if (slp->sl_funcs->scsi_low_lun_init != NULL) { ((*slp->sl_funcs->scsi_low_lun_init) (slp, ti, li, SCSI_LOW_INFO_REVOKE)); } scsi_low_calcf_lun(li); } } static void scsi_low_reset_nexus(slp, fdone) struct scsi_low_softc *slp; int fdone; { struct targ_info *ti; struct slccb *cb, *topcb; if ((cb = slp->sl_Qnexus) != NULL) { topcb = scsi_low_revoke_ccb(slp, cb, fdone); } else { topcb = NULL; } for (ti = TAILQ_FIRST(&slp->sl_titab); ti != NULL; ti = TAILQ_NEXT(ti, ti_chain)) { scsi_low_reset_nexus_target(slp, ti, fdone); scsi_low_bus_release(slp, ti); scsi_low_init_msgsys(slp, ti); } if (topcb != NULL) { topcb->ccb_flags |= CCB_STARTQ; TAILQ_INSERT_HEAD(&slp->sl_start, topcb, ccb_chain); } slp->sl_disc = 0; slp->sl_retry_sel = 0; slp->sl_flags &= ~HW_PDMASTART; } /* misc */ static int tw_pos; static char tw_chars[] = "|/-\\"; #define TWIDDLEWAIT 10000 static void scsi_low_twiddle_wait(void) { cnputc('\b'); cnputc(tw_chars[tw_pos++]); tw_pos %= (sizeof(tw_chars) - 1); DELAY(TWIDDLEWAIT); } void scsi_low_bus_reset(slp) struct scsi_low_softc *slp; { int i; (*slp->sl_funcs->scsi_low_bus_reset) (slp); device_printf(slp->sl_dev, "try to reset scsi bus "); for (i = 0; i <= SCSI2_RESET_DELAY / TWIDDLEWAIT ; i++) scsi_low_twiddle_wait(); cnputc('\b'); printf("\n"); } int scsi_low_restart(slp, flags, s) struct scsi_low_softc *slp; int flags; u_char *s; { int error; if (s != NULL) device_printf(slp->sl_dev, "scsi bus restart. 
reason: %s\n", s); if ((error = scsi_low_init(slp, flags)) != 0) return error; scsi_low_start(slp); return 0; } /************************************************************** * disconnect and reselect **************************************************************/ #define MSGCMD_LUN(msg) (msg & 0x07) static struct slccb * scsi_low_establish_ccb(ti, li, tag) struct targ_info *ti; struct lun_info *li; scsi_low_tag_t tag; { struct scsi_low_softc *slp = ti->ti_sc; struct slccb *cb; if (li == NULL) return NULL; cb = TAILQ_FIRST(&li->li_discq); for ( ; cb != NULL; cb = TAILQ_NEXT(cb, ccb_chain)) if (cb->ccb_tag == tag) goto found; return cb; /* * establish our ccb nexus */ found: #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id) != 0) { device_printf(slp->sl_dev, "nexus(0x%lx) abort check start\n", (u_long) cb); cb->ccb_flags |= (CCB_NORETRY | CCB_SILENT); scsi_low_revoke_ccb(slp, cb, 1); return NULL; } if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ATTEN_CHECK, ti->ti_id) != 0) { if (cb->ccb_omsgoutflag == 0) scsi_low_ccb_message_assert(cb, SCSI_LOW_MSG_NOOP); } #endif /* SCSI_LOW_DEBUG */ TAILQ_REMOVE(&li->li_discq, cb, ccb_chain); cb->ccb_flags &= ~CCB_DISCQ; slp->sl_Qnexus = cb; slp->sl_scp = cb->ccb_sscp; slp->sl_error |= cb->ccb_error; slp->sl_disc --; ti->ti_disc --; li->li_disc --; /* inform "ccb nexus established" to the host driver */ (*slp->sl_funcs->scsi_low_establish_ccb_nexus) (slp); /* check msg */ if (cb->ccb_msgoutflag != 0) { scsi_low_ccb_message_exec(slp, cb); } return cb; } struct targ_info * scsi_low_reselected(slp, targ) struct scsi_low_softc *slp; u_int targ; { struct targ_info *ti; struct slccb *cb; u_char *s; /* * Check select vs reselected collision. */ if ((cb = slp->sl_selid) != NULL) { scsi_low_arbit_fail(slp, cb); #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_conflict ++; #endif /* SCSI_LOW_STATICS */ } /* * Check if no current active nexus. */ if (slp->sl_Tnexus != NULL) { s = "host busy"; goto world_restart; } /* * Check a valid target id asserted ? */ if (targ >= slp->sl_ntargs || targ == slp->sl_hostid) { s = "scsi id illegal"; goto world_restart; } /* * Check the target scsi status. */ ti = slp->sl_ti[targ]; if (ti->ti_phase != PH_DISC && ti->ti_phase != PH_NULL) { s = "phase mismatch"; goto world_restart; } /* * Setup init msgsys */ slp->sl_error = 0; scsi_low_init_msgsys(slp, ti); /* * Establish our target nexus */ SCSI_LOW_SETUP_PHASE(ti, PH_RESEL); slp->sl_Tnexus = ti; #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_reselected ++; #endif /* SCSI_LOW_STATICS */ return ti; world_restart: device_printf(slp->sl_dev, "reselect(%x:unknown) %s\n", targ, s); scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, "reselect: scsi world confused"); return NULL; } /************************************************************** * cmd out pointer setup **************************************************************/ int scsi_low_cmd(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { struct slccb *cb = slp->sl_Qnexus; slp->sl_ph_count ++; if (cb == NULL) { /* * no ccb, abort! 
*/ slp->sl_scp.scp_cmd = (u_int8_t *) &unit_ready_cmd; slp->sl_scp.scp_cmdlen = sizeof(unit_ready_cmd); slp->sl_scp.scp_datalen = 0; slp->sl_scp.scp_direction = SCSI_LOW_READ; slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); SCSI_LOW_INFO(slp, ti, "CMDOUT: ccb nexus not found"); return EINVAL; } else { #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_CMDLNK_CHECK, ti->ti_id)) { scsi_low_test_cmdlnk(slp, cb); } #endif /* SCSI_LOW_DEBUG */ } return 0; } /************************************************************** * data out pointer setup **************************************************************/ int scsi_low_data(slp, ti, bp, direction) struct scsi_low_softc *slp; struct targ_info *ti; struct buf **bp; int direction; { struct slccb *cb = slp->sl_Qnexus; if (cb != NULL && direction == cb->ccb_sscp.scp_direction) { *bp = cb->bp; return 0; } slp->sl_error |= (FATALIO | PDMAERR); slp->sl_scp.scp_datalen = 0; slp->sl_scp.scp_direction = direction; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); if (ti->ti_ophase != ti->ti_phase) { char *s; if (cb == NULL) s = "DATA PHASE: ccb nexus not found"; else s = "DATA PHASE: xfer direction mismatch"; SCSI_LOW_INFO(slp, ti, s); } *bp = NULL; return EINVAL; } /************************************************************** * MSG_SYS **************************************************************/ #define MSGINPTR_CLR(ti) {(ti)->ti_msginptr = 0; (ti)->ti_msginlen = 0;} #define MSGIN_PERIOD(ti) ((ti)->ti_msgin[3]) #define MSGIN_OFFSET(ti) ((ti)->ti_msgin[4]) #define MSGIN_WIDTHP(ti) ((ti)->ti_msgin[3]) #define MSGIN_DATA_LAST 0x30 static int scsi_low_errfunc_synch(struct scsi_low_softc *, u_int); static int scsi_low_errfunc_wide(struct scsi_low_softc *, u_int); static int scsi_low_errfunc_identify(struct scsi_low_softc *, u_int); static int scsi_low_errfunc_qtag(struct scsi_low_softc *, u_int); static int scsi_low_msgfunc_synch(struct scsi_low_softc *); static int scsi_low_msgfunc_wide(struct scsi_low_softc *); static int scsi_low_msgfunc_identify(struct scsi_low_softc *); static int scsi_low_msgfunc_abort(struct scsi_low_softc *); static int scsi_low_msgfunc_qabort(struct scsi_low_softc *); static int scsi_low_msgfunc_qtag(struct scsi_low_softc *); static int scsi_low_msgfunc_reset(struct scsi_low_softc *); struct scsi_low_msgout_data { u_int md_flags; u_int8_t md_msg; int (*md_msgfunc)(struct scsi_low_softc *); int (*md_errfunc)(struct scsi_low_softc *, u_int); #define MSG_RELEASE_ATN 0x0001 u_int md_condition; }; struct scsi_low_msgout_data scsi_low_msgout_data[] = { /* 0 */ {SCSI_LOW_MSG_RESET, MSG_RESET, scsi_low_msgfunc_reset, NULL, MSG_RELEASE_ATN}, /* 1 */ {SCSI_LOW_MSG_REJECT, MSG_REJECT, NULL, NULL, MSG_RELEASE_ATN}, /* 2 */ {SCSI_LOW_MSG_PARITY, MSG_PARITY, NULL, NULL, MSG_RELEASE_ATN}, /* 3 */ {SCSI_LOW_MSG_ERROR, MSG_I_ERROR, NULL, NULL, MSG_RELEASE_ATN}, /* 4 */ {SCSI_LOW_MSG_IDENTIFY, MSG_IDENTIFY, scsi_low_msgfunc_identify, scsi_low_errfunc_identify, 0}, /* 5 */ {SCSI_LOW_MSG_ABORT, MSG_ABORT, scsi_low_msgfunc_abort, NULL, MSG_RELEASE_ATN}, /* 6 */ {SCSI_LOW_MSG_TERMIO, MSG_TERM_IO, NULL, NULL, MSG_RELEASE_ATN}, /* 7 */ {SCSI_LOW_MSG_SIMPLE_QTAG, MSG_SIMPLE_QTAG, scsi_low_msgfunc_qtag, scsi_low_errfunc_qtag, 0}, /* 8 */ {SCSI_LOW_MSG_ORDERED_QTAG, MSG_ORDERED_QTAG, scsi_low_msgfunc_qtag, scsi_low_errfunc_qtag, 0}, /* 9 */{SCSI_LOW_MSG_HEAD_QTAG, MSG_HEAD_QTAG, scsi_low_msgfunc_qtag, scsi_low_errfunc_qtag, 0}, /* 10 */ {SCSI_LOW_MSG_ABORT_QTAG, MSG_ABORT_QTAG, scsi_low_msgfunc_qabort, NULL, 
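/*
 * Editor's note: scsi_low_msgout_data[] above drives message-out with
 * a table of {pending-flag, message byte, builder, error handler}
 * entries walked in priority order.  A miniature standalone sketch of
 * that walk; the flags, codes and builder are illustrative:
 */
#if 0
struct mo_entry {
    unsigned flag;                      /* pending-request bit */
    unsigned char msg;                  /* message byte to emit */
    int (*build)(unsigned char *buf);   /* NULL: single byte */
};

static int
build_noop(unsigned char *buf)
{
    buf[0] = 0x08;                      /* MSG_NOOP */
    return 1;
}

static const struct mo_entry mo_table[] = {
    { 0x1, 0x06, NULL },                /* ABORT */
    { 0x2, 0x08, build_noop },          /* NOOP */
    { 0x0, 0x00, NULL },                /* terminator */
};

static int
msgout_next(unsigned *flags, unsigned char *buf)
{
    const struct mo_entry *mdp;

    for (mdp = mo_table; mdp->flag != 0; mdp++) {
        if ((*flags & mdp->flag) == 0)
            continue;
        *flags &= ~mdp->flag;           /* consume the request */
        buf[0] = mdp->msg;
        return mdp->build != NULL ? mdp->build(buf) : 1;
    }
    return 0;                           /* nothing pending */
}
#endif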
MSG_RELEASE_ATN}, /* 11 */ {SCSI_LOW_MSG_CLEAR_QTAG, MSG_CLEAR_QTAG, scsi_low_msgfunc_abort, NULL, MSG_RELEASE_ATN}, /* 12 */{SCSI_LOW_MSG_WIDE, MSG_EXTEND, scsi_low_msgfunc_wide, scsi_low_errfunc_wide, MSG_RELEASE_ATN}, /* 13 */{SCSI_LOW_MSG_SYNCH, MSG_EXTEND, scsi_low_msgfunc_synch, scsi_low_errfunc_synch, MSG_RELEASE_ATN}, /* 14 */{SCSI_LOW_MSG_NOOP, MSG_NOOP, NULL, NULL, MSG_RELEASE_ATN}, /* 15 */{SCSI_LOW_MSG_ALL, 0}, }; static int scsi_low_msginfunc_ext(struct scsi_low_softc *); static int scsi_low_synch(struct scsi_low_softc *); static int scsi_low_wide(struct scsi_low_softc *); static int scsi_low_msginfunc_msg_reject(struct scsi_low_softc *); static int scsi_low_msginfunc_rejop(struct scsi_low_softc *); static int scsi_low_msginfunc_rp(struct scsi_low_softc *); static int scsi_low_msginfunc_sdp(struct scsi_low_softc *); static int scsi_low_msginfunc_disc(struct scsi_low_softc *); static int scsi_low_msginfunc_cc(struct scsi_low_softc *); static int scsi_low_msginfunc_lcc(struct scsi_low_softc *); static int scsi_low_msginfunc_parity(struct scsi_low_softc *); static int scsi_low_msginfunc_noop(struct scsi_low_softc *); static int scsi_low_msginfunc_simple_qtag(struct scsi_low_softc *); static int scsi_low_msginfunc_i_wide_residue(struct scsi_low_softc *); struct scsi_low_msgin_data { u_int md_len; int (*md_msgfunc)(struct scsi_low_softc *); }; struct scsi_low_msgin_data scsi_low_msgin_data[] = { /* 0 */ {1, scsi_low_msginfunc_cc}, /* 1 */ {2, scsi_low_msginfunc_ext}, /* 2 */ {1, scsi_low_msginfunc_sdp}, /* 3 */ {1, scsi_low_msginfunc_rp}, /* 4 */ {1, scsi_low_msginfunc_disc}, /* 5 */ {1, scsi_low_msginfunc_rejop}, /* 6 */ {1, scsi_low_msginfunc_rejop}, /* 7 */ {1, scsi_low_msginfunc_msg_reject}, /* 8 */ {1, scsi_low_msginfunc_noop}, /* 9 */ {1, scsi_low_msginfunc_parity}, /* a */ {1, scsi_low_msginfunc_lcc}, /* b */ {1, scsi_low_msginfunc_lcc}, /* c */ {1, scsi_low_msginfunc_rejop}, /* d */ {2, scsi_low_msginfunc_rejop}, /* e */ {1, scsi_low_msginfunc_rejop}, /* f */ {1, scsi_low_msginfunc_rejop}, /* 0x10 */ {1, scsi_low_msginfunc_rejop}, /* 0x11 */ {1, scsi_low_msginfunc_rejop}, /* 0x12 */ {1, scsi_low_msginfunc_rejop}, /* 0x13 */ {1, scsi_low_msginfunc_rejop}, /* 0x14 */ {1, scsi_low_msginfunc_rejop}, /* 0x15 */ {1, scsi_low_msginfunc_rejop}, /* 0x16 */ {1, scsi_low_msginfunc_rejop}, /* 0x17 */ {1, scsi_low_msginfunc_rejop}, /* 0x18 */ {1, scsi_low_msginfunc_rejop}, /* 0x19 */ {1, scsi_low_msginfunc_rejop}, /* 0x1a */ {1, scsi_low_msginfunc_rejop}, /* 0x1b */ {1, scsi_low_msginfunc_rejop}, /* 0x1c */ {1, scsi_low_msginfunc_rejop}, /* 0x1d */ {1, scsi_low_msginfunc_rejop}, /* 0x1e */ {1, scsi_low_msginfunc_rejop}, /* 0x1f */ {1, scsi_low_msginfunc_rejop}, /* 0x20 */ {2, scsi_low_msginfunc_simple_qtag}, /* 0x21 */ {2, scsi_low_msginfunc_rejop}, /* 0x22 */ {2, scsi_low_msginfunc_rejop}, /* 0x23 */ {2, scsi_low_msginfunc_i_wide_residue}, /* 0x24 */ {2, scsi_low_msginfunc_rejop}, /* 0x25 */ {2, scsi_low_msginfunc_rejop}, /* 0x26 */ {2, scsi_low_msginfunc_rejop}, /* 0x27 */ {2, scsi_low_msginfunc_rejop}, /* 0x28 */ {2, scsi_low_msginfunc_rejop}, /* 0x29 */ {2, scsi_low_msginfunc_rejop}, /* 0x2a */ {2, scsi_low_msginfunc_rejop}, /* 0x2b */ {2, scsi_low_msginfunc_rejop}, /* 0x2c */ {2, scsi_low_msginfunc_rejop}, /* 0x2d */ {2, scsi_low_msginfunc_rejop}, /* 0x2e */ {2, scsi_low_msginfunc_rejop}, /* 0x2f */ {2, scsi_low_msginfunc_rejop}, /* 0x30 */ {1, scsi_low_msginfunc_rejop} /* default rej op */ }; /************************************************************** * msgout 
**************************************************************/
static int
scsi_low_msgfunc_synch(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;
    int ptr = ti->ti_msgoutlen;

    ti->ti_msgoutstr[ptr + 1] = MSG_EXTEND_SYNCHLEN;
    ti->ti_msgoutstr[ptr + 2] = MSG_EXTEND_SYNCHCODE;
    ti->ti_msgoutstr[ptr + 3] = ti->ti_maxsynch.period;
    ti->ti_msgoutstr[ptr + 4] = ti->ti_maxsynch.offset;
    return MSG_EXTEND_SYNCHLEN + 2;
}

static int
scsi_low_msgfunc_wide(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;
    int ptr = ti->ti_msgoutlen;

    ti->ti_msgoutstr[ptr + 1] = MSG_EXTEND_WIDELEN;
    ti->ti_msgoutstr[ptr + 2] = MSG_EXTEND_WIDECODE;
    ti->ti_msgoutstr[ptr + 3] = ti->ti_width;
    return MSG_EXTEND_WIDELEN + 2;
}

static int
scsi_low_msgfunc_identify(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;
    struct lun_info *li = slp->sl_Lnexus;
    struct slccb *cb = slp->sl_Qnexus;
    int ptr = ti->ti_msgoutlen;
    u_int8_t msg;

    msg = MSG_IDENTIFY;
    if (cb == NULL) {
        slp->sl_error |= FATALIO;
        scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0);
        SCSI_LOW_INFO(slp, ti, "MSGOUT: nexus unknown");
    } else {
        if (scsi_low_is_disconnect_ok(cb) != 0)
            msg |= (MSG_IDENTIFY_DISCPRIV | li->li_lun);
        else
            msg |= li->li_lun;
        if (ti->ti_phase == PH_MSGOUT) {
            (*slp->sl_funcs->scsi_low_establish_lun_nexus) (slp);
            if (cb->ccb_tag == SCSI_LOW_UNKTAG) {
                (*slp->sl_funcs->scsi_low_establish_ccb_nexus) (slp);
            }
        }
    }
    ti->ti_msgoutstr[ptr + 0] = msg;
    return 1;
}

static int
scsi_low_msgfunc_abort(slp)
    struct scsi_low_softc *slp;
{

    SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_ABORT);
    return 1;
}

static int
scsi_low_msgfunc_qabort(slp)
    struct scsi_low_softc *slp;
{

    SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_TERM);
    return 1;
}

static int
scsi_low_msgfunc_reset(slp)
    struct scsi_low_softc *slp;
{

    SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_RESET);
    return 1;
}

static int
scsi_low_msgfunc_qtag(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;
    struct slccb *cb = slp->sl_Qnexus;
    int ptr = ti->ti_msgoutlen;

    if (cb == NULL || cb->ccb_tag == SCSI_LOW_UNKTAG) {
        ti->ti_msgoutstr[ptr + 0] = MSG_NOOP;
        return 1;
    } else {
        ti->ti_msgoutstr[ptr + 1] = (u_int8_t) cb->ccb_tag;
        if (ti->ti_phase == PH_MSGOUT) {
            (*slp->sl_funcs->scsi_low_establish_ccb_nexus) (slp);
        }
    }
    return 2;
}

/*
 * The following functions are called when targets give unexpected
 * responses in msgin (after msgout).
 */
static int
scsi_low_errfunc_identify(slp, msgflags)
    struct scsi_low_softc *slp;
    u_int msgflags;
{

    if (slp->sl_Lnexus != NULL) {
        slp->sl_Lnexus->li_cfgflags &= ~SCSI_LOW_DISC;
        scsi_low_calcf_lun(slp->sl_Lnexus);
    }
    return 0;
}

static int
scsi_low_errfunc_synch(slp, msgflags)
    struct scsi_low_softc *slp;
    u_int msgflags;
{
    struct targ_info *ti = slp->sl_Tnexus;

    MSGIN_PERIOD(ti) = 0;
    MSGIN_OFFSET(ti) = 0;

    scsi_low_synch(slp);
    return 0;
}

static int
scsi_low_errfunc_wide(slp, msgflags)
    struct scsi_low_softc *slp;
    u_int msgflags;
{
    struct targ_info *ti = slp->sl_Tnexus;

    MSGIN_WIDTHP(ti) = 0;
    scsi_low_wide(slp);
    return 0;
}

static int
scsi_low_errfunc_qtag(slp, msgflags)
    struct scsi_low_softc *slp;
    u_int msgflags;
{

    if ((msgflags & SCSI_LOW_MSG_REJECT) != 0) {
        if (slp->sl_Qnexus != NULL) {
            scsi_low_deactivate_qtag(slp->sl_Qnexus);
        }
        if (slp->sl_Lnexus != NULL) {
            slp->sl_Lnexus->li_cfgflags &= ~SCSI_LOW_QTAG;
            scsi_low_calcf_lun(slp->sl_Lnexus);
        }
        device_printf(slp->sl_dev, "scsi_low: qtag msg rejected\n");
    }
    return 0;
}

int
scsi_low_msgout(slp, ti, fl)
    struct scsi_low_softc *slp;
    struct targ_info *ti;
    u_int fl;
{
    struct scsi_low_msgout_data *mdp;
    int len = 0;

#ifdef SCSI_LOW_DIAGNOSTIC
    if (ti != slp->sl_Tnexus) {
        scsi_low_print(slp, NULL);
        panic("scsi_low_msgout: Target nexus inconsistent");
    }
#endif /* SCSI_LOW_DIAGNOSTIC */

    slp->sl_ph_count ++;
    if (slp->sl_ph_count > SCSI_LOW_MAX_PHCHANGES) {
        device_printf(slp->sl_dev, "too many phase changes\n");
        slp->sl_error |= FATALIO;
        scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0);
    }

    /* STEP I.
     * The SCSI phase has changed.  Any messages previously asserted
     * have been accepted by the target or processed by
     * scsi_low_msgin, so clear all saved information.
     */
    if ((fl & SCSI_LOW_MSGOUT_INIT) != 0) {
        ti->ti_omsgflags = 0;
        ti->ti_emsgflags = 0;
    } else if (slp->sl_atten == 0) {
        /* STEP II.
         * We did not assert ATN, yet the target still requests
         * message-out.  Resend the previous messages.
         */
        ti->ti_msgflags |= ti->ti_omsgflags;
        ti->ti_omsgflags = 0;
#ifdef SCSI_LOW_DIAGNOSTIC
        device_printf(slp->sl_dev, "scsi_low_msgout: retry msgout\n");
#endif /* SCSI_LOW_DIAGNOSTIC */
    }

    /* STEP III.
     * We have no messages to send; fall back to MSG_NOOP.
     */
    if (scsi_low_is_msgout_continue(ti, 0) == 0)
        scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_NOOP, 0);

    /* STEP IV.
* Process all msgs */ ti->ti_msgoutlen = 0; slp->sl_clear_atten = 0; mdp = &scsi_low_msgout_data[0]; for ( ; mdp->md_flags != SCSI_LOW_MSG_ALL; mdp ++) { if ((ti->ti_msgflags & mdp->md_flags) != 0) { ti->ti_omsgflags |= mdp->md_flags; ti->ti_msgflags &= ~mdp->md_flags; ti->ti_emsgflags = mdp->md_flags; ti->ti_msgoutstr[ti->ti_msgoutlen] = mdp->md_msg; if (mdp->md_msgfunc != NULL) len = (*mdp->md_msgfunc) (slp); else len = 1; #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_write(&ti->ti_log_msgout, &ti->ti_msgoutstr[ti->ti_msgoutlen], len); #endif /* SCSI_LOW_DIAGNOSTIC */ ti->ti_msgoutlen += len; if ((mdp->md_condition & MSG_RELEASE_ATN) != 0) { slp->sl_clear_atten = 1; break; } if ((fl & SCSI_LOW_MSGOUT_UNIFY) == 0 || ti->ti_msgflags == 0) break; if (ti->ti_msgoutlen >= SCSI_LOW_MAX_MSGLEN - 5) break; } } if (scsi_low_is_msgout_continue(ti, 0) == 0) slp->sl_clear_atten = 1; return ti->ti_msgoutlen; } /************************************************************** * msgin **************************************************************/ static int scsi_low_msginfunc_noop(slp) struct scsi_low_softc *slp; { return 0; } static int scsi_low_msginfunc_rejop(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; u_int8_t msg = ti->ti_msgin[0]; device_printf(slp->sl_dev, "MSGIN: msg 0x%x rejected\n", (u_int) msg); scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_msginfunc_cc(slp) struct scsi_low_softc *slp; { struct lun_info *li; SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_CMDC); /* validate status */ if (slp->sl_Qnexus == NULL) return ENOENT; slp->sl_Qnexus->ccb_sscp.scp_status = slp->sl_scp.scp_status; li = slp->sl_Lnexus; switch (slp->sl_scp.scp_status) { case ST_GOOD: li->li_maxnqio = li->li_maxnexus; break; case ST_CHKCOND: li->li_maxnqio = 0; if (li->li_qflags & SCSI_LOW_QFLAG_CA_QCLEAR) scsi_low_reset_nexus_lun(slp, li, 0); break; case ST_BUSY: li->li_maxnqio = 0; break; case ST_QUEFULL: if (li->li_maxnexus >= li->li_nqio) li->li_maxnexus = li->li_nqio - 1; li->li_maxnqio = li->li_maxnexus; break; case ST_INTERGOOD: case ST_INTERMET: slp->sl_error |= MSGERR; break; default: break; } return 0; } static int scsi_low_msginfunc_lcc(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *ncb, *cb; ti = slp->sl_Tnexus; li = slp->sl_Lnexus; if ((cb = slp->sl_Qnexus) == NULL) goto bad; cb->ccb_sscp.scp_status = slp->sl_scp.scp_status; switch (slp->sl_scp.scp_status) { case ST_INTERGOOD: case ST_INTERMET: li->li_maxnqio = li->li_maxnexus; break; default: slp->sl_error |= MSGERR; break; } if ((li->li_flags & SCSI_LOW_LINK) == 0) goto bad; cb->ccb_error |= slp->sl_error; if (cb->ccb_error != 0) goto bad; for (ncb = TAILQ_FIRST(&slp->sl_start); ncb != NULL; ncb = TAILQ_NEXT(ncb, ccb_chain)) { if (ncb->li == li) goto cmd_link_start; } bad: SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_LCTERM); scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return EIO; cmd_link_start: ncb->ccb_flags &= ~CCB_STARTQ; TAILQ_REMOVE(&slp->sl_start, ncb, ccb_chain); scsi_low_dealloc_qtag(ncb); ncb->ccb_tag = cb->ccb_tag; ncb->ccb_otag = cb->ccb_otag; cb->ccb_tag = SCSI_LOW_UNKTAG; cb->ccb_otag = SCSI_LOW_UNKTAG; if (scsi_low_done(slp, cb) == SCSI_LOW_DONE_RETRY) panic("%s: linked ccb retried", device_get_nameunit(slp->sl_dev)); slp->sl_Qnexus = ncb; slp->sl_ph_count = 0; ncb->ccb_error = 0; ncb->ccb_datalen = -1; ncb->ccb_scp.scp_status = ST_UNKNOWN; ncb->ccb_flags &= ~CCB_INTERNAL; scsi_low_init_msgsys(slp, ti); scsi_low_ccb_setup_cam(slp, ncb); if 
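/*
 * Editor's note: on ST_QUEFULL, scsi_low_msginfunc_cc() above lowers
 * the sustainable queue depth to one below the number of commands that
 * were actually outstanding and applies it immediately.  Standalone
 * sketch (depths illustrative):
 */
#if 0
#include <stdio.h>

static void
quefull_throttle(int *maxnexus, int *maxnqio, int nqio)
{
    if (*maxnexus >= nqio)
        *maxnexus = nqio - 1;   /* new ceiling */
    *maxnqio = *maxnexus;       /* take effect at once */
}

int
main(void)
{
    int maxnexus = 32, maxnqio = 32;

    quefull_throttle(&maxnexus, &maxnqio, 5);   /* QUEUE FULL at 5 */
    printf("depth now %d\n", maxnqio);          /* -> 4 */
    return 0;
}
#endif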
(ncb->ccb_tcmax < SCSI_LOW_MIN_TOUT) ncb->ccb_tcmax = SCSI_LOW_MIN_TOUT; ncb->ccb_tc = ncb->ccb_tcmax; /* setup saved scsi data pointer */ ncb->ccb_sscp = ncb->ccb_scp; slp->sl_scp = ncb->ccb_sscp; slp->sl_error = ncb->ccb_error; #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_init(&ti->ti_log_msgin); scsi_low_msg_log_init(&ti->ti_log_msgout); #endif /* SCSI_LOW_DIAGNOSTIC */ return EJUSTRETURN; } static int scsi_low_msginfunc_disc(slp) struct scsi_low_softc *slp; { SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_DISC); return 0; } static int scsi_low_msginfunc_sdp(slp) struct scsi_low_softc *slp; { struct slccb *cb = slp->sl_Qnexus; if (cb != NULL) { cb->ccb_sscp.scp_datalen = slp->sl_scp.scp_datalen; cb->ccb_sscp.scp_data = slp->sl_scp.scp_data; } else scsi_low_assert_msg(slp, slp->sl_Tnexus, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_msginfunc_rp(slp) struct scsi_low_softc *slp; { if (slp->sl_Qnexus != NULL) slp->sl_scp = slp->sl_Qnexus->ccb_sscp; else scsi_low_assert_msg(slp, slp->sl_Tnexus, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_synch(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; u_int period = 0, offset = 0, speed; u_char *s; int error; if ((MSGIN_PERIOD(ti) >= ti->ti_maxsynch.period && MSGIN_OFFSET(ti) <= ti->ti_maxsynch.offset) || MSGIN_OFFSET(ti) == 0) { if ((offset = MSGIN_OFFSET(ti)) != 0) period = MSGIN_PERIOD(ti); s = offset ? "synchronous" : "async"; } else { /* XXX: * Target seems to be brain damaged. * Force async transfer. */ ti->ti_maxsynch.period = 0; ti->ti_maxsynch.offset = 0; device_printf(slp->sl_dev, "target brain damaged. async transfer\n"); return EINVAL; } ti->ti_maxsynch.period = period; ti->ti_maxsynch.offset = offset; error = (*slp->sl_funcs->scsi_low_msg) (slp, ti, SCSI_LOW_MSG_SYNCH); if (error != 0) { /* XXX: * Current period and offset are not acceptable * for our adapter. * The adapter changes max synch and max offset. */ device_printf(slp->sl_dev, "synch neg failed. retry synch msg neg ...\n"); return error; } ti->ti_osynch = ti->ti_maxsynch; if (offset > 0) { ti->ti_setup_msg_done |= SCSI_LOW_MSG_SYNCH; } /* inform data */ if ((slp->sl_show_result & SHOW_SYNCH_NEG) != 0) { #ifdef SCSI_LOW_NEGOTIATE_BEFORE_SENSE struct slccb *cb = slp->sl_Qnexus; if (cb != NULL && (cb->ccb_flags & CCB_SENSE) != 0) return 0; #endif /* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */ device_printf(slp->sl_dev, "(%d:*): <%s> offset %d period %dns ", ti->ti_id, s, offset, period * 4); if (period != 0) { speed = 1000 * 10 / (period * 4); printf("%d.%d M/s", speed / 10, speed % 10); } printf("\n"); } return 0; } static int scsi_low_wide(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; int error; ti->ti_width = MSGIN_WIDTHP(ti); error = (*slp->sl_funcs->scsi_low_msg) (slp, ti, SCSI_LOW_MSG_WIDE); if (error != 0) { /* XXX: * Current width is not acceptable for our adapter. * The adapter changes max width. */ device_printf(slp->sl_dev, "wide neg failed. 
retry wide msg neg ...\n");
        return error;
    }

    ti->ti_owidth = ti->ti_width;
    if (ti->ti_width > SCSI_LOW_BUS_WIDTH_8) {
        ti->ti_setup_msg_done |=
            (SCSI_LOW_MSG_SYNCH | SCSI_LOW_MSG_WIDE);
    }

    /* report the negotiation result */
    if ((slp->sl_show_result & SHOW_WIDE_NEG) != 0) {
#ifdef SCSI_LOW_NEGOTIATE_BEFORE_SENSE
        struct slccb *cb = slp->sl_Qnexus;

        if (cb != NULL && (cb->ccb_flags & CCB_SENSE) != 0)
            return 0;
#endif /* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */
        device_printf(slp->sl_dev, "(%d:*): transfer width %d bits\n",
            ti->ti_id, 1 << (3 + ti->ti_width));
    }
    return 0;
}

static int
scsi_low_msginfunc_simple_qtag(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;
    scsi_low_tag_t etag = (scsi_low_tag_t) ti->ti_msgin[1];

    if (slp->sl_Qnexus != NULL) {
        if (slp->sl_Qnexus->ccb_tag != etag) {
            slp->sl_error |= FATALIO;
            scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0);
            SCSI_LOW_INFO(slp, ti, "MSGIN: qtag mismatch");
        }
    } else if (scsi_low_establish_ccb(ti, slp->sl_Lnexus, etag) == NULL) {
#ifdef SCSI_LOW_DEBUG
        if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id))
            return 0;
#endif /* SCSI_LOW_DEBUG */
        slp->sl_error |= FATALIO;
        scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT_QTAG, 0);
        SCSI_LOW_INFO(slp, ti, "MSGIN: tagged ccb not found");
    }
    return 0;
}

static int
scsi_low_msginfunc_i_wide_residue(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;
    struct slccb *cb = slp->sl_Qnexus;
    int res = (int) ti->ti_msgin[1];

    if (cb == NULL || res <= 0 ||
        (ti->ti_width == SCSI_LOW_BUS_WIDTH_16 && res > 1) ||
        (ti->ti_width == SCSI_LOW_BUS_WIDTH_32 && res > 3))
        return EINVAL;

    if (slp->sl_scp.scp_datalen + res > cb->ccb_scp.scp_datalen)
        return EINVAL;

    slp->sl_scp.scp_datalen += res;
    slp->sl_scp.scp_data -= res;
    scsi_low_data_finish(slp);
    return 0;
}

static int
scsi_low_msginfunc_ext(slp)
    struct scsi_low_softc *slp;
{
    struct slccb *cb = slp->sl_Qnexus;
    struct lun_info *li = slp->sl_Lnexus;
    struct targ_info *ti = slp->sl_Tnexus;
    int count, retry;
    u_int32_t *ptr;

    if (ti->ti_msginptr == 2) {
        ti->ti_msginlen = ti->ti_msgin[1] + 2;
        return 0;
    }

    switch (MKMSG_EXTEND(ti->ti_msgin[1], ti->ti_msgin[2])) {
    case MKMSG_EXTEND(MSG_EXTEND_MDPLEN, MSG_EXTEND_MDPCODE):
        if (cb == NULL)
            break;

        ptr = (u_int32_t *)(&ti->ti_msgin[3]);
        count = (int) htonl((long) (*ptr));

        if (slp->sl_scp.scp_datalen - count < 0 ||
            slp->sl_scp.scp_datalen - count > cb->ccb_scp.scp_datalen)
            break;

        slp->sl_scp.scp_datalen -= count;
        slp->sl_scp.scp_data += count;
        return 0;

    case MKMSG_EXTEND(MSG_EXTEND_SYNCHLEN, MSG_EXTEND_SYNCHCODE):
        if (li == NULL)
            break;

        retry = scsi_low_synch(slp);
        if (retry != 0 || (ti->ti_emsgflags & SCSI_LOW_MSG_SYNCH) == 0)
            scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_SYNCH, 0);

#ifdef SCSI_LOW_DEBUG
        if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ATTEN_CHECK, ti->ti_id)) {
            scsi_low_test_atten(slp, ti, SCSI_LOW_MSG_SYNCH);
        }
#endif /* SCSI_LOW_DEBUG */
        return 0;

    case MKMSG_EXTEND(MSG_EXTEND_WIDELEN, MSG_EXTEND_WIDECODE):
        if (li == NULL)
            break;

        retry = scsi_low_wide(slp);
        if (retry != 0 || (ti->ti_emsgflags & SCSI_LOW_MSG_WIDE) == 0)
            scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_WIDE, 0);
        return 0;

    default:
        break;
    }

    scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0);
    return EINVAL;
}

static int
scsi_low_msginfunc_parity(slp)
    struct scsi_low_softc *slp;
{
    struct targ_info *ti = slp->sl_Tnexus;

    /* MSG_PARITY flows only from initiator to target; invalid here. */
*/ scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_msginfunc_msg_reject(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; struct scsi_low_msgout_data *mdp; u_int msgflags; if (ti->ti_emsgflags != 0) { device_printf(slp->sl_dev, "msg flags [0x%x] rejected\n", ti->ti_emsgflags); msgflags = SCSI_LOW_MSG_REJECT; mdp = &scsi_low_msgout_data[0]; for ( ; mdp->md_flags != SCSI_LOW_MSG_ALL; mdp ++) { if ((ti->ti_emsgflags & mdp->md_flags) != 0) { ti->ti_emsgflags &= ~mdp->md_flags; if (mdp->md_errfunc != NULL) (*mdp->md_errfunc) (slp, msgflags); break; } } return 0; } else { SCSI_LOW_INFO(slp, ti, "MSGIN: rejected msg not found"); slp->sl_error |= MSGERR; } return EINVAL; } int scsi_low_msgin(slp, ti, c) struct scsi_low_softc *slp; struct targ_info *ti; u_int c; { struct scsi_low_msgin_data *sdp; struct lun_info *li; u_int8_t msg; #ifdef SCSI_LOW_DIAGNOSTIC if (ti != slp->sl_Tnexus) { scsi_low_print(slp, NULL); panic("scsi_low_msgin: Target nexus inconsistent"); } #endif /* SCSI_LOW_DIAGNOSTIC */ /* * Phase changes, clear the pointer. */ if (ti->ti_ophase != ti->ti_phase) { MSGINPTR_CLR(ti); ti->ti_msgin_parity_error = 0; slp->sl_ph_count ++; if (slp->sl_ph_count > SCSI_LOW_MAX_PHCHANGES) { device_printf(slp->sl_dev, "too many phase changes\n"); slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); } } /* * Store the current message byte into the buffer and * wait for the completion of the current msg. */ ti->ti_msgin[ti->ti_msginptr ++] = (u_int8_t) c; if (ti->ti_msginptr >= SCSI_LOW_MAX_MSGLEN) { ti->ti_msginptr = SCSI_LOW_MAX_MSGLEN - 1; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); } /* * Check parity errors. */ if ((c & SCSI_LOW_DATA_PE) != 0) { ti->ti_msgin_parity_error ++; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_PARITY, 0); goto out; } if (ti->ti_msgin_parity_error != 0) goto out; /* * Calculate message length. */ msg = ti->ti_msgin[0]; if (msg < MSGIN_DATA_LAST) sdp = &scsi_low_msgin_data[msg]; else sdp = &scsi_low_msgin_data[MSGIN_DATA_LAST]; if (ti->ti_msginlen == 0) { ti->ti_msginlen = sdp->md_len; } /* * Check completion. */ if (ti->ti_msginptr < ti->ti_msginlen) return EJUSTRETURN; /* * Do process. */ if ((msg & MSG_IDENTIFY) == 0) { if (((*sdp->md_msgfunc) (slp)) == EJUSTRETURN) return EJUSTRETURN; } else { li = slp->sl_Lnexus; if (li == NULL) { li = scsi_low_alloc_li(ti, MSGCMD_LUN(msg), 0); if (li == NULL) goto badlun; slp->sl_Lnexus = li; (*slp->sl_funcs->scsi_low_establish_lun_nexus) (slp); } else { if (MSGCMD_LUN(msg) != li->li_lun) goto badlun; } if (slp->sl_Qnexus == NULL && li->li_nqio == 0) { if (!scsi_low_establish_ccb(ti, li, SCSI_LOW_UNKTAG)) { #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id) != 0) { goto out; } #endif /* SCSI_LOW_DEBUG */ goto badlun; } } } goto out; /* * Msg process completed, reset msgin pointer and assert ATN if desired. 
*/ badlun: slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); SCSI_LOW_INFO(slp, ti, "MSGIN: identify wrong"); out: if (ti->ti_msginptr < ti->ti_msginlen) return EJUSTRETURN; #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_write(&ti->ti_log_msgin, &ti->ti_msgin[0], ti->ti_msginlen); #endif /* SCSI_LOW_DIAGNOSTIC */ MSGINPTR_CLR(ti); return 0; } /********************************************************** * disconnect **********************************************************/ int scsi_low_disconnected(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { struct slccb *cb = slp->sl_Qnexus; /* check phase completion */ switch (slp->sl_msgphase) { case MSGPH_RESET: scsi_low_statusin(slp, slp->sl_Tnexus, ST_GOOD); scsi_low_msginfunc_cc(slp); scsi_low_reset_nexus_target(slp, slp->sl_Tnexus, 0); goto io_resume; case MSGPH_ABORT: scsi_low_statusin(slp, slp->sl_Tnexus, ST_GOOD); scsi_low_msginfunc_cc(slp); scsi_low_reset_nexus_lun(slp, slp->sl_Lnexus, 0); goto io_resume; case MSGPH_TERM: scsi_low_statusin(slp, slp->sl_Tnexus, ST_GOOD); scsi_low_msginfunc_cc(slp); goto io_resume; case MSGPH_DISC: if (cb != NULL) { struct lun_info *li; li = cb->li; TAILQ_INSERT_TAIL(&li->li_discq, cb, ccb_chain); cb->ccb_flags |= CCB_DISCQ; cb->ccb_error |= slp->sl_error; li->li_disc ++; ti->ti_disc ++; slp->sl_disc ++; } #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_disconnected ++; #endif /* SCSI_LOW_STATICS */ #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_DISC, ti->ti_id) != 0) { printf("## SCSI_LOW_DISCONNECTED ===============\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DEBUG */ break; case MSGPH_NULL: slp->sl_error |= FATALIO; if (ti->ti_phase == PH_SELSTART) slp->sl_error |= SELTIMEOUTIO; else slp->sl_error |= UBFERR; /* fall through */ case MSGPH_LCTERM: case MSGPH_CMDC: io_resume: if (cb == NULL) break; #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ATTEN_CHECK, ti->ti_id)) { if (cb->ccb_omsgoutflag == SCSI_LOW_MSG_NOOP && (cb->ccb_msgoutflag != 0 || (ti->ti_msgflags & SCSI_LOW_MSG_NOOP))) { scsi_low_info(slp, ti, "ATTEN CHECK FAILED"); } } #endif /* SCSI_LOW_DEBUG */ cb->ccb_error |= slp->sl_error; if (scsi_low_done(slp, cb) == SCSI_LOW_DONE_RETRY) { cb->ccb_flags |= CCB_STARTQ; TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain); } break; } scsi_low_bus_release(slp, ti); scsi_low_start(slp); return 1; } /********************************************************** * TAG operations **********************************************************/ static int scsi_low_alloc_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; scsi_low_tag_t etag; if (cb->ccb_otag != SCSI_LOW_UNKTAG) return 0; #ifndef SCSI_LOW_ALT_QTAG_ALLOCATE etag = ffs(li->li_qtagbits); if (etag == 0) return ENOSPC; li->li_qtagbits &= ~(1 << (etag - 1)); cb->ccb_otag = etag; return 0; #else /* SCSI_LOW_ALT_QTAG_ALLOCATE */ for (etag = li->li_qd ; li->li_qd < SCSI_LOW_MAXNEXUS; li->li_qd ++) if (li->li_qtagarray[li->li_qd] == 0) goto found; for (li->li_qd = 0; li->li_qd < etag; li->li_qd ++) if (li->li_qtagarray[li->li_qd] == 0) goto found; return ENOSPC; found: li->li_qtagarray[li->li_qd] ++; cb->ccb_otag = (li->li_qd ++); return 0; #endif /* SCSI_LOW_ALT_QTAG_ALLOCATE */ } static int scsi_low_dealloc_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; scsi_low_tag_t etag; if (cb->ccb_otag == SCSI_LOW_UNKTAG) return 0; #ifndef SCSI_LOW_ALT_QTAG_ALLOCATE etag = cb->ccb_otag - 1; #ifdef SCSI_LOW_DIAGNOSTIC if (etag >= sizeof(li->li_qtagbits) * NBBY) panic("scsi_low_dealloc_tag: 
illegal tag"); #endif /* SCSI_LOW_DIAGNOSTIC */ li->li_qtagbits |= (1 << etag); #else /* SCSI_LOW_ALT_QTAG_ALLOCATE */ etag = cb->ccb_otag; #ifdef SCSI_LOW_DIAGNOSTIC if (etag >= SCSI_LOW_MAXNEXUS) panic("scsi_low_dealloc_tag: illegal tag"); #endif /* SCSI_LOW_DIAGNOSTIC */ li->li_qtagarray[etag] --; #endif /* SCSI_LOW_ALT_QTAG_ALLOCATE */ cb->ccb_otag = SCSI_LOW_UNKTAG; return 0; } static struct slccb * scsi_low_revoke_ccb(slp, cb, fdone) struct scsi_low_softc *slp; struct slccb *cb; int fdone; { struct targ_info *ti = cb->ti; struct lun_info *li = cb->li; #ifdef SCSI_LOW_DIAGNOSTIC if ((cb->ccb_flags & (CCB_STARTQ | CCB_DISCQ)) == (CCB_STARTQ | CCB_DISCQ)) { panic("%s: ccb in both queue", device_get_nameunit(slp->sl_dev)); } #endif /* SCSI_LOW_DIAGNOSTIC */ if ((cb->ccb_flags & CCB_STARTQ) != 0) { TAILQ_REMOVE(&slp->sl_start, cb, ccb_chain); } if ((cb->ccb_flags & CCB_DISCQ) != 0) { TAILQ_REMOVE(&li->li_discq, cb, ccb_chain); li->li_disc --; ti->ti_disc --; slp->sl_disc --; } cb->ccb_flags &= ~(CCB_STARTQ | CCB_DISCQ | CCB_SENSE | CCB_CLEARQ | CCB_INTERNAL); if (fdone != 0 && (cb->ccb_rcnt ++ >= slp->sl_max_retry || (cb->ccb_flags & CCB_NORETRY) != 0)) { cb->ccb_error |= FATALIO; cb->ccb_flags &= ~CCB_AUTOSENSE; if (scsi_low_done(slp, cb) != SCSI_LOW_DONE_COMPLETE) panic("%s: done ccb retried", device_get_nameunit(slp->sl_dev)); return NULL; } else { cb->ccb_error |= PENDINGIO; scsi_low_deactivate_qtag(cb); scsi_low_ccb_message_retry(cb); cb->ccb_tc = cb->ccb_tcmax = SCSI_LOW_MIN_TOUT; return cb; } } static void scsi_low_reset_nexus_lun(slp, li, fdone) struct scsi_low_softc *slp; struct lun_info *li; int fdone; { struct slccb *cb, *ncb, *ecb; if (li == NULL) return; ecb = NULL; for (cb = TAILQ_FIRST(&li->li_discq); cb != NULL; cb = ncb) { ncb = TAILQ_NEXT(cb, ccb_chain); cb = scsi_low_revoke_ccb(slp, cb, fdone); if (cb != NULL) { /* * presumely keep ordering of io */ cb->ccb_flags |= CCB_STARTQ; if (ecb == NULL) { TAILQ_INSERT_HEAD(&slp->sl_start,\ cb, ccb_chain); } else { TAILQ_INSERT_AFTER(&slp->sl_start,\ ecb, cb, ccb_chain); } ecb = cb; } } } /************************************************************** * Qurik setup **************************************************************/ static void scsi_low_calcf_lun(li) struct lun_info *li; { struct targ_info *ti = li->li_ti; struct scsi_low_softc *slp = ti->ti_sc; u_int cfgflags, diskflags; if (li->li_flags_valid == SCSI_LOW_LUN_FLAGS_ALL_VALID) cfgflags = li->li_cfgflags; else cfgflags = 0; diskflags = li->li_diskflags & li->li_quirks; /* disconnect */ li->li_flags &= ~SCSI_LOW_DISC; if ((slp->sl_cfgflags & CFG_NODISC) == 0 && (diskflags & SCSI_LOW_DISK_DISC) != 0 && (cfgflags & SCSI_LOW_DISC) != 0) li->li_flags |= SCSI_LOW_DISC; /* parity */ li->li_flags |= SCSI_LOW_NOPARITY; if ((slp->sl_cfgflags & CFG_NOPARITY) == 0 && (diskflags & SCSI_LOW_DISK_PARITY) != 0 && (cfgflags & SCSI_LOW_NOPARITY) == 0) li->li_flags &= ~SCSI_LOW_NOPARITY; /* qtag */ if ((slp->sl_cfgflags & CFG_NOQTAG) == 0 && (cfgflags & SCSI_LOW_QTAG) != 0 && (diskflags & SCSI_LOW_DISK_QTAG) != 0) { li->li_flags |= SCSI_LOW_QTAG; li->li_maxnexus = SCSI_LOW_MAXNEXUS; li->li_maxnqio = li->li_maxnexus; } else { li->li_flags &= ~SCSI_LOW_QTAG; li->li_maxnexus = 0; li->li_maxnqio = li->li_maxnexus; } /* cmd link */ li->li_flags &= ~SCSI_LOW_LINK; if ((cfgflags & SCSI_LOW_LINK) != 0 && (diskflags & SCSI_LOW_DISK_LINK) != 0) li->li_flags |= SCSI_LOW_LINK; /* compatible flags */ li->li_flags &= ~SCSI_LOW_SYNC; if (ti->ti_maxsynch.offset > 0) li->li_flags |= SCSI_LOW_SYNC; #ifdef 
SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_CALCF, ti->ti_id) != 0) { scsi_low_calcf_show(li); } #endif /* SCSI_LOW_DEBUG */ } static void scsi_low_calcf_target(ti) struct targ_info *ti; { struct scsi_low_softc *slp = ti->ti_sc; u_int offset, period, diskflags; diskflags = ti->ti_diskflags & ti->ti_quirks; /* synch */ if ((slp->sl_cfgflags & CFG_ASYNC) == 0 && (diskflags & SCSI_LOW_DISK_SYNC) != 0) { offset = ti->ti_maxsynch.offset; period = ti->ti_maxsynch.period; if (offset == 0 || period == 0) offset = period = 0; } else { offset = period = 0; } ti->ti_maxsynch.offset = offset; ti->ti_maxsynch.period = period; /* wide */ if ((diskflags & SCSI_LOW_DISK_WIDE_32) == 0 && ti->ti_width > SCSI_LOW_BUS_WIDTH_16) ti->ti_width = SCSI_LOW_BUS_WIDTH_16; if ((diskflags & SCSI_LOW_DISK_WIDE_16) == 0 && ti->ti_width > SCSI_LOW_BUS_WIDTH_8) ti->ti_width = SCSI_LOW_BUS_WIDTH_8; if (ti->ti_flags_valid == SCSI_LOW_TARG_FLAGS_ALL_VALID) { if (ti->ti_maxsynch.offset != ti->ti_osynch.offset || ti->ti_maxsynch.period != ti->ti_osynch.period) ti->ti_setup_msg |= SCSI_LOW_MSG_SYNCH; if (ti->ti_width != ti->ti_owidth) ti->ti_setup_msg |= (SCSI_LOW_MSG_WIDE | SCSI_LOW_MSG_SYNCH); ti->ti_osynch = ti->ti_maxsynch; ti->ti_owidth = ti->ti_width; } #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_CALCF, ti->ti_id) != 0) { device_printf(slp->sl_dev, "(%d:*): max period(%dns) offset(%d) width(%d)\n", ti->ti_id, ti->ti_maxsynch.period * 4, ti->ti_maxsynch.offset, ti->ti_width); } #endif /* SCSI_LOW_DEBUG */ } static void scsi_low_calcf_show(li) struct lun_info *li; { struct targ_info *ti = li->li_ti; struct scsi_low_softc *slp = ti->ti_sc; device_printf(slp->sl_dev, "(%d:%d): period(%d ns) offset(%d) width(%d) flags 0x%b\n", ti->ti_id, li->li_lun, ti->ti_maxsynch.period * 4, ti->ti_maxsynch.offset, ti->ti_width, li->li_flags, SCSI_LOW_BITS); } #ifdef SCSI_LOW_START_UP_CHECK /************************************************************** * scsi world start up **************************************************************/ static int scsi_low_poll(struct scsi_low_softc *, struct slccb *); static int scsi_low_start_up(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *cb; int target, lun; device_printf(slp->sl_dev, "scsi_low: probing all devices ....\n"); for (target = 0; target < slp->sl_ntargs; target ++) { if (target == slp->sl_hostid) { if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { device_printf(slp->sl_dev, "scsi_low: target %d (host card)\n", target); } continue; } if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { device_printf(slp->sl_dev, "scsi_low: target %d lun ", target); } ti = slp->sl_ti[target]; for (lun = 0; lun < slp->sl_nluns; lun ++) { if ((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL) break; cb->osdep = NULL; cb->bp = NULL; li = scsi_low_alloc_li(ti, lun, 1); scsi_low_enqueue(slp, ti, li, cb, CCB_AUTOSENSE | CCB_POLLED, 0); scsi_low_poll(slp, cb); if (li->li_state != SCSI_LOW_LUN_OK) break; if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { printf("%d ", lun); } } if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { printf("\n"); } } return 0; } static int scsi_low_poll(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { int tcount; tcount = 0; while (slp->sl_nio > 0) { DELAY((1000 * 1000) / SCSI_LOW_POLL_HZ); (*slp->sl_funcs->scsi_low_poll) (slp); if (tcount ++ < SCSI_LOW_POLL_HZ / SCSI_LOW_TIMEOUT_HZ) continue; tcount = 0; scsi_low_timeout_check(slp); } return 0; } #endif /* SCSI_LOW_START_UP_CHECK */ 
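/*
 * A minimal, self-contained sketch (not part of this change) of the
 * ffs()-based queue tag allocation scheme that scsi_low_alloc_qtag()
 * and scsi_low_dealloc_qtag() above implement: one bit of li_qtagbits
 * per tag, with ffs(3) picking the lowest free tag in constant time.
 * The names tag_alloc()/tag_free() are hypothetical, for illustration
 * only; compile stand-alone in userland.
 */
#include <stdio.h>
#include <strings.h>			/* ffs() */

static unsigned int qtagbits = ~0U;	/* bit n set => tag n+1 is free */

static int
tag_alloc(void)
{
	int etag = ffs(qtagbits);	/* lowest set bit; 0 if none */

	if (etag == 0)
		return (-1);		/* all 32 tags are in use */
	qtagbits &= ~(1U << (etag - 1));
	return (etag);
}

static void
tag_free(int etag)
{
	qtagbits |= 1U << (etag - 1);
}

int
main(void)
{
	int t1 = tag_alloc(), t2 = tag_alloc();

	printf("allocated tags %d and %d\n", t1, t2);	/* 1 and 2 */
	tag_free(t1);
	printf("tag %d is reused first\n", tag_alloc());	/* 1 again */
	return (0);
}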
/********************************************************** * DEBUG SECTION **********************************************************/ #ifdef SCSI_LOW_DEBUG static void scsi_low_test_abort(slp, ti, li) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; { struct slccb *acb; if (li->li_disc > 1) { acb = TAILQ_FIRST(&li->li_discq); if (scsi_low_abort_ccb(slp, acb) == 0) { device_printf(slp->sl_dev, "aborting ccb(0x%lx) start\n", (u_long) acb); } } } static void scsi_low_test_atten(slp, ti, msg) struct scsi_low_softc *slp; struct targ_info *ti; u_int msg; { if (slp->sl_ph_count < SCSI_LOW_MAX_ATTEN_CHECK) scsi_low_assert_msg(slp, ti, msg, 0); else device_printf(slp->sl_dev, "atten check OK\n"); } static void scsi_low_test_cmdlnk(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { #define SCSI_LOW_CMDLNK_NOK (CCB_INTERNAL | CCB_SENSE | CCB_CLEARQ) if ((cb->ccb_flags & SCSI_LOW_CMDLNK_NOK) != 0) return; memcpy(cb->ccb_scsi_cmd, slp->sl_scp.scp_cmd, slp->sl_scp.scp_cmdlen); cb->ccb_scsi_cmd[slp->sl_scp.scp_cmdlen - 1] |= 1; slp->sl_scp.scp_cmd = cb->ccb_scsi_cmd; } #endif /* SCSI_LOW_DEBUG */ /* static */ void scsi_low_info(slp, ti, s) struct scsi_low_softc *slp; struct targ_info *ti; u_char *s; { if (slp == NULL) slp = LIST_FIRST(&sl_tab); if (s == NULL) s = "no message"; printf(">>>>> SCSI_LOW_INFO(0x%lx): %s\n", (u_long) slp->sl_Tnexus, s); if (ti == NULL) { TAILQ_FOREACH(ti, &slp->sl_titab, ti_chain) { scsi_low_print(slp, ti); } } else { scsi_low_print(slp, ti); } } static u_char *phase[] = { "FREE", "ARBSTART", "SELSTART", "SELECTED", "CMDOUT", "DATA", "MSGIN", "MSGOUT", "STATIN", "DISC", "RESEL" }; void scsi_low_print(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { struct lun_info *li; struct slccb *cb; struct sc_p *sp; if (ti == NULL || ti == slp->sl_Tnexus) { ti = slp->sl_Tnexus; li = slp->sl_Lnexus; cb = slp->sl_Qnexus; } else { li = LIST_FIRST(&ti->ti_litab); cb = TAILQ_FIRST(&li->li_discq); } sp = &slp->sl_scp; device_printf(slp->sl_dev, "=== NEXUS T(0x%lx) L(0x%lx) Q(0x%lx) NIO(%d) ===\n", (u_long) ti, (u_long) li, (u_long) cb, slp->sl_nio); /* target stat */ if (ti != NULL) { u_int flags = 0, maxnqio = 0, nqio = 0; int lun = CAM_LUN_WILDCARD; if (li != NULL) { lun = li->li_lun; flags = li->li_flags; maxnqio = li->li_maxnqio; nqio = li->li_nqio; } device_printf(slp->sl_dev, "(%d:%d) ph<%s> => ph<%s> DISC(%d) QIO(%d:%d)\n", ti->ti_id, lun, phase[(int) ti->ti_ophase], phase[(int) ti->ti_phase], ti->ti_disc, nqio, maxnqio); if (cb != NULL) { printf("CCB: cmd[0] 0x%x clen 0x%x dlen 0x%x<0x%x stat 0x%x err %b\n", (u_int) cb->ccb_scp.scp_cmd[0], cb->ccb_scp.scp_cmdlen, cb->ccb_datalen, cb->ccb_scp.scp_datalen, (u_int) cb->ccb_sscp.scp_status, cb->ccb_error, SCSI_LOW_ERRORBITS); } printf("MSGIN: ptr(%x) [%x][%x][%x][%x][%x] attention: %d\n", (u_int) (ti->ti_msginptr), (u_int) (ti->ti_msgin[0]), (u_int) (ti->ti_msgin[1]), (u_int) (ti->ti_msgin[2]), (u_int) (ti->ti_msgin[3]), (u_int) (ti->ti_msgin[4]), slp->sl_atten); printf("MSGOUT: msgflags 0x%x [%x][%x][%x][%x][%x] msgoutlen %d C_FLAGS: %b\n", (u_int) ti->ti_msgflags, (u_int) (ti->ti_msgoutstr[0]), (u_int) (ti->ti_msgoutstr[1]), (u_int) (ti->ti_msgoutstr[2]), (u_int) (ti->ti_msgoutstr[3]), (u_int) (ti->ti_msgoutstr[4]), ti->ti_msgoutlen, flags, SCSI_LOW_BITS); #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_show(&ti->ti_log_msgin, "MIN LOG ", 2); scsi_low_msg_log_show(&ti->ti_log_msgout, "MOUT LOG", 2); #endif /* SCSI_LOW_DIAGNOSTIC */ } printf("SCB: daddr 0x%lx dlen 0x%x stat 0x%x err %b\n", (u_long) 
sp->scp_data, sp->scp_datalen, (u_int) sp->scp_status, slp->sl_error, SCSI_LOW_ERRORBITS); } Index: head/sys/cam/scsi/scsi_low.h =================================================================== --- head/sys/cam/scsi/scsi_low.h (revision 326264) +++ head/sys/cam/scsi/scsi_low.h (revision 326265) @@ -1,791 +1,793 @@ /* $FreeBSD$ */ /* $NecBSD: scsi_low.h,v 1.24.10.5 2001/06/26 07:31:46 honda Exp $ */ /* $NetBSD$ */ #define SCSI_LOW_DIAGNOSTIC #define SCSI_LOW_ALT_QTAG_ALLOCATE /*- + * SPDX-License-Identifier: BSD-3-Clause + * * [NetBSD for NEC PC-98 series] * Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000, 2001 * NetBSD/pc98 porting staff. All rights reserved. * Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000, 2001 * Naofumi HONDA. All rights reserved. * * [Ported for FreeBSD CAM] * Copyright (c) 2000, 2001 * MITSUNAGA Noriaki, NOKUBI Hirotaka and TAKAHASHI Yoshihiro. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _SCSI_LOW_H_ #define _SCSI_LOW_H_ /******** includes *******************************/ #include #include #include #include #include #include #include #include #include /******** functions macro ************************/ #undef MSG_IDENTIFY /*================================================ * Generic Scsi Low header file ================================================*/ /************************************************* * Scsi low definitions *************************************************/ #define SCSI_LOW_SYNC DVF_SCSI_SYNC #define SCSI_LOW_DISC DVF_SCSI_DISC #define SCSI_LOW_WAIT DVF_SCSI_WAIT #define SCSI_LOW_LINK DVF_SCSI_LINK #define SCSI_LOW_QTAG DVF_SCSI_QTAG #define SCSI_LOW_NOPARITY DVF_SCSI_NOPARITY #define SCSI_LOW_SAVESP DVF_SCSI_SAVESP #define SCSI_LOW_DEFCFG DVF_SCSI_DEFCFG #define SCSI_LOW_BITS DVF_SCSI_BITS #define SCSI_LOW_PERIOD(n) DVF_SCSI_PERIOD(n) #define SCSI_LOW_OFFSET(n) DVF_SCSI_OFFSET(n) /* host scsi id and targets macro */ #ifndef SCSI_LOW_NTARGETS #define SCSI_LOW_NTARGETS 8 #endif /* SCSI_LOW_NTARGETS */ #define SCSI_LOW_NCCB 128 #define SCSI_LOW_MAX_RETRY 3 #define SCSI_LOW_MAX_SELECTION_RETRY 10 /* timeout control macro */ #define SCSI_LOW_TIMEOUT_HZ 10 #define SCSI_LOW_MIN_TOUT 12 #define SCSI_LOW_TIMEOUT_CHECK_INTERVAL 1 #define SCSI_LOW_POWDOWN_TC 15 #define SCSI_LOW_MAX_PHCHANGES 256 #define SCSI2_RESET_DELAY 5000000 /* msg */ #define SCSI_LOW_MAX_MSGLEN 32 #define SCSI_LOW_MSG_LOG_DATALEN 8 /************************************************* * Scsi Data Pointer *************************************************/ /* scsi pointer */ struct sc_p { u_int8_t *scp_data; int scp_datalen; u_int8_t *scp_cmd; int scp_cmdlen; u_int8_t scp_direction; #define SCSI_LOW_RWUNK (-1) #define SCSI_LOW_WRITE 0 #define SCSI_LOW_READ 1 u_int8_t scp_status; u_int8_t scp_spare[2]; }; /************************************************* * Command Control Block Structure *************************************************/ typedef int scsi_low_tag_t; struct targ_info; #define SCSI_LOW_UNKLUN ((u_int) -1) #define SCSI_LOW_UNKTAG ((scsi_low_tag_t) -1) struct slccb { TAILQ_ENTRY(slccb) ccb_chain; void *osdep; /* os depend structure */ struct targ_info *ti; /* targ_info */ struct lun_info *li; /* lun info */ struct buf *bp; /* io bufs */ scsi_low_tag_t ccb_tag; /* effective qtag */ scsi_low_tag_t ccb_otag; /* allocated qtag */ /***************************************** * Scsi data pointers (original and saved) *****************************************/ struct sc_p ccb_scp; /* given */ struct sc_p ccb_sscp; /* saved scsi data pointer */ int ccb_datalen; /* transferred data counter */ /***************************************** * Msgout *****************************************/ u_int ccb_msgoutflag; u_int ccb_omsgoutflag; /***************************************** * Error or Timeout counters *****************************************/ u_int ccb_flags; #define CCB_INTERNAL 0x0001 #define CCB_SENSE 0x0002 #define CCB_CLEARQ 0x0004 #define CCB_DISCQ 0x0008 #define CCB_STARTQ 0x0010 #define CCB_POLLED 0x0100 /* polling ccb */ #define CCB_NORETRY 0x0200 /* do NOT retry */ #define CCB_AUTOSENSE 0x0400 /* do a sense after CA */ #define CCB_URGENT 0x0800 /* an urgent ccb */ #define CCB_NOSDONE 0x1000 /* do not call an os done routine */ #define CCB_SCSIIO 0x2000 /* a normal scsi io coming from upper layer */ #define CCB_SILENT 0x4000 /* no terminate messages */ u_int ccb_error; int ccb_rcnt; /* retry counter */ int ccb_selrcnt; /* selection retry counter */ int ccb_tc; /* timer counter 
*/ int ccb_tcmax; /* max timeout */ /***************************************** * Sense data buffer *****************************************/ u_int8_t ccb_scsi_cmd[12]; struct scsi_sense_data ccb_sense; }; /************************************************* * Slccb functions *************************************************/ GENERIC_CCB_ASSERT(scsi_low, slccb) /************************************************* * Target and Lun structures *************************************************/ struct scsi_low_softc; LIST_HEAD(scsi_low_softc_tab, scsi_low_softc); TAILQ_HEAD(targ_info_tab, targ_info); LIST_HEAD(lun_info_tab, lun_info); struct lun_info { int li_lun; struct targ_info *li_ti; /* my target */ LIST_ENTRY(lun_info) lun_chain; /* targ_info link */ struct slccbtab li_discq; /* disconnect queue */ /* * qtag control */ int li_maxnexus; int li_maxnqio; int li_nqio; int li_disc; #define SCSI_LOW_MAXNEXUS (sizeof(u_int) * NBBY) u_int li_qtagbits; #ifdef SCSI_LOW_ALT_QTAG_ALLOCATE u_int8_t li_qtagarray[SCSI_LOW_MAXNEXUS]; u_int li_qd; #endif /* SCSI_LOW_ALT_QTAG_ALLOCATE */ #define SCSI_LOW_QFLAG_CA_QCLEAR 0x01 u_int li_qflags; /* * lun state */ #define SCSI_LOW_LUN_SLEEP 0x00 #define SCSI_LOW_LUN_START 0x01 #define SCSI_LOW_LUN_INQ 0x02 #define SCSI_LOW_LUN_MODEQ 0x03 #define SCSI_LOW_LUN_OK 0x04 u_int li_state; /* target lun state */ /* * lun control flags */ u_int li_flags_valid; /* valid flags */ #define SCSI_LOW_LUN_FLAGS_USER_VALID 0x0001 #define SCSI_LOW_LUN_FLAGS_DISK_VALID 0x0002 #define SCSI_LOW_LUN_FLAGS_QUIRKS_VALID 0x0004 #define SCSI_LOW_LUN_FLAGS_ALL_VALID \ (SCSI_LOW_LUN_FLAGS_USER_VALID | \ SCSI_LOW_LUN_FLAGS_DISK_VALID | SCSI_LOW_LUN_FLAGS_QUIRKS_VALID) u_int li_flags; /* real lun control flags */ u_int li_cfgflags; /* lun control flags given by user */ u_int li_diskflags; /* lun control flags given by hardware info */ u_int li_quirks; /* lun control flags given by upper layer */ /* inq buffer */ struct scsi_low_inq_data { u_int8_t sd_type; u_int8_t sd_sp1; u_int8_t sd_version; u_int8_t sd_resp; u_int8_t sd_len; u_int8_t sd_sp2[2]; u_int8_t sd_support; } __packed li_inq; /* modeq buffer */ struct scsi_low_mode_sense_data { u_int8_t sms_header[4]; struct { u_int8_t cmp_page; u_int8_t cmp_length; u_int8_t cmp_rlec; u_int8_t cmp_qc; u_int8_t cmp_eca; u_int8_t cmp_spare[3]; } __packed sms_cmp; } li_sms; }; struct scsi_low_msg_log { int slml_ptr; struct { u_int8_t msg[2]; } slml_msg[SCSI_LOW_MSG_LOG_DATALEN]; }; struct targ_info { TAILQ_ENTRY(targ_info) ti_chain; /* targ_info link */ struct scsi_low_softc *ti_sc; /* our softc */ u_int ti_id; /* scsi id */ /* * Lun chain */ struct lun_info_tab ti_litab; /* lun chain */ /* * total disconnected nexus */ int ti_disc; /* * Scsi phase control */ #define PH_NULL 0x00 #define PH_ARBSTART 0x01 #define PH_SELSTART 0x02 #define PH_SELECTED 0x03 #define PH_CMD 0x04 #define PH_DATA 0x05 #define PH_MSGIN 0x06 #define PH_MSGOUT 0x07 #define PH_STAT 0x08 #define PH_DISC 0x09 #define PH_RESEL 0x0a u_int ti_phase; /* scsi phase */ u_int ti_ophase; /* old scsi phase */ /* * Msg in */ u_int ti_msginptr; /* msgin ptr */ u_int ti_msginlen; /* expected msg length */ int ti_msgin_parity_error; /* parity error detected */ u_int8_t ti_msgin[SCSI_LOW_MAX_MSGLEN]; /* msgin buffer */ /* * Msg out */ u_int ti_msgflags; /* msgs to be asserted */ u_int ti_omsgflags; /* msgs asserted */ u_int ti_emsgflags; /* a msg currently asserted */ #define SCSI_LOW_MSG_RESET 0x00000001 #define SCSI_LOW_MSG_REJECT 0x00000002 #define SCSI_LOW_MSG_PARITY 0x00000004 #define 
SCSI_LOW_MSG_ERROR 0x00000008 #define SCSI_LOW_MSG_IDENTIFY 0x00000010 #define SCSI_LOW_MSG_ABORT 0x00000020 #define SCSI_LOW_MSG_TERMIO 0x00000040 #define SCSI_LOW_MSG_SIMPLE_QTAG 0x00000080 #define SCSI_LOW_MSG_ORDERED_QTAG 0x00000100 #define SCSI_LOW_MSG_HEAD_QTAG 0x00000200 #define SCSI_LOW_MSG_ABORT_QTAG 0x00000400 #define SCSI_LOW_MSG_CLEAR_QTAG 0x00000800 #define SCSI_LOW_MSG_WIDE 0x00001000 #define SCSI_LOW_MSG_SYNCH 0x00002000 #define SCSI_LOW_MSG_NOOP 0x00004000 #define SCSI_LOW_MSG_LAST 0x00008000 #define SCSI_LOW_MSG_ALL 0xffffffff /* msgout buffer */ u_int8_t ti_msgoutstr[SCSI_LOW_MAX_MSGLEN]; /* scsi msgout */ u_int ti_msgoutlen; /* msgout strlen */ /* * target initialize msgout */ u_int ti_setup_msg; /* setup msgout requests */ u_int ti_setup_msg_done; /* * synch and wide data info */ u_int ti_flags_valid; /* valid flags */ #define SCSI_LOW_TARG_FLAGS_USER_VALID 0x0001 #define SCSI_LOW_TARG_FLAGS_DISK_VALID 0x0002 #define SCSI_LOW_TARG_FLAGS_QUIRKS_VALID 0x0004 #define SCSI_LOW_TARG_FLAGS_ALL_VALID \ (SCSI_LOW_TARG_FLAGS_USER_VALID | \ SCSI_LOW_TARG_FLAGS_DISK_VALID | SCSI_LOW_TARG_FLAGS_QUIRKS_VALID) u_int ti_diskflags; /* given target disk flags */ u_int ti_quirks; /* given target quirk */ struct synch { u_int8_t offset; u_int8_t period; } ti_osynch, ti_maxsynch; /* synch data */ #define SCSI_LOW_BUS_WIDTH_8 0 #define SCSI_LOW_BUS_WIDTH_16 1 #define SCSI_LOW_BUS_WIDTH_32 2 u_int ti_owidth, ti_width; /* * lun info size. */ int ti_lunsize; #ifdef SCSI_LOW_DIAGNOSTIC struct scsi_low_msg_log ti_log_msgout; struct scsi_low_msg_log ti_log_msgin; #endif /* SCSI_LOW_DIAGNOSTIC */ }; /************************************************* * COMMON HEADER STRUCTURE *************************************************/ struct scsi_low_softc; struct proc; typedef struct scsi_low_softc *sc_low_t; #define SCSI_LOW_START_OK 0 #define SCSI_LOW_START_FAIL 1 #define SCSI_LOW_INFO_ALLOC 0 #define SCSI_LOW_INFO_REVOKE 1 #define SCSI_LOW_INFO_DEALLOC 2 #define SCSI_LOW_POWDOWN 1 #define SCSI_LOW_ENGAGE 2 #define SC_LOW_INIT_T (int (*)(sc_low_t, int)) #define SC_LOW_BUSRST_T (void (*)(sc_low_t)) #define SC_LOW_TARG_INIT_T (int (*)(sc_low_t, struct targ_info *, int)) #define SC_LOW_LUN_INIT_T (int (*)(sc_low_t, struct targ_info *, struct lun_info *, int)) #define SC_LOW_SELECT_T (int (*)(sc_low_t, struct slccb *)) #define SC_LOW_ATTEN_T (void (*)(sc_low_t)) #define SC_LOW_NEXUS_T (int (*)(sc_low_t)) #define SC_LOW_MSG_T (int (*)(sc_low_t, struct targ_info *, u_int)) #define SC_LOW_POLL_T (int (*)(void *)) #define SC_LOW_POWER_T (int (*)(sc_low_t, u_int)) #define SC_LOW_TIMEOUT_T (int (*)(sc_low_t)) struct scsi_low_funcs { int (*scsi_low_init)(sc_low_t, int); void (*scsi_low_bus_reset)(sc_low_t); int (*scsi_low_targ_init)(sc_low_t, struct targ_info *, int); int (*scsi_low_lun_init)(sc_low_t, struct targ_info *, struct lun_info *, int); int (*scsi_low_start_bus)(sc_low_t, struct slccb *); int (*scsi_low_establish_lun_nexus)(sc_low_t); int (*scsi_low_establish_ccb_nexus)(sc_low_t); void (*scsi_low_attention)(sc_low_t); int (*scsi_low_msg)(sc_low_t, struct targ_info *, u_int); int (*scsi_low_timeout)(sc_low_t); int (*scsi_low_poll)(void *); int (*scsi_low_power)(sc_low_t, u_int); int (*scsi_low_ioctl)(sc_low_t, u_long, caddr_t, int, struct proc *); }; struct scsi_low_softc { device_t sl_dev; struct cam_sim *sl_sim; struct cam_path *sl_path; int sl_poll_count; struct mtx sl_lock; struct callout sl_engage_timer; struct callout sl_timeout_timer; #ifdef SCSI_LOW_POWFUNC struct callout sl_recover_timer; #endif 
/* our chain */ LIST_ENTRY(scsi_low_softc) sl_chain; /* my targets */ struct targ_info *sl_ti[SCSI_LOW_NTARGETS]; struct targ_info_tab sl_titab; /* current active T_L_Q nexus */ struct targ_info *sl_Tnexus; /* Target nexus */ struct lun_info *sl_Lnexus; /* Lun nexus */ struct slccb *sl_Qnexus; /* Qtag nexus */ int sl_nexus_call; /* ccb start queue */ struct slccbtab sl_start; /* retry limit and phase change counter */ int sl_max_retry; int sl_ph_count; int sl_timeout_count; /* selection & total num disconnect targets */ int sl_nio; int sl_disc; int sl_retry_sel; struct slccb *sl_selid; /* attention */ int sl_atten; /* ATN asserted */ int sl_clear_atten; /* negate ATN required */ /* scsi phase suggested by scsi msg */ u_int sl_msgphase; #define MSGPH_NULL 0x00 /* no msg */ #define MSGPH_DISC 0x01 /* disconnect msg */ #define MSGPH_CMDC 0x02 /* cmd complete msg */ #define MSGPH_ABORT 0x03 /* abort seq */ #define MSGPH_TERM 0x04 /* current io terminate */ #define MSGPH_LCTERM 0x05 /* cmd link terminated */ #define MSGPH_RESET 0x06 /* reset target */ /* error */ u_int sl_error; /* error flags */ #define FATALIO 0x0001 /* generic io error & retry io */ #define ABORTIO 0x0002 /* generic io error & terminate io */ #define TIMEOUTIO 0x0004 /* watch dog timeout */ #define SELTIMEOUTIO 0x0008 /* selection timeout */ #define PDMAERR 0x0010 /* dma xfer error */ #define MSGERR 0x0020 /* msgsys error */ #define PARITYERR 0x0040 /* parity error */ #define BUSYERR 0x0080 /* target busy error */ #define STATERR 0x0100 /* status error */ #define UACAERR 0x0200 /* target CA state, no sense check */ #define SENSEIO 0x1000 /* cmd not executed but sense data ok */ #define SENSEERR 0x2000 /* cmd not executed and sense data bad */ #define UBFERR 0x4000 /* unexpected bus free */ #define PENDINGIO 0x8000 /* ccb start not yet */ #define SCSI_LOW_ERRORBITS "\020\017ubferr\016senseerr\015senseio\012uacaerr\011staterr\010busy\007parity\006msgerr\005pdmaerr\004seltimeout\003timeout\002abort\001fatal" /* current scsi data pointer */ struct sc_p sl_scp; /* power control */ u_int sl_active; /* host is busy state */ int sl_powc; /* power down timer counter */ u_int sl_rstep; /* resume step */ /* configuration flags */ u_int sl_flags; #define HW_POWDOWN 0x0001 #define HW_RESUME 0x0002 #define HW_PDMASTART 0x0004 #define HW_INACTIVE 0x0008 #define HW_POWERCTRL 0x0010 #define HW_INITIALIZING 0x0020 #define HW_READ_PADDING 0x1000 #define HW_WRITE_PADDING 0x2000 u_int sl_cfgflags; #define CFG_NODISC 0x0001 #define CFG_NOPARITY 0x0002 #define CFG_NOATTEN 0x0004 #define CFG_ASYNC 0x0008 #define CFG_NOQTAG 0x0010 int sl_show_result; #define SHOW_SYNCH_NEG 0x0001 #define SHOW_WIDE_NEG 0x0002 #define SHOW_CALCF_RES 0x0010 #define SHOW_PROBE_RES 0x0020 #define SHOW_ALL_NEG -1 /* host information */ u_int sl_hostid; int sl_nluns; int sl_ntargs; int sl_openings; /* interface functions */ struct scsi_low_funcs *sl_funcs; /* targinfo size */ int sl_targsize; }; #define SCSI_LOW_LOCK(sl) mtx_lock(&(sl)->sl_lock) #define SCSI_LOW_UNLOCK(sl) mtx_unlock(&(sl)->sl_lock) #define SCSI_LOW_ASSERT_LOCKED(sl) mtx_assert(&(sl)->sl_lock, MA_OWNED) /************************************************* * SCSI LOW service functions *************************************************/
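/*
 * A small sketch (not part of this change) of what the SAVE DATA
 * POINTER / RESTORE POINTERS handlers in scsi_low.c above
 * (scsi_low_msginfunc_sdp() and scsi_low_msginfunc_rp()) amount to:
 * on SAVE DATA POINTER the live scsi pointer is checkpointed into the
 * ccb's saved slot, and on RESTORE POINTERS it is copied back, so a
 * transfer interrupted by a disconnect resumes where the target last
 * checkpointed it.  The structure and function names below are
 * hypothetical models, not the driver's own.
 */
struct scp_model {			/* data side of struct sc_p */
	unsigned char *scp_data;	/* current buffer position */
	int scp_datalen;		/* bytes still to transfer */
};

struct ccb_model {
	struct scp_model ccb_sscp;	/* saved scsi data pointer */
};

/* MSG_SAVESP: checkpoint how far the transfer has progressed. */
static void
save_data_pointer(struct ccb_model *cb, const struct scp_model *cur)
{
	cb->ccb_sscp = *cur;
}

/* MSG_RESTORESP: rewind the live pointer to the last checkpoint. */
static void
restore_pointers(const struct ccb_model *cb, struct scp_model *cur)
{
	*cur = cb->ccb_sscp;
}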
/* * Scsi low attachment function. */ int scsi_low_attach(struct scsi_low_softc *, int, int, int, int, int); int scsi_low_detach(struct scsi_low_softc *); /* * Scsi low interface activate or deactivate functions */ int scsi_low_is_busy(struct scsi_low_softc *); int scsi_low_activate(struct scsi_low_softc *); int scsi_low_deactivate(struct scsi_low_softc *); /* * Scsi phase "bus service" functions. * These functions correspond to the scsi bus phases. */ /* bus idle phase (other initiators or targets release bus) */ void scsi_low_bus_idle(struct scsi_low_softc *); /* arbitration and selection phase */ void scsi_low_arbit_fail(struct scsi_low_softc *, struct slccb *); static __inline void scsi_low_arbit_win(struct scsi_low_softc *); /* msgout phase */ #define SCSI_LOW_MSGOUT_INIT 0x00000001 #define SCSI_LOW_MSGOUT_UNIFY 0x00000002 int scsi_low_msgout(struct scsi_low_softc *, struct targ_info *, u_int); /* msgin phase */ #define SCSI_LOW_DATA_PE 0x80000000 int scsi_low_msgin(struct scsi_low_softc *, struct targ_info *, u_int); /* statusin phase */ static __inline int scsi_low_statusin(struct scsi_low_softc *, struct targ_info *, u_int); /* data phase */ int scsi_low_data(struct scsi_low_softc *, struct targ_info *, struct buf **, int); static __inline void scsi_low_data_finish(struct scsi_low_softc *); /* cmd phase */ int scsi_low_cmd(struct scsi_low_softc *, struct targ_info *); /* reselection phase */ struct targ_info *scsi_low_reselected(struct scsi_low_softc *, u_int); /* disconnection phase */ int scsi_low_disconnected(struct scsi_low_softc *, struct targ_info *); /* * Scsi bus restart function. * Cancel all established nexuses => scsi system initialized => restart jobs. */ #define SCSI_LOW_RESTART_HARD 1 #define SCSI_LOW_RESTART_SOFT 0 int scsi_low_restart(struct scsi_low_softc *, int, u_char *); /* * Scsi utility functions */ /* print current status */ void scsi_low_print(struct scsi_low_softc *, struct targ_info *); /* bus reset utility */ void scsi_low_bus_reset(struct scsi_low_softc *); /************************************************* * Message macro defs *************************************************/ #define SCSI_LOW_SETUP_PHASE(ti, phase) \ { \ (ti)->ti_ophase = ti->ti_phase; \ (ti)->ti_phase = (phase); \ } #define SCSI_LOW_SETUP_MSGPHASE(slp, PHASE) \ { \ (slp)->sl_msgphase = (PHASE); \ } #define SCSI_LOW_ASSERT_ATN(slp) \ { \ (slp)->sl_atten = 1; \ } #define SCSI_LOW_DEASSERT_ATN(slp) \ { \ (slp)->sl_atten = 0; \ } /************************************************* * Inline functions *************************************************/ static __inline void scsi_low_attention(struct scsi_low_softc *); static __inline int scsi_low_is_msgout_continue(struct targ_info *, u_int); static __inline int scsi_low_assert_msg(struct scsi_low_softc *, struct targ_info *, u_int, int); static __inline int scsi_low_is_disconnect_ok(struct slccb *); static __inline int scsi_low_is_msgout_continue(ti, mask) struct targ_info *ti; u_int mask; { return ((ti->ti_msgflags & (~mask)) != 0); } static __inline int scsi_low_is_disconnect_ok(cb) struct slccb *cb; { return ((cb->li->li_flags & SCSI_LOW_DISC) != 0 && (cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) == 0); } static __inline void scsi_low_attention(slp) struct scsi_low_softc *slp; { if (slp->sl_atten != 0) return; (*slp->sl_funcs->scsi_low_attention) (slp); SCSI_LOW_ASSERT_ATN(slp); } static __inline int scsi_low_assert_msg(slp, ti, msg, now) struct scsi_low_softc *slp; struct targ_info *ti; u_int msg; int now; { ti->ti_msgflags |= msg; if (now != 0) 
scsi_low_attention(slp); return 0; } static __inline void scsi_low_arbit_win(slp) struct scsi_low_softc *slp; { slp->sl_selid = NULL; } static __inline void scsi_low_data_finish(slp) struct scsi_low_softc *slp; { if (slp->sl_Qnexus != NULL) { slp->sl_Qnexus->ccb_datalen = slp->sl_scp.scp_datalen; } } static __inline int scsi_low_statusin(slp, ti, c) struct scsi_low_softc *slp; struct targ_info *ti; u_int c; { slp->sl_ph_count ++; if ((c & SCSI_LOW_DATA_PE) != 0) { scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ERROR, 0); return EIO; } slp->sl_scp.scp_status = (u_int8_t) c; return 0; } /************************************************* * Message out defs *************************************************/ /* XXX: use scsi_message.h */ #define ST_GOOD 0x00 #define ST_CHKCOND 0x02 #define ST_MET 0x04 #define ST_BUSY 0x08 #define ST_INTERGOOD 0x10 #define ST_INTERMET 0x14 #define ST_CONFLICT 0x18 #define ST_CMDTERM 0x22 #define ST_QUEFULL 0x28 #define ST_UNKNOWN 0xff #define MSG_COMP 0x00 #define MSG_EXTEND 0x01 #define MKMSG_EXTEND(XLEN, XCODE) ((((u_int)(XLEN)) << NBBY) | ((u_int)(XCODE))) #define MSG_EXTEND_MDPCODE 0x00 #define MSG_EXTEND_MDPLEN 0x05 #define MSG_EXTEND_SYNCHCODE 0x01 #define MSG_EXTEND_SYNCHLEN 0x03 #define MSG_EXTEND_WIDECODE 0x03 #define MSG_EXTEND_WIDELEN 0x02 #define MSG_SAVESP 0x02 #define MSG_RESTORESP 0x03 #define MSG_DISCON 0x04 #define MSG_I_ERROR 0x05 #define MSG_ABORT 0x06 #define MSG_REJECT 0x07 #define MSG_NOOP 0x08 #define MSG_PARITY 0x09 #define MSG_LCOMP 0x0a #define MSG_LCOMP_F 0x0b #define MSG_RESET 0x0c #define MSG_ABORT_QTAG 0x0d #define MSG_CLEAR_QTAG 0x0e #define MSG_TERM_IO 0x11 #define MSG_SIMPLE_QTAG 0x20 #define MSG_HEAD_QTAG 0x21 #define MSG_ORDERED_QTAG 0x22 #define MSG_IDENTIFY 0x80 #define MSG_IDENTIFY_DISCPRIV 0x40 #endif /* !_SCSI_LOW_H_ */ Index: head/sys/cam/scsi/scsi_pass.c =================================================================== --- head/sys/cam/scsi/scsi_pass.c (revision 326264) +++ head/sys/cam/scsi/scsi_pass.c (revision 326265) @@ -1,2280 +1,2282 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { PASS_FLAG_OPEN = 0x01, PASS_FLAG_LOCKED = 0x02, PASS_FLAG_INVALID = 0x04, PASS_FLAG_INITIAL_PHYSPATH = 0x08, PASS_FLAG_ZONE_INPROG = 0x10, PASS_FLAG_ZONE_VALID = 0x20, PASS_FLAG_UNMAPPED_CAPABLE = 0x40, PASS_FLAG_ABANDONED_REF_SET = 0x80 } pass_flags; typedef enum { PASS_STATE_NORMAL } pass_state; typedef enum { PASS_CCB_BUFFER_IO, PASS_CCB_QUEUED_IO } pass_ccb_types; #define ccb_type ppriv_field0 #define ccb_ioreq ppriv_ptr1 /* * The maximum number of memory segments we preallocate. */ #define PASS_MAX_SEGS 16 typedef enum { PASS_IO_NONE = 0x00, PASS_IO_USER_SEG_MALLOC = 0x01, PASS_IO_KERN_SEG_MALLOC = 0x02, PASS_IO_ABANDONED = 0x04 } pass_io_flags; struct pass_io_req { union ccb ccb; union ccb *alloced_ccb; union ccb *user_ccb_ptr; camq_entry user_periph_links; ccb_ppriv_area user_periph_priv; struct cam_periph_map_info mapinfo; pass_io_flags flags; ccb_flags data_flags; int num_user_segs; bus_dma_segment_t user_segs[PASS_MAX_SEGS]; int num_kern_segs; bus_dma_segment_t kern_segs[PASS_MAX_SEGS]; bus_dma_segment_t *user_segptr; bus_dma_segment_t *kern_segptr; int num_bufs; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint8_t *user_bufs[CAM_PERIPH_MAXMAPS]; uint8_t *kern_bufs[CAM_PERIPH_MAXMAPS]; struct bintime start_time; TAILQ_ENTRY(pass_io_req) links; }; struct pass_softc { pass_state state; pass_flags flags; u_int8_t pd_type; union ccb saved_ccb; int open_count; u_int maxio; struct devstat *device_stats; struct cdev *dev; struct cdev *alias_dev; struct task add_physpath_task; struct task shutdown_kqueue_task; struct selinfo read_select; TAILQ_HEAD(, pass_io_req) incoming_queue; TAILQ_HEAD(, pass_io_req) active_queue; TAILQ_HEAD(, pass_io_req) abandoned_queue; TAILQ_HEAD(, pass_io_req) done_queue; struct cam_periph *periph; char zone_name[12]; char io_zone_name[12]; uma_zone_t pass_zone; uma_zone_t pass_io_zone; size_t io_zone_size; }; static d_open_t passopen; static d_close_t passclose; static d_ioctl_t passioctl; static d_ioctl_t passdoioctl; static d_poll_t passpoll; static d_kqfilter_t passkqfilter; static void passreadfiltdetach(struct knote *kn); static int passreadfilt(struct knote *kn, long hint); static periph_init_t passinit; static periph_ctor_t passregister; static periph_oninv_t passoninvalidate; static periph_dtor_t passcleanup; static periph_start_t passstart; static void pass_shutdown_kqueue(void *context, int pending); static void pass_add_physpath(void *context, int pending); static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void passdone(struct cam_periph *periph, union ccb *done_ccb); static int passcreatezone(struct cam_periph *periph); static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req); static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction); static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req); static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req); static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb); static struct periph_driver 
passdriver = { passinit, "pass", TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pass, passdriver); static struct cdevsw pass_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = passopen, .d_close = passclose, .d_ioctl = passioctl, .d_poll = passpoll, .d_kqfilter = passkqfilter, .d_name = "pass", }; static struct filterops passread_filtops = { .f_isfd = 1, .f_detach = passreadfiltdetach, .f_event = passreadfilt }; static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers"); static void passinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pass: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void passrejectios(struct cam_periph *periph) { struct pass_io_req *io_req, *io_req2; struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * The user can no longer get status for I/O on the done queue, so * clean up all outstanding I/O on the done queue. */ TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * The underlying device is gone, so we can't issue these I/Os. * The devfs node has been shut down, so we can't return status to * the user. Free any I/O left on the incoming queue. */ TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * Normally we would put I/Os on the abandoned queue and acquire a * reference when we saw the final close. But, the device went * away and devfs may have moved everything off to deadfs by the * time the I/O done callback is called; as a result, we won't see * any more closes. So, if we have any active I/Os, we need to put * them on the abandoned queue. When the abandoned queue is empty, * we'll release the remaining reference (see below) to the peripheral. */ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } /* * If we put any I/O on the abandoned queue, acquire a reference. */ if ((!TAILQ_EMPTY(&softc->abandoned_queue)) && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } } static void passdevgonecb(void *arg) { struct cam_periph *periph; struct mtx *mtx; struct pass_softc *softc; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct pass_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. * Accordingly, inform all queued I/Os of their fate. */ cam_periph_release_locked(periph); passrejectios(periph); /* * We reference the SIM lock directly here, instead of using * cam_periph_unlock(). 
The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); /* * We have to remove our kqueue context from a thread because it * may sleep. It would be nice if we could get a callback from * kqueue when it is done cleaning up resources. */ taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task); } static void passoninvalidate(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, passasync, periph, periph->path); softc->flags |= PASS_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, passdevgonecb, periph); } static void passcleanup(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(TAILQ_EMPTY(&softc->active_queue), ("%s called when there are commands on the active queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->abandoned_queue), ("%s called when there are commands on the abandoned queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->incoming_queue), ("%s called when there are commands on the incoming queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->done_queue), ("%s called when there are commands on the done queue!\n", __func__)); devstat_remove_entry(softc->device_stats); cam_periph_unlock(periph); /* * We call taskqueue_drain() for the physpath task to make sure it * is complete. We drop the lock because this can potentially * sleep. XXX KDM that is bad. Need a way to get a callback when * a taskqueue is drained. * * Note that we don't drain the kqueue shutdown task queue. This * is because we hold a reference on the periph for kqueue, and * release that reference from the kqueue shutdown task queue. So * we cannot come into this routine unless we've released that * reference. Also, because that could be the last reference, we * could be called from the cam_periph_release() call in * pass_shutdown_kqueue(). In that case, the taskqueue_drain() * would deadlock. It would be preferable if we had a way to * get a callback when a taskqueue is done. */ taskqueue_drain(taskqueue_thread, &softc->add_physpath_task); cam_periph_lock(periph); free(softc, M_DEVBUF); } static void pass_shutdown_kqueue(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; periph = context; softc = periph->softc; knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0); knlist_destroy(&softc->read_select.si_note); /* * Release the reference we held for kqueue. */ cam_periph_release(periph); } static void pass_add_physpath(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; char *physpath; /* * If we have one, create a devfs alias for our * physical path. 
*/ periph = context; softc = periph->softc; physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK); mtx = cam_periph_mtx(periph); mtx_lock(mtx); if (periph->flags & CAM_PERIPH_INVALID) goto out; if (xpt_getattr(physpath, MAXPATHLEN, "GEOM::physpath", periph->path) == 0 && strlen(physpath) != 0) { mtx_unlock(mtx); make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev, softc->dev, softc->alias_dev, physpath); mtx_lock(mtx); } out: /* * Now that we've made our alias, we no longer have to have a * reference to the device. */ if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0) softc->flags |= PASS_FLAG_INITIAL_PHYSPATH; /* * We always acquire a reference to the periph before queueing this * task queue function, so it won't go away before we run. */ while (pending-- > 0) cam_periph_release_locked(periph); mtx_unlock(mtx); free(physpath, M_DEVBUF); } static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(passregister, passoninvalidate, passcleanup, passstart, "pass", CAM_PERIPH_BIO, path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("passasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct pass_softc *softc; cam_status status; softc = (struct pass_softc *)periph->softc; /* * Acquire a reference to the periph before we * start the taskqueue, so that we don't run into * a situation where the periph goes away before * the task queue has a chance to run. */ status = cam_periph_acquire(periph); if (status != CAM_REQ_CMP) break; taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status passregister(struct cam_periph *periph, void *arg) { struct pass_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error, no_tags; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("%s: no getdev CCB, can't register device\n", __func__); return(CAM_REQ_CMP_ERR); } softc = (struct pass_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("%s: Unable to probe new device. 
" "Unable to allocate softc\n", __func__); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = PASS_STATE_NORMAL; if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI) softc->pd_type = SID_TYPE(&cgd->inq_data); else if (cgd->protocol == PROTO_SATAPM) softc->pd_type = T_ENCLOSURE; else softc->pd_type = T_DIRECT; periph->softc = softc; softc->periph = periph; TAILQ_INIT(&softc->incoming_queue); TAILQ_INIT(&softc->active_queue); TAILQ_INIT(&softc->abandoned_queue); TAILQ_INIT(&softc->done_queue); snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d", periph->periph_name, periph->unit_number); snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO", periph->periph_name, periph->unit_number); softc->io_zone_size = MAXPHYS; knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph)); bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ if (cpi.hba_misc & PIM_UNMAPPED) softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE; /* * We pass in 0 for a blocksize, since we don't * know what the blocksize of this device is, if * it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("pass", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Initialize the taskqueue handler for shutting down kqueue. */ TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0, pass_shutdown_kqueue, periph); /* * Acquire a reference to the periph that we can release once we've * cleaned up the kqueue. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &pass_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } /* * Hold a reference to the periph before we create the physical * path alias so it can't go away. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } cam_periph_lock(periph); TASK_INIT(&softc->add_physpath_task, /*priority*/0, pass_add_physpath, periph); /* * See if physical path information is already available. 
*/ taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); /* * Add an async callback so that we get notified if * this device goes away or its physical path * (stored in the advanced info data of the EDT) has * changed. */ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, passasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static int passopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; if (softc->flags & PASS_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EPERM); } /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { xpt_print(periph->path, "can't do nonblocking access\n"); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EINVAL); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int passclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; if (softc->open_count == 0) { struct pass_io_req *io_req, *io_req2; TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * If there are any active I/Os, we need to forcibly acquire a * reference to the peripheral so that we don't go away * before they complete. We'll release the reference when * the abandoned queue is empty. */ io_req = TAILQ_FIRST(&softc->active_queue); if ((io_req != NULL) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } /* * Since the I/O in the active queue is not under our * control, just set a flag so that we can clean it up when * it completes and put it on the abandoned queue. This * will prevent our sending spurious completions in the * event that the device is opened again before these I/Os * complete. */ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } } cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. 
* * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return (0); } static void passstart(struct cam_periph *periph, union ccb *start_ccb) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; switch (softc->state) { case PASS_STATE_NORMAL: { struct pass_io_req *io_req; /* * Check for any queued I/O requests that require an * allocated slot. */ io_req = TAILQ_FIRST(&softc->incoming_queue); if (io_req == NULL) { xpt_release_ccb(start_ccb); break; } TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); /* * Merge the user's CCB into the allocated CCB. */ xpt_merge_ccb(start_ccb, &io_req->ccb); start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO; start_ccb->ccb_h.ccb_ioreq = io_req; start_ccb->ccb_h.cbfcnp = passdone; io_req->alloced_ccb = start_ccb; binuptime(&io_req->start_time); devstat_start_transaction(softc->device_stats, &io_req->start_time); xpt_action(start_ccb); /* * If we have any more I/O waiting, schedule ourselves again. */ if (!TAILQ_EMPTY(&softc->incoming_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } default: break; } } static void passdone(struct cam_periph *periph, union ccb *done_ccb) { struct pass_softc *softc; struct ccb_scsiio *csio; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case PASS_CCB_QUEUED_IO: { struct pass_io_req *io_req; io_req = done_ccb->ccb_h.ccb_ioreq; #if 0 xpt_print(periph->path, "%s: called for user CCB %p\n", __func__, io_req->user_ccb_ptr); #endif if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) && ((io_req->flags & PASS_IO_ABANDONED) == 0)) { int error; error = passerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } } /* * Copy the allocated CCB contents back to the malloced CCB * so we can give status back to the user when he requests it. */ bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb)); /* * Log data/transaction completion with devstat(9). */ switch (done_ccb->ccb_h.func_code) { case XPT_SCSI_IO: devstat_end_transaction(softc->device_stats, done_ccb->csio.dxfer_len - done_ccb->csio.resid, done_ccb->csio.tag_action & 0x3, ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_ATA_IO: devstat_end_transaction(softc->device_stats, done_ccb->ataio.dxfer_len - done_ccb->ataio.resid, 0, /* Not used in ATA */ ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_SMP_IO: /* * XXX KDM this isn't quite right, but there isn't * currently an easy way to represent a bidirectional * transfer in devstat. The only way to do it * and have the byte counts come out right would * mean that we would have to record two * transactions, one for the request and one for the * response. For now, so that we report something, * just treat the entire thing as a read. 
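 *
 * [Editor's sketch of the two-transaction alternative mentioned
 * above; hypothetical, not implemented here:
 *
 *	devstat_end_transaction(softc->device_stats,
 *	    done_ccb->smpio.smp_request_len, DEVSTAT_TAG_SIMPLE,
 *	    DEVSTAT_WRITE, NULL, &io_req->start_time);
 *	devstat_end_transaction(softc->device_stats,
 *	    done_ccb->smpio.smp_response_len, DEVSTAT_TAG_SIMPLE,
 *	    DEVSTAT_READ, NULL, &io_req->start_time);
 *
 * That would count the request as a write and the response as a
 * read, with correct byte counts, at the cost of inflating the
 * transaction count.]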
*/ devstat_end_transaction(softc->device_stats, done_ccb->smpio.smp_request_len + done_ccb->smpio.smp_response_len, DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL, &io_req->start_time); break; default: devstat_end_transaction(softc->device_stats, 0, DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL, &io_req->start_time); break; } /* * In the normal case, take the completed I/O off of the * active queue and put it on the done queue. Notify the * user that we have a completed I/O. */ if ((io_req->flags & PASS_IO_ABANDONED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); selwakeuppri(&softc->read_select, PRIBIO); KNOTE_LOCKED(&softc->read_select.si_note, 0); } else { /* * In the case of an abandoned I/O (final close * without fetching the I/O), take it off of the * abandoned queue and free it. */ TAILQ_REMOVE(&softc->abandoned_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); /* * Release the done_ccb here, since we may wind up * freeing the peripheral when we decrement the * reference count below. */ xpt_release_ccb(done_ccb); /* * If the abandoned queue is empty, we can release * our reference to the periph since we won't have * any more completions coming. */ if ((TAILQ_EMPTY(&softc->abandoned_queue)) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) { softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET; cam_periph_release_locked(periph); } /* * We have already released the CCB, so we can * return. */ return; } break; } } xpt_release_ccb(done_ccb); } static int passcreatezone(struct cam_periph *periph) { struct pass_softc *softc; int error; error = 0; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0), ("%s called when the pass(4) zone is valid!\n", __func__)); KASSERT((softc->pass_zone == NULL), ("%s called when the pass(4) zone is allocated!\n", __func__)); if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) { /* * We're the first context through, so we need to create * the pass(4) UMA zone for I/O requests. */ softc->flags |= PASS_FLAG_ZONE_INPROG; /* * uma_zcreate() does a blocking (M_WAITOK) allocation, * so we cannot hold a mutex while we call it. */ cam_periph_unlock(periph); softc->pass_zone = uma_zcreate(softc->zone_name, sizeof(struct pass_io_req), NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); softc->pass_io_zone = uma_zcreate(softc->io_zone_name, softc->io_zone_size, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); cam_periph_lock(periph); if ((softc->pass_zone == NULL) || (softc->pass_io_zone == NULL)) { if (softc->pass_zone == NULL) xpt_print(periph->path, "unable to allocate " "IO Req UMA zone\n"); else xpt_print(periph->path, "unable to allocate " "IO UMA zone\n"); softc->flags &= ~PASS_FLAG_ZONE_INPROG; /* Wake any waiters so they can see the failure. */ wakeup(&softc->pass_zone); goto bailout; } /* * Set the flags appropriately and notify any other waiters. */ softc->flags &= ~PASS_FLAG_ZONE_INPROG; softc->flags |= PASS_FLAG_ZONE_VALID; wakeup(&softc->pass_zone); } else { /* * In this case, the UMA zone has not yet been created, but * another context is in the process of creating it. We * need to sleep until the creation is either done or has * failed. */ while ((softc->flags & PASS_FLAG_ZONE_INPROG) && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) { error = msleep(&softc->pass_zone, cam_periph_mtx(periph), PRIBIO, "paszon", 0); if (error != 0) goto bailout; } /* * If the zone creation failed, no luck for the user.
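 *
 * [Editor's note: the INPROG/VALID handshake above is a common
 * first-caller-creates pattern.  Reduced to its skeleton:
 *
 *	if (!(flags & INPROG)) {		// first caller
 *		flags |= INPROG;
 *		unlock(); create(); lock();
 *		flags &= ~INPROG;
 *		flags |= VALID;
 *		wakeup(&chan);
 *	} else {				// everyone else
 *		while ((flags & INPROG) && !(flags & VALID))
 *			msleep(&chan, mtx, ...);
 *	}
 *
 * The mutex is dropped only around the blocking allocation, so the
 * flag tests and updates themselves remain serialized.]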
*/ if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0){ error = ENOMEM; goto bailout; } } bailout: return (error); } static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req) { union ccb *ccb; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; int i, numbufs; ccb = &io_req->ccb; switch (ccb->ccb_h.func_code) { case XPT_DEV_MATCH: numbufs = min(io_req->num_bufs, 2); if (numbufs == 1) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; } break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: data_ptrs[0] = &ccb->csio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_ATA_IO: data_ptrs[0] = &ccb->ataio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_SMP_IO: numbufs = min(io_req->num_bufs, 2); data_ptrs[0] = &ccb->smpio.smp_request; data_ptrs[1] = &ccb->smpio.smp_response; break; case XPT_DEV_ADVINFO: numbufs = min(io_req->num_bufs, 1); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: data_ptrs[0] = &ccb->nvmeio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; default: /* allow ourselves to be swapped once again */ return; break; /* NOTREACHED */ } if (io_req->flags & PASS_IO_USER_SEG_MALLOC) { free(io_req->user_segptr, M_SCSIPASS); io_req->user_segptr = NULL; } /* * We only want to free memory we malloced. */ if (io_req->data_flags == CAM_DATA_VADDR) { for (i = 0; i < io_req->num_bufs; i++) { if (io_req->kern_bufs[i] == NULL) continue; free(io_req->kern_bufs[i], M_SCSIPASS); io_req->kern_bufs[i] = NULL; } } else if (io_req->data_flags == CAM_DATA_SG) { for (i = 0; i < io_req->num_kern_segs; i++) { if ((uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr == NULL) continue; uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr); io_req->kern_segptr[i].ds_addr = 0; } } if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) { free(io_req->kern_segptr, M_SCSIPASS); io_req->kern_segptr = NULL; } if (io_req->data_flags != CAM_DATA_PADDR) { for (i = 0; i < numbufs; i++) { /* * Restore the user's buffer pointers to their * previous values. */ if (io_req->user_bufs[i] != NULL) *data_ptrs[i] = io_req->user_bufs[i]; } } } static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction) { bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy; bus_dma_segment_t *user_sglist, *kern_sglist; int i, j, error; error = 0; kern_watermark = 0; user_watermark = 0; len_to_copy = 0; len_copied = 0; user_sglist = io_req->user_segptr; kern_sglist = io_req->kern_segptr; for (i = 0, j = 0; i < io_req->num_user_segs && j < io_req->num_kern_segs;) { uint8_t *user_ptr, *kern_ptr; len_to_copy = min(user_sglist[i].ds_len -user_watermark, kern_sglist[j].ds_len - kern_watermark); user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr; user_ptr = user_ptr + user_watermark; kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr; kern_ptr = kern_ptr + kern_watermark; user_watermark += len_to_copy; kern_watermark += len_to_copy; if (!useracc(user_ptr, len_to_copy, (direction == CAM_DIR_IN) ? 
VM_PROT_WRITE : VM_PROT_READ)) { xpt_print(periph->path, "%s: unable to access user " "S/G list element %p len %zu\n", __func__, user_ptr, len_to_copy); error = EFAULT; goto bailout; } if (direction == CAM_DIR_IN) { error = copyout(kern_ptr, user_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyout of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, kern_ptr, user_ptr, error); goto bailout; } } else { error = copyin(user_ptr, kern_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyin of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, user_ptr, kern_ptr, error); goto bailout; } } len_copied += len_to_copy; if (user_sglist[i].ds_len == user_watermark) { i++; user_watermark = 0; } if (kern_sglist[j].ds_len == kern_watermark) { j++; kern_watermark = 0; } } bailout: return (error); } static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req) { union ccb *ccb; struct pass_softc *softc; int numbufs, i; uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t num_segs; uint16_t *seg_cnt_ptr; size_t maxmap; int error; cam_periph_assert(periph, MA_NOTOWNED); softc = periph->softc; error = 0; ccb = &io_req->ccb; maxmap = 0; num_segs = 0; seg_cnt_ptr = NULL; switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { printf("%s: invalid match buffer length 0\n", __func__); return(EINVAL); } if (ccb->cdm.pattern_buf_len > 0) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; lengths[0] = ccb->cdm.pattern_buf_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; lengths[1] = ccb->cdm.match_buf_len; dirs[1] = CAM_DIR_IN; numbufs = 2; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; lengths[0] = ccb->cdm.match_buf_len; dirs[0] = CAM_DIR_IN; numbufs = 1; } io_req->data_flags = CAM_DATA_VADDR; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * The user shouldn't be able to supply a bio. */ if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) return (EINVAL); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->csio.data_ptr; lengths[0] = ccb->csio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->csio.sglist_cnt; seg_cnt_ptr = &ccb->csio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; case XPT_ATA_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * We only support a single virtual address for ATA I/O. 
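 *
 * [Editor's worked example for the watermark walk in
 * passcopysglist() above, with made-up segment sizes: user list
 * {4k, 8k}, kernel list {6k, 6k}.  Each pass copies
 * min(remaining-user, remaining-kernel):
 *
 *	pass 1: 4k  (finishes user[0])
 *	pass 2: 2k  (finishes kern[0])
 *	pass 3: 6k  (finishes user[1] and kern[1])
 *
 * for 12k total, advancing whichever side's watermark reached the
 * end of its segment and resetting that watermark to zero.]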
*/ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->ataio.data_ptr; lengths[0] = ccb->ataio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; maxmap = softc->maxio; break; case XPT_SMP_IO: io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->smpio.smp_request; lengths[0] = ccb->smpio.smp_request_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = &ccb->smpio.smp_response; lengths[1] = ccb->smpio.smp_response_len; dirs[1] = CAM_DIR_IN; numbufs = 2; maxmap = softc->maxio; break; case XPT_DEV_ADVINFO: if (ccb->cdai.bufsiz == 0) return (0); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; lengths[0] = ccb->cdai.bufsiz; dirs[0] = CAM_DIR_IN; numbufs = 1; break; case XPT_NVME_ADMIN: case XPT_NVME_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return (0); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->nvmeio.data_ptr; lengths[0] = ccb->nvmeio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->nvmeio.sglist_cnt; seg_cnt_ptr = &ccb->nvmeio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; default: return(EINVAL); break; /* NOTREACHED */ } io_req->num_bufs = numbufs; /* * If there is a maximum, check to make sure that the user's * request fits within the limit. In general, we should only have * a maximum length for requests that go to hardware. Otherwise it * is whatever we're able to malloc. */ for (i = 0; i < numbufs; i++) { io_req->user_bufs[i] = *data_ptrs[i]; io_req->dirs[i] = dirs[i]; io_req->lengths[i] = lengths[i]; if (maxmap == 0) continue; if (lengths[i] <= maxmap) continue; xpt_print(periph->path, "%s: data length %u > max allowed %u " "bytes\n", __func__, lengths[i], maxmap); error = EINVAL; goto bailout; } switch (io_req->data_flags) { case CAM_DATA_VADDR: /* Map or copy the buffer into kernel address space */ for (i = 0; i < numbufs; i++) { uint8_t *tmp_buf; /* * If for some reason no length is specified, we * don't need to allocate anything. */ if (io_req->lengths[i] == 0) continue; /* * Make sure that the user's buffer is accessible * to that process. */ if (!useracc(io_req->user_bufs[i], io_req->lengths[i], (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE : VM_PROT_READ)) { xpt_print(periph->path, "%s: user address %p " "length %u is not accessible\n", __func__, io_req->user_bufs[i], io_req->lengths[i]); error = EFAULT; goto bailout; } tmp_buf = malloc(lengths[i], M_SCSIPASS, M_WAITOK | M_ZERO); io_req->kern_bufs[i] = tmp_buf; *data_ptrs[i] = tmp_buf; #if 0 xpt_print(periph->path, "%s: malloced %p len %u, user " "buffer %p, operation: %s\n", __func__, tmp_buf, lengths[i], io_req->user_bufs[i], (dirs[i] == CAM_DIR_IN) ? "read" : "write"); #endif /* * We only need to copy in if the user is writing. */ if (dirs[i] != CAM_DIR_OUT) continue; error = copyin(io_req->user_bufs[i], io_req->kern_bufs[i], lengths[i]); if (error != 0) { xpt_print(periph->path, "%s: copy of user " "buffer from %p to %p failed with " "error %d\n", __func__, io_req->user_bufs[i], io_req->kern_bufs[i], error); goto bailout; } } break; case CAM_DATA_PADDR: /* Pass down the pointer as-is */ break; case CAM_DATA_SG: { size_t sg_length, size_to_go, alloc_size; uint32_t num_segs_needed; /* * Copy the user S/G list in, and then copy in the * individual segments. */ /* * We shouldn't see this, but check just in case. 
*/ if (numbufs != 1) { xpt_print(periph->path, "%s: cannot currently handle " "more than one S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG flag set, " "but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. */ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* * We allocate buffers in io_zone_size increments for an * S/G list. This will generally be MAXPHYS. */ if (lengths[0] <= softc->io_zone_size) num_segs_needed = 1; else { num_segs_needed = lengths[0] / softc->io_zone_size; if ((lengths[0] % softc->io_zone_size) != 0) num_segs_needed++; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = num_segs_needed; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; /* * If we have enough segments allocated by default to handle * the length of the user's S/G list, use the pre-allocated * array in the I/O request. Otherwise, allocate a larger * S/G list. */ if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; if (!useracc(*data_ptrs[0], sg_length, VM_PROT_READ)) { xpt_print(periph->path, "%s: unable to access user " "S/G list at %p\n", __func__, *data_ptrs[0]); error = EFAULT; goto bailout; } error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } if (num_segs_needed > PASS_MAX_SEGS) { io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_KERN_SEG_MALLOC; } else { io_req->kern_segptr = io_req->kern_segs; } /* * Allocate the kernel S/G list. */ for (size_to_go = lengths[0], i = 0; size_to_go > 0 && i < num_segs_needed; i++, size_to_go -= alloc_size) { uint8_t *kern_ptr; alloc_size = min(size_to_go, softc->io_zone_size); kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK); io_req->kern_segptr[i].ds_addr = (bus_addr_t)(uintptr_t)kern_ptr; io_req->kern_segptr[i].ds_len = alloc_size; } if (size_to_go > 0) { printf("%s: size_to_go = %zu, software error!\n", __func__, size_to_go); error = EINVAL; goto bailout; } *data_ptrs[0] = (uint8_t *)io_req->kern_segptr; *seg_cnt_ptr = io_req->num_kern_segs; /* * We only need to copy data here if the user is writing. */ if (dirs[0] == CAM_DIR_OUT) error = passcopysglist(periph, io_req, dirs[0]); break; } case CAM_DATA_SG_PADDR: { size_t sg_length; /* * We shouldn't see this, but check just in case. */ if (numbufs != 1) { printf("%s: cannot currently handle more than one " "S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag " "set, but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list.
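 *
 * [Editor's worked example for the kernel S/G sizing in the
 * CAM_DATA_SG case above, assuming io_zone_size = MAXPHYS = 128k
 * (the usual default): a 300k request gives
 *
 *	num_segs_needed = 300k / 128k = 2, remainder 44k -> 3
 *
 * so the allocation loop carves it into two 128k buffers plus one
 * buffer of which only 44k is used (each still a full
 * io_zone_size item from the zone).]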
*/ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG_PADDR flag is set!\n", __func__); error = EINVAL; goto bailout; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = io_req->num_user_segs; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; io_req->kern_segptr = io_req->user_segptr; error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } break; } default: case CAM_DATA_BIO: /* * A user shouldn't be attaching a bio to the CCB. It * isn't a user-accessible structure. */ error = EINVAL; break; } bailout: if (error != 0) passiocleanup(softc, io_req); return (error); } static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req) { struct pass_softc *softc; union ccb *ccb; int error; int i; error = 0; softc = (struct pass_softc *)periph->softc; ccb = &io_req->ccb; switch (io_req->data_flags) { case CAM_DATA_VADDR: /* * Copy back to the user buffer if this was a read. */ for (i = 0; i < io_req->num_bufs; i++) { if (io_req->dirs[i] != CAM_DIR_IN) continue; error = copyout(io_req->kern_bufs[i], io_req->user_bufs[i], io_req->lengths[i]); if (error != 0) { xpt_print(periph->path, "Unable to copy %u " "bytes from %p to user address %p\n", io_req->lengths[i], io_req->kern_bufs[i], io_req->user_bufs[i]); goto bailout; } } break; case CAM_DATA_PADDR: /* Do nothing. The pointer is a physical address already. */ break; case CAM_DATA_SG: /* * Copy back to the user buffer if this was a read. * Restore the user's S/G list buffer pointer. */ if (io_req->dirs[0] == CAM_DIR_IN) error = passcopysglist(periph, io_req, io_req->dirs[0]); break; case CAM_DATA_SG_PADDR: /* * Restore the user's S/G list buffer pointer. No need to * copy. */ break; default: case CAM_DATA_BIO: error = EINVAL; break; } bailout: /* * Reset the user's pointers to their original values and free * allocated memory. */ passiocleanup(softc, io_req); return (error); } static int passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl); } return (error); } static int passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; uint32_t priority; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; error = 0; switch (cmd) { case CAMIOCOMMAND: { union ccb *inccb; union ccb *ccb; int ccb_malloced; inccb = (union ccb *)addr; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (inccb->ccb_h.func_code == XPT_SCSI_IO) inccb->csio.bio = NULL; #endif if (inccb->ccb_h.flags & CAM_UNLOCKED) { error = EINVAL; break; } /* * Some CCB types, like scan bus and scan lun, can only go * through the transport layer device.
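 *
 * [Editor's sketch of typical userland use of CAMIOCOMMAND via
 * libcam, for orientation; the device name is hypothetical:
 *
 *	struct cam_device *dev = cam_open_device("/dev/pass0",
 *	    O_RDWR);
 *	union ccb *ccb = cam_getccb(dev);
 *	scsi_test_unit_ready(&ccb->csio, /*retries*/ 1,
 *	    /*cbfcnp*/ NULL, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE,
 *	    /*timeout*/ 5000);
 *	if (cam_send_ccb(dev, ccb) < 0)	// issues CAMIOCOMMAND
 *		err(1, "cam_send_ccb");
 *	cam_freeccb(ccb);
 *	cam_close_device(dev);
 *
 * cam_send_ccb() hands the whole union ccb to this ioctl, which
 * copies it in, runs it synchronously, and copies it back.]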
*/ if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", inccb->ccb_h.func_code); error = ENODEV; break; } /* Compatibility for RL/priority-unaware code. */ priority = inccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Non-immediate CCBs need a CCB from the per-device pool * of CCBs, which is scheduled by the transport layer. * Immediate CCBs and user-supplied CCBs should just be * malloced. */ if ((inccb->ccb_h.func_code & XPT_FC_QUEUED) && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) { ccb = cam_periph_getccb(periph, priority); ccb_malloced = 0; } else { ccb = xpt_alloc_ccb_nowait(); if (ccb != NULL) xpt_setup_ccb(&ccb->ccb_h, periph->path, priority); ccb_malloced = 1; } if (ccb == NULL) { xpt_print(periph->path, "unable to allocate CCB\n"); error = ENOMEM; break; } error = passsendccb(periph, ccb, inccb); if (ccb_malloced) xpt_free_ccb(ccb); else xpt_release_ccb(ccb); break; } case CAMIOQUEUE: { struct pass_io_req *io_req; union ccb **user_ccb, *ccb; xpt_opcode fc; if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) { error = passcreatezone(periph); if (error != 0) goto bailout; } /* * We're going to do a blocking allocation for this I/O * request, so we have to drop the lock. */ cam_periph_unlock(periph); io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO); ccb = &io_req->ccb; user_ccb = (union ccb **)addr; /* * Unlike the CAMIOCOMMAND ioctl above, we only have a * pointer to the user's CCB, so we have to copy the whole * thing in to a buffer we have allocated (above) instead * of allowing the ioctl code to malloc a buffer and copy * it in. * * This is an advantage for this asynchronous interface, * since we don't want the memory to get freed while the * CCB is outstanding. */ #if 0 xpt_print(periph->path, "Copying user CCB %p to " "kernel address %p\n", *user_ccb, ccb); #endif error = copyin(*user_ccb, ccb, sizeof(*ccb)); if (error != 0) { xpt_print(periph->path, "Copy of user CCB %p to " "kernel address %p failed with error %d\n", *user_ccb, ccb, error); goto camioqueue_error; } #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.bio = NULL; #endif if (ccb->ccb_h.flags & CAM_UNLOCKED) { error = EINVAL; goto camioqueue_error; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) { if (ccb->csio.cdb_len > IOCDBLEN) { error = EINVAL; goto camioqueue_error; } error = copyin(ccb->csio.cdb_io.cdb_ptr, ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len); if (error != 0) goto camioqueue_error; ccb->ccb_h.flags &= ~CAM_CDB_POINTER; } /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. */ if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", ccb->ccb_h.func_code); error = ENODEV; goto camioqueue_error; } /* * Save the user's CCB pointer as well as his linked list * pointers and peripheral private area so that we can * restore these later. */ io_req->user_ccb_ptr = *user_ccb; io_req->user_periph_links = ccb->ccb_h.periph_links; io_req->user_periph_priv = ccb->ccb_h.periph_priv; /* * Now that we've saved the user's values, we can set our * own peripheral private entry. */ ccb->ccb_h.ccb_ioreq = io_req; /* Compatibility for RL/priority-unaware code. 
*/ priority = ccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Setup fields in the CCB like the path and the priority. * The path in particular cannot be done in userland, since * it is a pointer to a kernel data structure. */ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority, ccb->ccb_h.flags); /* * Setup our done routine. There is no way for the user to * have a valid pointer here. */ ccb->ccb_h.cbfcnp = passdone; fc = ccb->ccb_h.func_code; /* * If this function code has memory that can be mapped in * or out, we need to call passmemsetup(). */ if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO) || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) { error = passmemsetup(periph, io_req); if (error != 0) goto camioqueue_error; } else io_req->mapinfo.num_bufs_used = 0; cam_periph_lock(periph); /* * Everything goes on the incoming queue initially. */ TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links); /* * If the CCB is queued, and is not a user CCB, then * we need to allocate a slot for it. Call xpt_schedule() * so that our start routine will get called when a CCB is * available. */ if ((fc & XPT_FC_QUEUED) && ((fc & XPT_FC_USER_CCB) == 0)) { xpt_schedule(periph, priority); break; } /* * At this point, the CCB in question is either an * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB * and therefore should be malloced, not allocated via a slot. * Remove the CCB from the incoming queue and add it to the * active queue. */ TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); xpt_action(ccb); /* * If this is not a queued CCB (i.e. it is an immediate CCB), * then it is already done. We need to put it on the done * queue for the user to fetch. */ if ((fc & XPT_FC_QUEUED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); } break; camioqueue_error: uma_zfree(softc->pass_zone, io_req); cam_periph_lock(periph); break; } case CAMIOGET: { union ccb **user_ccb; struct pass_io_req *io_req; int old_error; user_ccb = (union ccb **)addr; old_error = 0; io_req = TAILQ_FIRST(&softc->done_queue); if (io_req == NULL) { error = ENOENT; break; } /* * Remove the I/O from the done queue. */ TAILQ_REMOVE(&softc->done_queue, io_req, links); /* * We have to drop the lock during the copyout because the * copyout can result in VM faults that require sleeping. */ cam_periph_unlock(periph); /* * Do any needed copies (e.g. for reads) and revert the * pointers in the CCB back to the user's pointers. */ error = passmemdone(periph, io_req); old_error = error; io_req->ccb.ccb_h.periph_links = io_req->user_periph_links; io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv; #if 0 xpt_print(periph->path, "Copying to user CCB %p from " "kernel address %p\n", *user_ccb, &io_req->ccb); #endif error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb)); if (error != 0) { xpt_print(periph->path, "Copy to user CCB %p from " "kernel address %p failed with error %d\n", *user_ccb, &io_req->ccb, error); } /* * Prefer the first error we got back, and make sure we * don't overwrite bad status with good. */ if (old_error != 0) error = old_error; cam_periph_lock(periph); /* * At this point, if there was an error, we could potentially * re-queue the I/O and try again. But why? The error * would almost certainly happen again. We might as well * not leak memory. 
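 *
 * [Editor's sketch of the asynchronous protocol from userland, to
 * make the queue/fetch pairing concrete; fd and CCB setup are
 * assumed, and the exact argument conventions should be checked
 * against camlib and camcontrol:
 *
 *	ioctl(fd, CAMIOQUEUE, ccb);	// ccb is a union ccb *;
 *					// the kernel copies *ccb in
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, INFTIM);		// wait for a completion
 *	ioctl(fd, CAMIOGET, ccb);	// completed CCB copied back
 *
 * Because the driver snapshots the CCB into its own pass_io_req
 * at queue time, the user's copy need not stay untouched while
 * the I/O is in flight.]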
*/ uma_zfree(softc->pass_zone, io_req); break; } default: error = cam_periph_ioctl(periph, cmd, addr, passerror); break; } bailout: cam_periph_unlock(periph); return(error); } static int passpoll(struct cdev *dev, int poll_events, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int revents; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pass_softc *)periph->softc; revents = poll_events & (POLLOUT | POLLWRNORM); if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { cam_periph_lock(periph); if (!TAILQ_EMPTY(&softc->done_queue)) { revents |= poll_events & (POLLIN | POLLRDNORM); } cam_periph_unlock(periph); if (revents == 0) selrecord(td, &softc->read_select); } return (revents); } static int passkqfilter(struct cdev *dev, struct knote *kn) { struct cam_periph *periph; struct pass_softc *softc; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pass_softc *)periph->softc; kn->kn_hook = (caddr_t)periph; kn->kn_fop = &passread_filtops; knlist_add(&softc->read_select.si_note, kn, 0); return (0); } static void passreadfiltdetach(struct knote *kn) { struct cam_periph *periph; struct pass_softc *softc; periph = (struct cam_periph *)kn->kn_hook; softc = (struct pass_softc *)periph->softc; knlist_remove(&softc->read_select.si_note, kn, 0); } static int passreadfilt(struct knote *kn, long hint) { struct cam_periph *periph; struct pass_softc *softc; int retval; periph = (struct cam_periph *)kn->kn_hook; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); if (TAILQ_EMPTY(&softc->done_queue)) retval = 0; else retval = 1; return (retval); } /* * Generally, "ccb" should be the CCB supplied by the kernel. "inccb" * should be the CCB that is copied in from the user. */ static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) { struct pass_softc *softc; struct cam_periph_map_info mapinfo; uint8_t *cmd; xpt_opcode fc; int error; softc = (struct pass_softc *)periph->softc; /* * There are some fields in the CCB header that need to be * preserved; the rest we get from the user. */ xpt_merge_ccb(ccb, inccb); if (ccb->ccb_h.flags & CAM_CDB_POINTER) { cmd = __builtin_alloca(ccb->csio.cdb_len); error = copyin(ccb->csio.cdb_io.cdb_ptr, cmd, ccb->csio.cdb_len); if (error) return (error); ccb->csio.cdb_io.cdb_ptr = cmd; } /* * Set up our done routine. There is no way for the user to * supply a valid completion function pointer here. */ ccb->ccb_h.cbfcnp = passdone; /* * Let cam_periph_mapmem do a sanity check on the data pointer format. * Even if no data transfer is needed, it's a cheap check and it * simplifies the code. */ fc = ccb->ccb_h.func_code; if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO) || (fc == XPT_MMC_IO) || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) { bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. */ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio); cam_periph_lock(periph); /* * cam_periph_mapmem returned an error; we can't continue. * Return the error to the user. */ if (error) return(error); } else /* Ensure that the unmap call later on is a no-op. */ mapinfo.num_bufs_used = 0; /* * If the user wants us to perform any error recovery, then honor * that request. Otherwise, it's up to the user to perform any * error recovery. */ cam_periph_runccb(ccb, (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
passerror : NULL, /* cam_flags */ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); cam_periph_unmapmem(ccb, &mapinfo); ccb->ccb_h.cbfcnp = NULL; ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; bcopy(ccb, inccb, sizeof(union ccb)); return(0); } static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cam_periph *periph; struct pass_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pass_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } Index: head/sys/cam/scsi/scsi_pass.h =================================================================== --- head/sys/cam/scsi/scsi_pass.h (revision 326264) +++ head/sys/cam/scsi/scsi_pass.h (revision 326265) @@ -1,50 +1,52 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SCSI_PASS_H #define _SCSI_PASS_H 1 #include #include /* * Convert to using a pointer to a ccb in the next major version. * This should allow us to avoid an extra copy of the CCB data. */ #define CAMIOCOMMAND _IOWR(CAM_VERSION, 2, union ccb) #define CAMGETPASSTHRU _IOWR(CAM_VERSION, 3, union ccb) /* * These two ioctls take a union ccb *, but that is not explicitly declared * to avoid having the ioctl handling code malloc and free their own copy * of the CCB or the CCB pointer. */ #define CAMIOQUEUE _IO(CAM_VERSION, 4) #define CAMIOGET _IO(CAM_VERSION, 5) #endif Index: head/sys/cam/scsi/scsi_pt.c =================================================================== --- head/sys/cam/scsi/scsi_pt.c (revision 326264) +++ head/sys/cam/scsi/scsi_pt.c (revision 326265) @@ -1,639 +1,641 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Implementation of SCSI Processor Target Peripheral driver for CAM. * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_pt.h" typedef enum { PT_STATE_PROBE, PT_STATE_NORMAL } pt_state; typedef enum { PT_FLAG_NONE = 0x00, PT_FLAG_OPEN = 0x01, PT_FLAG_DEVICE_INVALID = 0x02, PT_FLAG_RETRY_UA = 0x04 } pt_flags; typedef enum { PT_CCB_BUFFER_IO = 0x01, PT_CCB_RETRY_UA = 0x04, PT_CCB_BUFFER_IO_UA = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA } pt_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct pt_softc { struct bio_queue_head bio_queue; struct devstat *device_stats; LIST_HEAD(, ccb_hdr) pending_ccbs; pt_state state; pt_flags flags; union ccb saved_ccb; int io_timeout; struct cdev *dev; }; static d_open_t ptopen; static d_close_t ptclose; static d_strategy_t ptstrategy; static periph_init_t ptinit; static void ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static periph_ctor_t ptctor; static periph_oninv_t ptoninvalidate; static periph_dtor_t ptdtor; static periph_start_t ptstart; static void ptdone(struct cam_periph *periph, union ccb *done_ccb); static d_ioctl_t ptioctl; static int pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int tag_action, int readop, u_int byte2, u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len, u_int32_t timeout); static struct periph_driver ptdriver = { ptinit, "pt", TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pt, ptdriver); static struct cdevsw pt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ptopen, .d_close = ptclose, .d_read = physread, .d_write = physwrite, .d_ioctl = ptioctl, .d_strategy = ptstrategy, .d_name = "pt", }; #ifndef SCSI_PT_DEFAULT_TIMEOUT #define SCSI_PT_DEFAULT_TIMEOUT 60 #endif static int ptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); if (softc->flags & PT_FLAG_DEVICE_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((softc->flags & PT_FLAG_OPEN) == 0) softc->flags |= PT_FLAG_OPEN; else { error = EBUSY; cam_periph_release(periph); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 
("ptopen: dev=%s\n", devtoname(dev))); cam_periph_unlock(periph); return (error); } static int ptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); softc->flags &= ~PT_FLAG_OPEN; cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void ptstrategy(struct bio *bp) { struct cam_periph *periph; struct pt_softc *softc; periph = (struct cam_periph *)bp->bio_dev->si_drv1; bp->bio_resid = bp->bio_bcount; if (periph == NULL) { biofinish(bp, NULL, ENXIO); return; } cam_periph_lock(periph); softc = (struct pt_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & PT_FLAG_DEVICE_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_insert_tail(&softc->bio_queue, bp); /* * Schedule ourselves for performing the work. */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void ptinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, ptasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pt: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static cam_status ptctor(struct cam_periph *periph, void *arg) { struct pt_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("ptregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pt_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); LIST_INIT(&softc->pending_ccbs); softc->state = PT_STATE_NORMAL; bioq_init(&softc->bio_queue); softc->io_timeout = SCSI_PT_DEFAULT_TIMEOUT * 1000; periph->softc = softc; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); cam_periph_unlock(periph); make_dev_args_init(&args); args.mda_devsw = &pt_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } softc->device_stats = devstat_new_entry("pt", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_OTHER); cam_periph_lock(periph); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. 
*/ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE, ptasync, periph, periph->path); /* Tell the user we've attached to the device */ xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static void ptoninvalidate(struct cam_periph *periph) { struct pt_softc *softc; softc = (struct pt_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, ptasync, periph, periph->path); softc->flags |= PT_FLAG_DEVICE_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); } static void ptdtor(struct cam_periph *periph) { struct pt_softc *softc; softc = (struct pt_softc *)periph->softc; devstat_remove_entry(softc->device_stats); cam_periph_unlock(periph); destroy_dev(softc->dev); cam_periph_lock(periph); free(softc, M_DEVBUF); } static void ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_PROCESSOR) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor, ptstart, "pt", CAM_PERIPH_BIO, path, ptasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("ptasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_SENT_BDR: case AC_BUS_RESET: { struct pt_softc *softc; struct ccb_hdr *ccbh; softc = (struct pt_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= PT_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= PT_CCB_RETRY_UA; } /* FALLTHROUGH */ default: cam_periph_async(periph, code, path, arg); break; } } static void ptstart(struct cam_periph *periph, union ccb *start_ccb) { struct pt_softc *softc; struct bio *bp; softc = (struct pt_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptstart\n")); /* * See if there is a buf with work for us to do.. */ bp = bioq_first(&softc->bio_queue); if (bp == NULL) { xpt_release_ccb(start_ccb); } else { bioq_remove(&softc->bio_queue, bp); devstat_start_transaction_bio(softc->device_stats, bp); scsi_send_receive(&start_ccb->csio, /*retries*/4, ptdone, MSG_SIMPLE_Q_TAG, bp->bio_cmd == BIO_READ, /*byte2*/0, bp->bio_bcount, bp->bio_data, /*sense_len*/SSD_FULL_SIZE, /*timeout*/softc->io_timeout); start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO_UA; /* * Block out any asynchronous callbacks * while we touch the pending ccb list. 
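 *
 * [Editor's worked example for the scsi_send_receive() call above:
 * a 512-byte BIO_READ becomes a 6-byte RECEIVE CDB.  With
 * scsi_ulto3b() storing the length big-endian:
 *
 *	opcode   = 0x08 (RECEIVE; 0x0A SEND for writes)
 *	byte2    = 0x00
 *	xfer_len = { 0x00, 0x02, 0x00 }	// 512
 *	control  = 0x00
 *
 * matching struct scsi_send_receive declared in scsi_pt.h.]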
*/ LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); if (bp != NULL) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } } static void ptdone(struct cam_periph *periph, union ccb *done_ccb) { struct pt_softc *softc; struct ccb_scsiio *csio; softc = (struct pt_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptdone\n")); csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state) { case PT_CCB_BUFFER_IO: case PT_CCB_BUFFER_IO_UA: { struct bio *bp; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = pterror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } if (error != 0) { if (error == ENXIO) { /* * Catastrophic error. Mark our device * as invalid. */ xpt_print(periph->path, "Invalidating device\n"); softc->flags |= PT_FLAG_DEVICE_INVALID; } /* * Return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* Short transfer ??? */ bp->bio_flags |= BIO_ERROR; } } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } /* * Block out any asynchronous callbacks * while we touch the pending ccb list. */ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); biofinish(bp, softc->device_stats, 0); break; } } xpt_release_ccb(done_ccb); } static int pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct pt_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pt_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static int ptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); switch(cmd) { case PTIOCGETTIMEOUT: if (softc->io_timeout >= 1000) *(int *)addr = softc->io_timeout / 1000; else *(int *)addr = 0; break; case PTIOCSETTIMEOUT: if (*(int *)addr < 1) { error = EINVAL; break; } softc->io_timeout = *(int *)addr * 1000; break; default: error = cam_periph_ioctl(periph, cmd, addr, pterror); break; } cam_periph_unlock(periph); return(error); } void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int tag_action, int readop, u_int byte2, u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_receive *scsi_cmd; scsi_cmd = (struct scsi_send_receive *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = readop ? RECEIVE : SEND; scsi_cmd->byte2 = byte2; scsi_ulto3b(xfer_len, scsi_cmd->xfer_len); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, /*flags*/readop ?
CAM_DIR_IN : CAM_DIR_OUT, tag_action, data_ptr, xfer_len, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_pt.h =================================================================== --- head/sys/cam/scsi/scsi_pt.h (revision 326264) +++ head/sys/cam/scsi/scsi_pt.h (revision 326265) @@ -1,48 +1,50 @@ /*- * Structure and function declarations for Processor type devices. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1998 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SCSI_SCSI_PT_H #define _SCSI_SCSI_PT_H 1 struct scsi_send_receive { u_int8_t opcode; u_int8_t byte2; u_int8_t xfer_len[3]; u_int8_t control; }; /* * Opcodes */ #define RECEIVE 0x08 #define SEND 0x0A #endif /* _SCSI_SCSI_PT_H */ Index: head/sys/cam/scsi/scsi_sa.c =================================================================== --- head/sys/cam/scsi/scsi_sa.c (revision 326264) +++ head/sys/cam/scsi/scsi_sa.c (revision 326265) @@ -1,5919 +1,5921 @@ /*- * Implementation of SCSI Sequential Access Peripheral driver for CAM. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1999, 2000 Matthew Jacob * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #ifdef _KERNEL #include #include #endif #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #endif #include #include #ifndef _KERNEL #include #include #endif #include #include #include #include #include #include #include #include #ifdef _KERNEL #include "opt_sa.h" #ifndef SA_IO_TIMEOUT #define SA_IO_TIMEOUT 32 #endif #ifndef SA_SPACE_TIMEOUT #define SA_SPACE_TIMEOUT 1 * 60 #endif #ifndef SA_REWIND_TIMEOUT #define SA_REWIND_TIMEOUT 2 * 60 #endif #ifndef SA_ERASE_TIMEOUT #define SA_ERASE_TIMEOUT 4 * 60 #endif #ifndef SA_REP_DENSITY_TIMEOUT #define SA_REP_DENSITY_TIMEOUT 90 #endif #define SCSIOP_TIMEOUT (60 * 1000) /* not an option */ #define IO_TIMEOUT (SA_IO_TIMEOUT * 60 * 1000) #define REWIND_TIMEOUT (SA_REWIND_TIMEOUT * 60 * 1000) #define ERASE_TIMEOUT (SA_ERASE_TIMEOUT * 60 * 1000) #define SPACE_TIMEOUT (SA_SPACE_TIMEOUT * 60 * 1000) #define REP_DENSITY_TIMEOUT (SA_REP_DENSITY_TIMEOUT * 60 * 1000) /* * Additional options that can be set for config: SA_1FM_AT_EOT */ #ifndef UNUSED_PARAMETER #define UNUSED_PARAMETER(x) x = x #endif #define QFRLS(ccb) \ if (((ccb)->ccb_h.status & CAM_DEV_QFRZN) != 0) \ cam_release_devq((ccb)->ccb_h.path, 0, 0, 0, FALSE) /* * Driver states */ static MALLOC_DEFINE(M_SCSISA, "SCSI sa", "SCSI sequential access buffers"); typedef enum { SA_STATE_NORMAL, SA_STATE_ABNORMAL } sa_state; #define ccb_pflags ppriv_field0 #define ccb_bp ppriv_ptr1 /* bits in ccb_pflags */ #define SA_POSITION_UPDATED 0x1 typedef enum { SA_FLAG_OPEN = 0x0001, SA_FLAG_FIXED = 0x0002, SA_FLAG_TAPE_LOCKED = 0x0004, SA_FLAG_TAPE_MOUNTED = 0x0008, SA_FLAG_TAPE_WP = 0x0010, SA_FLAG_TAPE_WRITTEN = 0x0020, SA_FLAG_EOM_PENDING = 0x0040, SA_FLAG_EIO_PENDING = 0x0080, SA_FLAG_EOF_PENDING = 0x0100, SA_FLAG_ERR_PENDING = (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING| SA_FLAG_EOF_PENDING), SA_FLAG_INVALID = 0x0200, SA_FLAG_COMP_ENABLED = 0x0400, SA_FLAG_COMP_SUPP = 0x0800, SA_FLAG_COMP_UNSUPP = 0x1000, SA_FLAG_TAPE_FROZEN = 0x2000, SA_FLAG_PROTECT_SUPP = 0x4000, SA_FLAG_COMPRESSION = (SA_FLAG_COMP_SUPP|SA_FLAG_COMP_ENABLED| SA_FLAG_COMP_UNSUPP), SA_FLAG_SCTX_INIT = 0x8000 } sa_flags; typedef enum { SA_MODE_REWIND = 0x00, SA_MODE_NOREWIND = 0x01, SA_MODE_OFFLINE = 0x02 } sa_mode; typedef enum { SA_PARAM_NONE = 0x000, SA_PARAM_BLOCKSIZE = 0x001, SA_PARAM_DENSITY = 0x002, SA_PARAM_COMPRESSION = 0x004, SA_PARAM_BUFF_MODE = 0x008, SA_PARAM_NUMBLOCKS = 0x010, SA_PARAM_WP = 0x020, SA_PARAM_SPEED = 0x040, SA_PARAM_DENSITY_EXT = 0x080, SA_PARAM_LBP = 0x100, SA_PARAM_ALL = 0x1ff } sa_params; typedef enum { SA_QUIRK_NONE = 0x000, SA_QUIRK_NOCOMP = 0x001, /* Can't deal with compression at all*/ SA_QUIRK_FIXED = 0x002, /* Force fixed mode */ SA_QUIRK_VARIABLE = 0x004, /* Force variable mode */ SA_QUIRK_2FM = 0x008, /* Needs Two File Marks at EOD */ SA_QUIRK_1FM = 0x010, /* No more than 1 File Mark at EOD */ SA_QUIRK_NODREAD = 0x020, /* Don't try and dummy read density */ SA_QUIRK_NO_MODESEL = 0x040, /* 
Don't do mode select at all */ SA_QUIRK_NO_CPAGE = 0x080, /* Don't use DEVICE COMPRESSION page */ SA_QUIRK_NO_LONG_POS = 0x100 /* No long position information */ } sa_quirks; #define SA_QUIRK_BIT_STRING \ "\020" \ "\001NOCOMP" \ "\002FIXED" \ "\003VARIABLE" \ "\0042FM" \ "\0051FM" \ "\006NODREAD" \ "\007NO_MODESEL" \ "\010NO_CPAGE" \ "\011NO_LONG_POS" #define SAMODE(z) (dev2unit(z) & 0x3) #define SA_IS_CTRL(z) (dev2unit(z) & (1 << 4)) #define SA_NOT_CTLDEV 0 #define SA_CTLDEV 1 #define SA_ATYPE_R 0 #define SA_ATYPE_NR 1 #define SA_ATYPE_ER 2 #define SA_NUM_ATYPES 3 #define SAMINOR(ctl, access) \ ((ctl << 4) | (access & 0x3)) struct sa_devs { struct cdev *ctl_dev; struct cdev *r_dev; struct cdev *nr_dev; struct cdev *er_dev; }; #define SASBADDBASE(sb, indent, data, xfmt, name, type, xsize, desc) \ sbuf_printf(sb, "%*s<%s type=\"%s\" size=\"%zd\" " \ "fmt=\"%s\" desc=\"%s\">" #xfmt "\n", indent, "", \ #name, #type, xsize, #xfmt, desc ? desc : "", data, #name); #define SASBADDINT(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data), \ NULL) #define SASBADDINTDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data), \ desc) #define SASBADDUINT(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), \ NULL) #define SASBADDUINTDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), \ desc) #define SASBADDFIXEDSTR(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data), \ NULL) #define SASBADDFIXEDSTRDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data), \ desc) #define SASBADDVARSTR(sb, indent, data, fmt, name, maxlen) \ SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, NULL) #define SASBADDVARSTRDESC(sb, indent, data, fmt, name, maxlen, desc) \ SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, desc) #define SASBADDNODE(sb, indent, name) { \ sbuf_printf(sb, "%*s<%s type=\"%s\">\n", indent, "", #name, \ "node"); \ indent += 2; \ } #define SASBADDNODENUM(sb, indent, name, num) { \ sbuf_printf(sb, "%*s<%s type=\"%s\" num=\"%d\">\n", indent, "", \ #name, "node", num); \ indent += 2; \ } #define SASBENDNODE(sb, indent, name) { \ indent -= 2; \ sbuf_printf(sb, "%*s\n", indent, "", #name); \ } #define SA_DENSITY_TYPES 4 struct sa_prot_state { int initialized; uint32_t prot_method; uint32_t pi_length; uint32_t lbp_w; uint32_t lbp_r; uint32_t rbdp; }; struct sa_prot_info { struct sa_prot_state cur_prot_state; struct sa_prot_state pending_prot_state; }; /* * A table mapping protection parameters to their types and values. 
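 *
 * (Illustrative sketch, not part of the original code: an entry in this
 * table is addressed from userland by name through MTIOCPARAMSET, e.g.
 *
 *	struct mtparamset ps;
 *
 *	memset(&ps, 0, sizeof(ps));
 *	strlcpy(ps.value_name, "protection.prot_method",
 *	    sizeof(ps.value_name));
 *	ps.value_type = MT_PARAM_SET_UNSIGNED;
 *	ps.value.value_unsigned = 1;
 *	ioctl(fd, MTIOCPARAMSET, &ps);
 *
 * and sasetprotents() below range-checks the value against the min_val
 * and max_val columns before pushing the new state to the drive.)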
*/ struct sa_prot_map { char *name; mt_param_set_type param_type; off_t offset; uint32_t min_val; uint32_t max_val; uint32_t *value; } sa_prot_table[] = { { "prot_method", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, prot_method), /*min_val*/ 0, /*max_val*/ 255, NULL }, { "pi_length", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, pi_length), /*min_val*/ 0, /*max_val*/ SA_CTRL_DP_PI_LENGTH_MASK, NULL }, { "lbp_w", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, lbp_w), /*min_val*/ 0, /*max_val*/ 1, NULL }, { "lbp_r", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, lbp_r), /*min_val*/ 0, /*max_val*/ 1, NULL }, { "rbdp", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, rbdp), /*min_val*/ 0, /*max_val*/ 1, NULL } }; #define SA_NUM_PROT_ENTS nitems(sa_prot_table) #define SA_PROT_ENABLED(softc) ((softc->flags & SA_FLAG_PROTECT_SUPP) \ && (softc->prot_info.cur_prot_state.initialized != 0) \ && (softc->prot_info.cur_prot_state.prot_method != 0)) #define SA_PROT_LEN(softc) softc->prot_info.cur_prot_state.pi_length struct sa_softc { sa_state state; sa_flags flags; sa_quirks quirks; u_int si_flags; struct cam_periph *periph; struct bio_queue_head bio_queue; int queue_count; struct devstat *device_stats; struct sa_devs devs; int open_count; int num_devs_to_destroy; int blk_gran; int blk_mask; int blk_shift; u_int32_t max_blk; u_int32_t min_blk; u_int32_t maxio; u_int32_t cpi_maxio; int allow_io_split; int inject_eom; int set_pews_status; u_int32_t comp_algorithm; u_int32_t saved_comp_algorithm; u_int32_t media_blksize; u_int32_t last_media_blksize; u_int32_t media_numblks; u_int8_t media_density; u_int8_t speed; u_int8_t scsi_rev; u_int8_t dsreg; /* mtio mt_dsreg, redux */ int buffer_mode; int filemarks; union ccb saved_ccb; int last_resid_was_io; uint8_t density_type_bits[SA_DENSITY_TYPES]; int density_info_valid[SA_DENSITY_TYPES]; uint8_t density_info[SA_DENSITY_TYPES][SRDS_MAX_LENGTH]; struct sa_prot_info prot_info; int sili; int eot_warn; /* * Current position information. -1 means that the given value is * unknown. fileno and blkno are always calculated. blkno is * relative to the previous file mark. rep_fileno and rep_blkno * are as reported by the drive, if it supports the long form * report for the READ POSITION command. rep_blkno is relative to * the beginning of the partition. * * bop means that the drive is at the beginning of the partition. * eop means that the drive is between early warning and end of * partition, inside the current partition. 
* bpew means that the position is in a PEWZ (Programmable Early * Warning Zone) */ daddr_t partition; /* Absolute from BOT */ daddr_t fileno; /* Relative to beginning of partition */ daddr_t blkno; /* Relative to last file mark */ daddr_t rep_blkno; /* Relative to beginning of partition */ daddr_t rep_fileno; /* Relative to beginning of partition */ int bop; /* Beginning of Partition */ int eop; /* End of Partition */ int bpew; /* Beyond Programmable Early Warning */ /* * Latched Error Info */ struct { struct scsi_sense_data _last_io_sense; u_int64_t _last_io_resid; u_int8_t _last_io_cdb[CAM_MAX_CDBLEN]; struct scsi_sense_data _last_ctl_sense; u_int64_t _last_ctl_resid; u_int8_t _last_ctl_cdb[CAM_MAX_CDBLEN]; #define last_io_sense errinfo._last_io_sense #define last_io_resid errinfo._last_io_resid #define last_io_cdb errinfo._last_io_cdb #define last_ctl_sense errinfo._last_ctl_sense #define last_ctl_resid errinfo._last_ctl_resid #define last_ctl_cdb errinfo._last_ctl_cdb } errinfo; /* * Misc other flags/state */ u_int32_t : 29, open_rdonly : 1, /* open read-only */ open_pending_mount : 1, /* open pending mount */ ctrl_mode : 1; /* control device open */ struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; struct sa_quirk_entry { struct scsi_inquiry_pattern inq_pat; /* matching pattern */ sa_quirks quirks; /* specific quirk type */ u_int32_t prefblk; /* preferred blocksize when in fixed mode */ }; static struct sa_quirk_entry sa_quirk_table[] = { { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "OnStream", "ADR*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_NODREAD | SA_QUIRK_1FM|SA_QUIRK_NO_MODESEL, 32768 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python 06408*", "*"}, SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python 25601*", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python*", "*"}, SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 150*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 2525 25462", "-011"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM|SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 2525*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 1024 }, #if 0 { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "C15*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_NO_CPAGE, 0, }, #endif { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "C56*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "T20*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "T4000*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "HP-88780*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "M4 DATA", "123107 SCSI*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { /* jreynold@primenet.com */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "Seagate", "STT8000N*", "*"}, SA_QUIRK_1FM, 0 }, { /* mike@sentex.net */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "Seagate", "STT20000*", "*"}, SA_QUIRK_1FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "SEAGATE", "DAT 06241-XXX", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3800", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 
512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 4100", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 4200", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " SLR*", "*"}, SA_QUIRK_1FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "5525ES*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "51000*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 1024 } }; static d_open_t saopen; static d_close_t saclose; static d_strategy_t sastrategy; static d_ioctl_t saioctl; static periph_init_t sainit; static periph_ctor_t saregister; static periph_oninv_t saoninvalidate; static periph_dtor_t sacleanup; static periph_start_t sastart; static void saasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void sadone(struct cam_periph *periph, union ccb *start_ccb); static int saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int samarkswanted(struct cam_periph *); static int sacheckeod(struct cam_periph *periph); static int sagetparams(struct cam_periph *periph, sa_params params_to_get, u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, sa_comp_t *comp_page, struct scsi_control_data_prot_subpage *prot_page, int dp_size, int prot_changeable); static int sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot); static int sasetparams(struct cam_periph *periph, sa_params params_to_set, u_int32_t blocksize, u_int8_t density, u_int32_t comp_algorithm, u_int32_t sense_flags); static int sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params); static int saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params); static void safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb); static void sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table, int table_ents); static struct sa_prot_map *safindprotent(char *name, struct sa_prot_map *table, int table_ents); static int sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params); static struct sa_param_ent *safindparament(struct mtparamset *ps); static int saparamsetlist(struct cam_periph *periph, struct mtsetlist *list, int need_copy); static int saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb, struct mtextget *g); static int saparamget(struct sa_softc *softc, struct sbuf *sb); static void saprevent(struct cam_periph *periph, int action); static int sarewind(struct cam_periph *periph); static int saspace(struct cam_periph *periph, int count, scsi_space_code code); static void sadevgonecb(void *arg); static void sasetupdev(struct sa_softc *softc, struct cdev *dev); static int samount(struct cam_periph *, int, struct cdev *); static int saretension(struct cam_periph *periph); static int sareservereleaseunit(struct cam_periph *periph, int reserve); static int saloadunload(struct cam_periph *periph, int load); static int saerase(struct cam_periph *periph, int longerase); static int sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed); static int sagetpos(struct cam_periph *periph); static int sardpos(struct cam_periph *periph, int, u_int32_t *); static int sasetpos(struct cam_periph *periph, int, struct mtlocate *); static void safilldenstypesb(struct sbuf *sb, int 
*indent, uint8_t *buf, int buf_len, int is_density); static void safilldensitysb(struct sa_softc *softc, int *indent, struct sbuf *sb); #ifndef SA_DEFAULT_IO_SPLIT #define SA_DEFAULT_IO_SPLIT 0 #endif static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT; /* * Tunable to allow the user to set a global allow_io_split value. Note * that this WILL GO AWAY in FreeBSD 11.0. Silently splitting the I/O up * is bad behavior, because it hides the true tape block size from the * application. */ static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0, "CAM Sequential Access Tape Driver"); SYSCTL_INT(_kern_cam_sa, OID_AUTO, allow_io_split, CTLFLAG_RDTUN, &sa_allow_io_split, 0, "Default I/O split value"); static struct periph_driver sadriver = { sainit, "sa", TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(sa, sadriver); /* For 2.2-stable support */ #ifndef D_TAPE #define D_TAPE 0 #endif static struct cdevsw sa_cdevsw = { .d_version = D_VERSION, .d_open = saopen, .d_close = saclose, .d_read = physread, .d_write = physwrite, .d_ioctl = saioctl, .d_strategy = sastrategy, .d_name = "sa", .d_flags = D_TAPE | D_TRACKCLOSE, }; static int saopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE|CAM_DEBUG_INFO, ("saopen(%s): softc=0x%x\n", devtoname(dev), softc->flags)); if (SA_IS_CTRL(dev)) { softc->ctrl_mode = 1; softc->open_count++; cam_periph_unlock(periph); return (0); } if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } if (softc->flags & SA_FLAG_OPEN) { error = EBUSY; } else if (softc->flags & SA_FLAG_INVALID) { error = ENXIO; } else { /* * Preserve whether this is a read_only open. */ softc->open_rdonly = (flags & O_RDWR) == O_RDONLY; /* * The function samount ensures media is loaded and ready. * It also does a device RESERVE if the tape isn't yet mounted. * * If the mount fails and this was a non-blocking open, * make this an 'open_pending_mount' action.
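 *
 * (Illustrative sketch of the userland view, not part of the
 * original code:
 *
 *	fd = open("/dev/nsa0", O_RDWR | O_NONBLOCK);
 *
 * succeeds even with no tape loaded; the deferred mount is then
 * retried by the first read(2), write(2), or tape ioctl(2) via
 * the open_pending_mount checks below. A plain blocking open
 * fails outright if the mount fails.)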
*/ error = samount(periph, flags, dev); if (error && (flags & O_NONBLOCK)) { softc->flags |= SA_FLAG_OPEN; softc->open_pending_mount = 1; softc->open_count++; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } } if (error) { cam_periph_unhold(periph); cam_periph_unlock(periph); cam_periph_release(periph); return (error); } saprevent(periph, PR_PREVENT); softc->flags |= SA_FLAG_OPEN; softc->open_count++; cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } static int saclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; int mode, error, writing, tmp, i; int closedbits = SA_FLAG_OPEN; mode = SAMODE(dev); periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE|CAM_DEBUG_INFO, ("saclose(%s): softc=0x%x\n", devtoname(dev), softc->flags)); softc->open_rdonly = 0; if (SA_IS_CTRL(dev)) { softc->ctrl_mode = 0; softc->open_count--; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } if (softc->open_pending_mount) { softc->flags &= ~SA_FLAG_OPEN; softc->open_pending_mount = 0; softc->open_count--; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } if ((error = cam_periph_hold(periph, PRIBIO)) != 0) { cam_periph_unlock(periph); return (error); } /* * Were we writing the tape? */ writing = (softc->flags & SA_FLAG_TAPE_WRITTEN) != 0; /* * See whether or not we need to write filemarks. If this * fails, we probably have to assume we've lost tape * position. */ error = sacheckeod(periph); if (error) { xpt_print(periph->path, "failed to write terminating filemark(s)\n"); softc->flags |= SA_FLAG_TAPE_FROZEN; } /* * Whatever we end up doing, allow users to eject tapes from here on. */ saprevent(periph, PR_ALLOW); /* * Decide how to end... */ if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { closedbits |= SA_FLAG_TAPE_FROZEN; } else switch (mode) { case SA_MODE_OFFLINE: /* * An 'offline' close is an unconditional release of * frozen && mount conditions, irrespective of whether * these operations succeeded. The reason for this is * to allow at least some kind of programmatic way * around our state getting all fouled up. If somebody * issues an 'offline' command, that will be allowed * to clear state. */ (void) sarewind(periph); (void) saloadunload(periph, FALSE); closedbits |= SA_FLAG_TAPE_MOUNTED|SA_FLAG_TAPE_FROZEN; break; case SA_MODE_REWIND: /* * If the rewind fails, return an error- if anyone cares, * but not overwriting any previous error. * * We don't clear the notion of mounted here, but we do * clear the notion of frozen if we successfully rewound. */ tmp = sarewind(periph); if (tmp) { if (error != 0) error = tmp; } else { closedbits |= SA_FLAG_TAPE_FROZEN; } break; case SA_MODE_NOREWIND: /* * If we're not rewinding/unloading the tape, find out * whether we need to back up over one of two filemarks * we wrote (if we wrote two filemarks) so that appends * from this point on will be sane. 
*/ if (error == 0 && writing && (softc->quirks & SA_QUIRK_2FM)) { tmp = saspace(periph, -1, SS_FILEMARKS); if (tmp) { xpt_print(periph->path, "unable to backspace " "over one of the double filemarks at end of " "tape\n"); xpt_print(periph->path, "it is possible that " "this device needs a SA_QUIRK_1FM quirk set " "for it\n"); softc->flags |= SA_FLAG_TAPE_FROZEN; } } break; default: xpt_print(periph->path, "unknown mode 0x%x in saclose\n", mode); /* NOTREACHED */ break; } /* * We wish to note here that there are no more filemarks to be written. */ softc->filemarks = 0; softc->flags &= ~SA_FLAG_TAPE_WRITTEN; /* * And we are no longer open for business. */ softc->flags &= ~closedbits; softc->open_count--; /* * Invalidate any density information that depends on having tape * media in the drive. */ for (i = 0; i < SA_DENSITY_TYPES; i++) { if (softc->density_type_bits[i] & SRDS_MEDIA) softc->density_info_valid[i] = 0; } /* * Inform users if tape state is frozen.... */ if (softc->flags & SA_FLAG_TAPE_FROZEN) { xpt_print(periph->path, "tape is now frozen- use an OFFLINE, " "REWIND or MTEOM command to clear this state.\n"); } /* release the device if it is no longer mounted */ if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) sareservereleaseunit(periph, FALSE); cam_periph_unhold(periph); cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void sastrategy(struct bio *bp) { struct cam_periph *periph; struct sa_softc *softc; bp->bio_resid = bp->bio_bcount; if (SA_IS_CTRL(bp->bio_dev)) { biofinish(bp, NULL, EINVAL); return; } periph = (struct cam_periph *)bp->bio_dev->si_drv1; cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; if (softc->flags & SA_FLAG_INVALID) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } if (softc->flags & SA_FLAG_TAPE_FROZEN) { cam_periph_unlock(periph); biofinish(bp, NULL, EPERM); return; } /* * This should actually never occur as the write(2) * system call traps attempts to write to a read-only * file descriptor. */ if (bp->bio_cmd == BIO_WRITE && softc->open_rdonly) { cam_periph_unlock(periph); biofinish(bp, NULL, EBADF); return; } if (softc->open_pending_mount) { int error = samount(periph, 0, bp->bio_dev); if (error) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } saprevent(periph, PR_PREVENT); softc->open_pending_mount = 0; } /* * If it's a null transfer, return immediately */ if (bp->bio_bcount == 0) { cam_periph_unlock(periph); biodone(bp); return; } /* valid request? */ if (softc->flags & SA_FLAG_FIXED) { /* * Fixed block device. The byte count must * be a multiple of our block size. */ if (((softc->blk_mask != ~0) && ((bp->bio_bcount & softc->blk_mask) != 0)) || ((softc->blk_mask == ~0) && ((bp->bio_bcount % softc->min_blk) != 0))) { xpt_print(periph->path, "Invalid request. Fixed block " "device requests must be a multiple of %d bytes\n", softc->min_blk); cam_periph_unlock(periph); biofinish(bp, NULL, EINVAL); return; } } else if ((bp->bio_bcount > softc->max_blk) || (bp->bio_bcount < softc->min_blk) || (bp->bio_bcount & softc->blk_mask) != 0) { xpt_print_path(periph->path); printf("Invalid request. 
Variable block " "device requests must be "); if (softc->blk_mask != 0) { printf("a multiple of %d ", (0x1 << softc->blk_gran)); } printf("between %d and %d bytes\n", softc->min_blk, softc->max_blk); cam_periph_unlock(periph); biofinish(bp, NULL, EINVAL); return; } /* * Place it at the end of the queue. */ bioq_insert_tail(&softc->bio_queue, bp); softc->queue_count++; #if 0 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastrategy: queuing a %ld %s byte %s\n", bp->bio_bcount, (softc->flags & SA_FLAG_FIXED)? "fixed" : "variable", (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif if (softc->queue_count > 1) { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastrategy: queue count now %d\n", softc->queue_count)); } /* * Schedule ourselves for performing the work. */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static int sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params) { uint32_t sili_blocksize; struct sa_softc *softc; int error; error = 0; softc = (struct sa_softc *)periph->softc; if (ps->value_type != MT_PARAM_SET_SIGNED) { snprintf(ps->error_str, sizeof(ps->error_str), "sili is a signed parameter"); goto bailout; } if ((ps->value.value_signed < 0) || (ps->value.value_signed > 1)) { snprintf(ps->error_str, sizeof(ps->error_str), "invalid sili value %jd", (intmax_t)ps->value.value_signed); goto bailout_error; } /* * We only set the SILI flag in variable block * mode. You'll get a check condition in fixed * block mode if things don't line up in any case. */ if (softc->flags & SA_FLAG_FIXED) { snprintf(ps->error_str, sizeof(ps->error_str), "can't set sili bit in fixed block mode"); goto bailout_error; } if (softc->sili == ps->value.value_signed) goto bailout; if (ps->value.value_signed == 1) sili_blocksize = 4; else sili_blocksize = 0; error = sasetparams(periph, SA_PARAM_BLOCKSIZE, sili_blocksize, 0, 0, SF_QUIET_IR); if (error != 0) { snprintf(ps->error_str, sizeof(ps->error_str), "sasetparams() returned error %d", error); goto bailout_error; } softc->sili = ps->value.value_signed; bailout: ps->status = MT_PARAM_STATUS_OK; return (error); bailout_error: ps->status = MT_PARAM_STATUS_ERROR; if (error == 0) error = EINVAL; return (error); } static int saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params) { struct sa_softc *softc; int error; error = 0; softc = (struct sa_softc *)periph->softc; if (ps->value_type != MT_PARAM_SET_SIGNED) { snprintf(ps->error_str, sizeof(ps->error_str), "eot_warn is a signed parameter"); ps->status = MT_PARAM_STATUS_ERROR; goto bailout; } if ((ps->value.value_signed < 0) || (ps->value.value_signed > 1)) { snprintf(ps->error_str, sizeof(ps->error_str), "invalid eot_warn value %jd\n", (intmax_t)ps->value.value_signed); ps->status = MT_PARAM_STATUS_ERROR; goto bailout; } softc->eot_warn = ps->value.value_signed; ps->status = MT_PARAM_STATUS_OK; bailout: if (ps->status != MT_PARAM_STATUS_OK) error = EINVAL; return (error); } static void safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb) { int tmpint; SASBADDNODE(sb, *indent, protection); if (softc->flags & SA_FLAG_PROTECT_SUPP) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, *indent, tmpint, %d, protection_supported, "Set to 1 if protection information is supported"); if ((tmpint != 0) && (softc->prot_info.cur_prot_state.initialized != 0)) { struct sa_prot_state *prot; prot = &softc->prot_info.cur_prot_state; SASBADDUINTDESC(sb, *indent, prot->prot_method, %u, prot_method, "Current Protection Method"); SASBADDUINTDESC(sb, 
*indent, prot->pi_length, %u, pi_length, "Length of Protection Information"); SASBADDUINTDESC(sb, *indent, prot->lbp_w, %u, lbp_w, "Check Protection on Writes"); SASBADDUINTDESC(sb, *indent, prot->lbp_r, %u, lbp_r, "Check and Include Protection on Reads"); SASBADDUINTDESC(sb, *indent, prot->rbdp, %u, rbdp, "Transfer Protection Information for RECOVER " "BUFFERED DATA command"); } SASBENDNODE(sb, *indent, protection); } static void sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table, int table_ents) { int i; bcopy(sa_prot_table, new_table, min(table_ents * sizeof(*new_table), sizeof(sa_prot_table))); table_ents = min(table_ents, SA_NUM_PROT_ENTS); for (i = 0; i < table_ents; i++) new_table[i].value = (uint32_t *)((uint8_t *)cur_state + new_table[i].offset); return; } static struct sa_prot_map * safindprotent(char *name, struct sa_prot_map *table, int table_ents) { char *prot_name = "protection."; int i, prot_len; prot_len = strlen(prot_name); /* * This shouldn't happen, but we check just in case. */ if (strncmp(name, prot_name, prot_len) != 0) goto bailout; for (i = 0; i < table_ents; i++) { if (strcmp(&name[prot_len], table[i].name) != 0) continue; return (&table[i]); } bailout: return (NULL); } static int sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params) { struct sa_softc *softc; struct sa_prot_map prot_ents[SA_NUM_PROT_ENTS]; struct sa_prot_state new_state; int error; int i; softc = (struct sa_softc *)periph->softc; error = 0; /* * Make sure that this tape drive supports protection information. * Otherwise we can't set anything. */ if ((softc->flags & SA_FLAG_PROTECT_SUPP) == 0) { snprintf(ps[0].error_str, sizeof(ps[0].error_str), "Protection information is not supported for this device"); ps[0].status = MT_PARAM_STATUS_ERROR; goto bailout; } /* * We can't operate with physio(9) splitting enabled, because there * is no way to insure (especially in variable block mode) that * what the user writes (with a checksum block at the end) will * make it into the sa(4) driver intact. */ if ((softc->si_flags & SI_NOSPLIT) == 0) { snprintf(ps[0].error_str, sizeof(ps[0].error_str), "Protection information cannot be enabled with I/O " "splitting"); ps[0].status = MT_PARAM_STATUS_ERROR; goto bailout; } /* * Take the current cached protection state and use that as the * basis for our new entries. */ bcopy(&softc->prot_info.cur_prot_state, &new_state, sizeof(new_state)); /* * Populate the table mapping property names to pointers into the * state structure. */ sapopulateprots(&new_state, prot_ents, SA_NUM_PROT_ENTS); /* * For each parameter the user passed in, make sure the name, type * and value are valid. 
*/ for (i = 0; i < num_params; i++) { struct sa_prot_map *ent; ent = safindprotent(ps[i].value_name, prot_ents, SA_NUM_PROT_ENTS); if (ent == NULL) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Invalid protection entry name %s", ps[i].value_name); error = EINVAL; goto bailout; } if (ent->param_type != ps[i].value_type) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Supplied type %d does not match actual type %d", ps[i].value_type, ent->param_type); error = EINVAL; goto bailout; } if ((ps[i].value.value_unsigned < ent->min_val) || (ps[i].value.value_unsigned > ent->max_val)) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Value %ju is outside valid range %u - %u", (uintmax_t)ps[i].value.value_unsigned, ent->min_val, ent->max_val); error = EINVAL; goto bailout; } *(ent->value) = ps[i].value.value_unsigned; } /* * Actually send the protection settings to the drive. */ error = sasetprot(periph, &new_state); if (error != 0) { for (i = 0; i < num_params; i++) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Unable to set parameter, see dmesg(8)"); } goto bailout; } /* * Let the user know that his settings were stored successfully. */ for (i = 0; i < num_params; i++) ps[i].status = MT_PARAM_STATUS_OK; bailout: return (error); } /* * Entry handlers generally only handle a single entry. Node handlers will * handle a contiguous range of parameters to set in a single call. */ typedef enum { SA_PARAM_TYPE_ENTRY, SA_PARAM_TYPE_NODE } sa_param_type; struct sa_param_ent { char *name; sa_param_type param_type; int (*set_func)(struct cam_periph *periph, struct mtparamset *ps, int num_params); } sa_param_table[] = { {"sili", SA_PARAM_TYPE_ENTRY, sasetsili }, {"eot_warn", SA_PARAM_TYPE_ENTRY, saseteotwarn }, {"protection.", SA_PARAM_TYPE_NODE, sasetprotents } }; static struct sa_param_ent * safindparament(struct mtparamset *ps) { unsigned int i; for (i = 0; i < nitems(sa_param_table); i++){ /* * For entries, we compare all of the characters. For * nodes, we only compare the first N characters. The node * handler will decode the rest. */ if (sa_param_table[i].param_type == SA_PARAM_TYPE_ENTRY) { if (strcmp(ps->value_name, sa_param_table[i].name) != 0) continue; } else { if (strncmp(ps->value_name, sa_param_table[i].name, strlen(sa_param_table[i].name)) != 0) continue; } return (&sa_param_table[i]); } return (NULL); } /* * Go through a list of parameters, coalescing contiguous parameters with * the same parent node into a single call to a set_func. */ static int saparamsetlist(struct cam_periph *periph, struct mtsetlist *list, int need_copy) { int i, contig_ents; int error; struct mtparamset *params, *first; struct sa_param_ent *first_ent; error = 0; params = NULL; if (list->num_params == 0) /* Nothing to do */ goto bailout; /* * Verify that the user has the correct structure size. */ if ((list->num_params * sizeof(struct mtparamset)) != list->param_len) { xpt_print(periph->path, "%s: length of params %d != " "sizeof(struct mtparamset) %zd * num_params %d\n", __func__, list->param_len, sizeof(struct mtparamset), list->num_params); error = EINVAL; goto bailout; } if (need_copy != 0) { /* * XXX KDM will dropping the lock cause an issue here? 
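 *
 * The idiom below is the standard one for touching user memory
 * while a CAM periph mutex is held: copyin(9) can fault and
 * sleep, so the lock is dropped around it. A minimal sketch of
 * the pattern (names illustrative):
 *
 *	cam_periph_unlock(periph);
 *	error = copyin(uaddr, kbuf, len);
 *	cam_periph_lock(periph);
 *
 * The open question above is whether any softc state examined
 * after relocking could have changed during the window.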
*/ cam_periph_unlock(periph); params = malloc(list->param_len, M_SCSISA, M_WAITOK | M_ZERO); error = copyin(list->params, params, list->param_len); cam_periph_lock(periph); if (error != 0) goto bailout; } else { params = list->params; } contig_ents = 0; first = NULL; first_ent = NULL; for (i = 0; i < list->num_params; i++) { struct sa_param_ent *ent; ent = safindparament(¶ms[i]); if (ent == NULL) { snprintf(params[i].error_str, sizeof(params[i].error_str), "%s: cannot find parameter %s", __func__, params[i].value_name); params[i].status = MT_PARAM_STATUS_ERROR; break; } if (first != NULL) { if (first_ent == ent) { /* * We're still in a contiguous list of * parameters that can be handled by one * node handler. */ contig_ents++; continue; } else { error = first_ent->set_func(periph, first, contig_ents); first = NULL; first_ent = NULL; contig_ents = 0; if (error != 0) { error = 0; break; } } } if (ent->param_type == SA_PARAM_TYPE_NODE) { first = ¶ms[i]; first_ent = ent; contig_ents = 1; } else { error = ent->set_func(periph, ¶ms[i], 1); if (error != 0) { error = 0; break; } } } if (first != NULL) first_ent->set_func(periph, first, contig_ents); bailout: if (need_copy != 0) { if (error != EFAULT) { cam_periph_unlock(periph); copyout(params, list->params, list->param_len); cam_periph_lock(periph); } free(params, M_SCSISA); } return (error); } static int sagetparams_common(struct cdev *dev, struct cam_periph *periph) { struct sa_softc *softc; u_int8_t write_protect; int comp_enabled, comp_supported, error; softc = (struct sa_softc *)periph->softc; if (softc->open_pending_mount) return (0); /* The control device may issue getparams() if there are no opens. */ if (SA_IS_CTRL(dev) && (softc->flags & SA_FLAG_OPEN) != 0) return (0); error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize, &softc->media_density, &softc->media_numblks, &softc->buffer_mode, &write_protect, &softc->speed, &comp_supported, &comp_enabled, &softc->comp_algorithm, NULL, NULL, 0, 0); if (error) return (error); if (write_protect) softc->flags |= SA_FLAG_TAPE_WP; else softc->flags &= ~SA_FLAG_TAPE_WP; softc->flags &= ~SA_FLAG_COMPRESSION; if (comp_supported) { if (softc->saved_comp_algorithm == 0) softc->saved_comp_algorithm = softc->comp_algorithm; softc->flags |= SA_FLAG_COMP_SUPP; if (comp_enabled) softc->flags |= SA_FLAG_COMP_ENABLED; } else softc->flags |= SA_FLAG_COMP_UNSUPP; return (0); } #define PENDING_MOUNT_CHECK(softc, periph, dev) \ if (softc->open_pending_mount) { \ error = samount(periph, 0, dev); \ if (error) { \ break; \ } \ saprevent(periph, PR_PREVENT); \ softc->open_pending_mount = 0; \ } static int saioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; scsi_space_code spaceop; int didlockperiph = 0; int mode; int error = 0; mode = SAMODE(dev); error = 0; /* shut up gcc */ spaceop = 0; /* shut up gcc */ periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; /* * Check for control mode accesses. We allow MTIOCGET and * MTIOCERRSTAT (but need to be the only one open in order * to clear latched status), and MTSETBSIZE, MTSETDNSTY * and MTCOMP (but need to be the only one accessing this * device to run those). */ if (SA_IS_CTRL(dev)) { switch (cmd) { case MTIOCGETEOTMODEL: case MTIOCGET: case MTIOCEXTGET: case MTIOCPARAMGET: case MTIOCRBLIM: break; case MTIOCERRSTAT: /* * If the periph isn't already locked, lock it * so our MTIOCERRSTAT can reset latched error stats. 
* * If the periph is already locked, skip it because * we're just getting status and it'll be up to the * other thread that has this device open to do * an MTIOCERRSTAT that would clear latched status. */ if ((periph->flags & CAM_PERIPH_LOCKED) == 0) { error = cam_periph_hold(periph, PRIBIO|PCATCH); if (error != 0) { cam_periph_unlock(periph); return (error); } didlockperiph = 1; } break; case MTIOCTOP: { struct mtop *mt = (struct mtop *) arg; /* * Check to make sure it's an OP we can perform * with no media inserted. */ switch (mt->mt_op) { case MTSETBSIZ: case MTSETDNSTY: case MTCOMP: mt = NULL; /* FALLTHROUGH */ default: break; } if (mt != NULL) { break; } /* FALLTHROUGH */ } case MTIOCSETEOTMODEL: /* * We need to acquire the peripheral here rather * than at open time because we are sharing writable * access to data structures. */ error = cam_periph_hold(periph, PRIBIO|PCATCH); if (error != 0) { cam_periph_unlock(periph); return (error); } didlockperiph = 1; break; default: cam_periph_unlock(periph); return (EINVAL); } } /* * Find the device that the user is talking about */ switch (cmd) { case MTIOCGET: { struct mtget *g = (struct mtget *)arg; error = sagetparams_common(dev, periph); if (error) break; bzero(g, sizeof(struct mtget)); g->mt_type = MT_ISAR; if (softc->flags & SA_FLAG_COMP_UNSUPP) { g->mt_comp = MT_COMP_UNSUPP; g->mt_comp0 = MT_COMP_UNSUPP; g->mt_comp1 = MT_COMP_UNSUPP; g->mt_comp2 = MT_COMP_UNSUPP; g->mt_comp3 = MT_COMP_UNSUPP; } else { if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) { g->mt_comp = MT_COMP_DISABLED; } else { g->mt_comp = softc->comp_algorithm; } g->mt_comp0 = softc->comp_algorithm; g->mt_comp1 = softc->comp_algorithm; g->mt_comp2 = softc->comp_algorithm; g->mt_comp3 = softc->comp_algorithm; } g->mt_density = softc->media_density; g->mt_density0 = softc->media_density; g->mt_density1 = softc->media_density; g->mt_density2 = softc->media_density; g->mt_density3 = softc->media_density; g->mt_blksiz = softc->media_blksize; g->mt_blksiz0 = softc->media_blksize; g->mt_blksiz1 = softc->media_blksize; g->mt_blksiz2 = softc->media_blksize; g->mt_blksiz3 = softc->media_blksize; g->mt_fileno = softc->fileno; g->mt_blkno = softc->blkno; g->mt_dsreg = (short) softc->dsreg; /* * Yes, we know that this is likely to overflow */ if (softc->last_resid_was_io) { if ((g->mt_resid = (short) softc->last_io_resid) != 0) { if (SA_IS_CTRL(dev) == 0 || didlockperiph) { softc->last_io_resid = 0; } } } else { if ((g->mt_resid = (short)softc->last_ctl_resid) != 0) { if (SA_IS_CTRL(dev) == 0 || didlockperiph) { softc->last_ctl_resid = 0; } } } error = 0; break; } case MTIOCEXTGET: case MTIOCPARAMGET: { struct mtextget *g = (struct mtextget *)arg; char *tmpstr2; struct sbuf *sb; /* * Report drive status using an XML format. */ /* * XXX KDM will dropping the lock cause any problems here? 
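 *
 * (Sketch of the sbuf(9) protocol used below, illustrative and
 * not part of the original code: the buffer is fixed at the size
 * userland offered, and overflow surfaces as ENOMEM when the
 * buffer is finished:
 *
 *	sb = sbuf_new(NULL, NULL, g->alloc_len, SBUF_FIXEDLEN);
 *	sbuf_printf(sb, "<mtextget>...");
 *	if (sbuf_finish(sb) == ENOMEM)
 *		g->status = MT_EXT_GET_NEED_MORE_SPACE;
 *
 * which tells the caller to retry with a larger buffer.)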
*/ cam_periph_unlock(periph); sb = sbuf_new(NULL, NULL, g->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Unable to allocate %d bytes for status info", g->alloc_len); cam_periph_lock(periph); goto extget_bailout; } cam_periph_lock(periph); if (cmd == MTIOCEXTGET) error = saextget(dev, periph, sb, g); else error = saparamget(softc, sb); if (error != 0) goto extget_bailout; error = sbuf_finish(sb); if (error == ENOMEM) { g->status = MT_EXT_GET_NEED_MORE_SPACE; error = 0; } else if (error != 0) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Error %d returned from sbuf_finish()", error); } else g->status = MT_EXT_GET_OK; error = 0; tmpstr2 = sbuf_data(sb); g->fill_len = strlen(tmpstr2) + 1; cam_periph_unlock(periph); error = copyout(tmpstr2, g->status_xml, g->fill_len); cam_periph_lock(periph); extget_bailout: sbuf_delete(sb); break; } case MTIOCPARAMSET: { struct mtsetlist list; struct mtparamset *ps = (struct mtparamset *)arg; bzero(&list, sizeof(list)); list.num_params = 1; list.param_len = sizeof(*ps); list.params = ps; error = saparamsetlist(periph, &list, /*need_copy*/ 0); break; } case MTIOCSETLIST: { struct mtsetlist *list = (struct mtsetlist *)arg; error = saparamsetlist(periph, list, /*need_copy*/ 1); break; } case MTIOCERRSTAT: { struct scsi_tape_errors *sep = &((union mterrstat *)arg)->scsi_errstat; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("saioctl: MTIOCERRSTAT\n")); bzero(sep, sizeof(*sep)); sep->io_resid = softc->last_io_resid; bcopy((caddr_t) &softc->last_io_sense, sep->io_sense, sizeof (sep->io_sense)); bcopy((caddr_t) &softc->last_io_cdb, sep->io_cdb, sizeof (sep->io_cdb)); sep->ctl_resid = softc->last_ctl_resid; bcopy((caddr_t) &softc->last_ctl_sense, sep->ctl_sense, sizeof (sep->ctl_sense)); bcopy((caddr_t) &softc->last_ctl_cdb, sep->ctl_cdb, sizeof (sep->ctl_cdb)); if ((SA_IS_CTRL(dev) == 0 && !softc->open_pending_mount) || didlockperiph) bzero((caddr_t) &softc->errinfo, sizeof (softc->errinfo)); error = 0; break; } case MTIOCTOP: { struct mtop *mt; int count; PENDING_MOUNT_CHECK(softc, periph, dev); mt = (struct mtop *)arg; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("saioctl: op=0x%x count=0x%x\n", mt->mt_op, mt->mt_count)); count = mt->mt_count; switch (mt->mt_op) { case MTWEOF: /* write an end-of-file marker */ /* * We don't need to clear the SA_FLAG_TAPE_WRITTEN * flag because by keeping track of filemarks * we have last written we know whether or not * we need to write more when we close the device. 
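 *
 * (Illustrative sketch, not part of the original code; the
 * real logic lives in samarkswanted() and sacheckeod():
 *
 *	wanted = 0;
 *	if (softc->flags & SA_FLAG_TAPE_WRITTEN) {
 *		wanted++;
 *		if (softc->quirks & SA_QUIRK_2FM)
 *			wanted++;
 *	}
 *	wanted -= softc->filemarks;
 *
 * so filemarks written explicitly here are credited against
 * the marks that would otherwise be written at close.)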
*/ error = sawritefilemarks(periph, count, FALSE, FALSE); break; case MTWEOFI: /* write an end-of-file marker without waiting */ error = sawritefilemarks(periph, count, FALSE, TRUE); break; case MTWSS: /* write a setmark */ error = sawritefilemarks(periph, count, TRUE, FALSE); break; case MTBSR: /* backward space record */ case MTFSR: /* forward space record */ case MTBSF: /* backward space file */ case MTFSF: /* forward space file */ case MTBSS: /* backward space setmark */ case MTFSS: /* forward space setmark */ case MTEOD: /* space to end of recorded medium */ { int nmarks; spaceop = SS_FILEMARKS; nmarks = softc->filemarks; error = sacheckeod(periph); if (error) { xpt_print(periph->path, "EOD check prior to spacing failed\n"); softc->flags |= SA_FLAG_EIO_PENDING; break; } nmarks -= softc->filemarks; switch(mt->mt_op) { case MTBSR: count = -count; /* FALLTHROUGH */ case MTFSR: spaceop = SS_BLOCKS; break; case MTBSF: count = -count; /* FALLTHROUGH */ case MTFSF: break; case MTBSS: count = -count; /* FALLTHROUGH */ case MTFSS: spaceop = SS_SETMARKS; break; case MTEOD: spaceop = SS_EOD; count = 0; nmarks = 0; break; default: error = EINVAL; break; } if (error) break; nmarks = softc->filemarks; /* * XXX: Why are we checking again? */ error = sacheckeod(periph); if (error) break; nmarks -= softc->filemarks; error = saspace(periph, count - nmarks, spaceop); /* * At this point, clear that we've written the tape * and that we've written any filemarks. We really * don't know what the application wishes to do next- * the sacheckeod's will make sure we terminated the * tape correctly if we'd been writing, but the next * action the user application takes will set again * whether we need to write filemarks. */ softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->filemarks = 0; break; } case MTREW: /* rewind */ PENDING_MOUNT_CHECK(softc, periph, dev); (void) sacheckeod(periph); error = sarewind(periph); /* see above */ softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; softc->filemarks = 0; break; case MTERASE: /* erase */ PENDING_MOUNT_CHECK(softc, periph, dev); error = saerase(periph, count); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; break; case MTRETENS: /* re-tension tape */ PENDING_MOUNT_CHECK(softc, periph, dev); error = saretension(periph); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; break; case MTOFFL: /* rewind and put the drive offline */ PENDING_MOUNT_CHECK(softc, periph, dev); (void) sacheckeod(periph); /* see above */ softc->flags &= ~SA_FLAG_TAPE_WRITTEN; softc->filemarks = 0; error = sarewind(periph); /* clear the frozen flag anyway */ softc->flags &= ~SA_FLAG_TAPE_FROZEN; /* * Be sure to allow media removal before ejecting. 
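 *
 * saprevent() wraps the SCSI PREVENT ALLOW MEDIUM REMOVAL
 * command; roughly (illustrative sketch, not the exact call;
 * see scsi_prevent() in scsi_all.h):
 *
 *	scsi_prevent(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
 *	    PR_ALLOW, SSD_FULL_SIZE, SCSIOP_TIMEOUT);
 *
 * PR_PREVENT was sent at open time so the media could not be
 * ejected out from under an active consumer.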
*/ saprevent(periph, PR_ALLOW); if (error == 0) { error = saloadunload(periph, FALSE); if (error == 0) { softc->flags &= ~SA_FLAG_TAPE_MOUNTED; } } break; case MTLOAD: error = saloadunload(periph, TRUE); break; case MTNOP: /* no operation, sets status only */ case MTCACHE: /* enable controller cache */ case MTNOCACHE: /* disable controller cache */ error = 0; break; case MTSETBSIZ: /* Set block size for device */ PENDING_MOUNT_CHECK(softc, periph, dev); if ((softc->sili != 0) && (count != 0)) { xpt_print(periph->path, "Can't enter fixed " "block mode with SILI enabled\n"); error = EINVAL; break; } error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count, 0, 0, 0); if (error == 0) { softc->last_media_blksize = softc->media_blksize; softc->media_blksize = count; if (count) { softc->flags |= SA_FLAG_FIXED; if (powerof2(count)) { softc->blk_shift = ffs(count) - 1; softc->blk_mask = count - 1; } else { softc->blk_mask = ~0; softc->blk_shift = 0; } /* * Make the user's desire 'persistent'. */ softc->quirks &= ~SA_QUIRK_VARIABLE; softc->quirks |= SA_QUIRK_FIXED; } else { softc->flags &= ~SA_FLAG_FIXED; if (softc->max_blk == 0) { softc->max_blk = ~0; } softc->blk_shift = 0; if (softc->blk_gran != 0) { softc->blk_mask = softc->blk_gran - 1; } else { softc->blk_mask = 0; } /* * Make the user's desire 'persistent'. */ softc->quirks |= SA_QUIRK_VARIABLE; softc->quirks &= ~SA_QUIRK_FIXED; } } break; case MTSETDNSTY: /* Set density for device and mode */ PENDING_MOUNT_CHECK(softc, periph, dev); if (count > UCHAR_MAX) { error = EINVAL; break; } else { error = sasetparams(periph, SA_PARAM_DENSITY, 0, count, 0, 0); } break; case MTCOMP: /* enable compression */ PENDING_MOUNT_CHECK(softc, periph, dev); /* * Some devices don't support compression, and * don't like it if you ask them for the * compression page. 
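 *
 * (Illustrative sketch, not part of the original code: the knob
 * arrives from userland through mt(1) or directly via MTIOCTOP,
 * e.g.
 *
 *	struct mtop op;
 *
 *	op.mt_op = MTCOMP;
 *	op.mt_count = 1;
 *	ioctl(fd, MTIOCTOP, &op);
 *
 * where a count of 0 disables compression and a nonzero count
 * selects or enables a compression algorithm.)
 */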
*/ if ((softc->quirks & SA_QUIRK_NOCOMP) || (softc->flags & SA_FLAG_COMP_UNSUPP)) { error = ENODEV; break; } error = sasetparams(periph, SA_PARAM_COMPRESSION, 0, 0, count, SF_NO_PRINT); break; default: error = EINVAL; } break; } case MTIOCIEOT: case MTIOCEEOT: error = 0; break; case MTIOCRDSPOS: PENDING_MOUNT_CHECK(softc, periph, dev); error = sardpos(periph, 0, (u_int32_t *) arg); break; case MTIOCRDHPOS: PENDING_MOUNT_CHECK(softc, periph, dev); error = sardpos(periph, 1, (u_int32_t *) arg); break; case MTIOCSLOCATE: case MTIOCHLOCATE: { struct mtlocate locate_info; int hard; bzero(&locate_info, sizeof(locate_info)); locate_info.logical_id = *((uint32_t *)arg); if (cmd == MTIOCSLOCATE) hard = 0; else hard = 1; PENDING_MOUNT_CHECK(softc, periph, dev); error = sasetpos(periph, hard, &locate_info); break; } case MTIOCEXTLOCATE: PENDING_MOUNT_CHECK(softc, periph, dev); error = sasetpos(periph, /*hard*/ 0, (struct mtlocate *)arg); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; softc->filemarks = 0; break; case MTIOCGETEOTMODEL: error = 0; if (softc->quirks & SA_QUIRK_1FM) mode = 1; else mode = 2; *((u_int32_t *) arg) = mode; break; case MTIOCSETEOTMODEL: error = 0; switch (*((u_int32_t *) arg)) { case 1: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_1FM; break; case 2: softc->quirks &= ~SA_QUIRK_1FM; softc->quirks |= SA_QUIRK_2FM; break; default: error = EINVAL; break; } break; case MTIOCRBLIM: { struct mtrblim *rblim; rblim = (struct mtrblim *)arg; rblim->granularity = softc->blk_gran; rblim->min_block_length = softc->min_blk; rblim->max_block_length = softc->max_blk; break; } default: error = cam_periph_ioctl(periph, cmd, arg, saerror); break; } /* * Check to see if we cleared a frozen state */ if (error == 0 && (softc->flags & SA_FLAG_TAPE_FROZEN)) { switch(cmd) { case MTIOCRDSPOS: case MTIOCRDHPOS: case MTIOCSLOCATE: case MTIOCHLOCATE: /* * XXX KDM look at this. */ softc->fileno = (daddr_t) -1; softc->blkno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->flags &= ~SA_FLAG_TAPE_FROZEN; xpt_print(periph->path, "tape state now unfrozen.\n"); break; default: break; } } if (didlockperiph) { cam_periph_unhold(periph); } cam_periph_unlock(periph); return (error); } static void sainit(void) { cam_status status; /* * Install a global async callback. */ status = xpt_register_async(AC_FOUND_DEVICE, saasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sa: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void sadevgonecb(void *arg) { struct cam_periph *periph; struct mtx *mtx; struct sa_softc *softc; periph = (struct cam_periph *)arg; softc = (struct sa_softc *)periph->softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc->num_devs_to_destroy--; if (softc->num_devs_to_destroy == 0) { int i; /* * When we have gotten all of our callbacks, we will get * no more close calls from devfs. So if we have any * dangling opens, we need to release the reference held * for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for devfs, all of our * instances are gone now. */ cam_periph_release_locked(periph); } /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. 
If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void saoninvalidate(struct cam_periph *periph) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, saasync, periph, periph->path); softc->flags |= SA_FLAG_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); softc->queue_count = 0; /* * Tell devfs that all of our devices have gone away, and ask for a * callback when it has cleaned up its state. */ destroy_dev_sched_cb(softc->devs.ctl_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.r_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.nr_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.er_dev, sadevgonecb, periph); } static void sacleanup(struct cam_periph *periph) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & SA_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); cam_periph_lock(periph); devstat_remove_entry(softc->device_stats); free(softc, M_SCSISA); } static void saasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_SEQUENTIAL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(saregister, saoninvalidate, sacleanup, sastart, "sa", CAM_PERIPH_BIO, path, saasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("saasync: Unable to probe new device " "due to status 0x%x\n", status); break; } default: cam_periph_async(periph, code, path, arg); break; } } static void sasetupdev(struct sa_softc *softc, struct cdev *dev) { dev->si_iosize_max = softc->maxio; dev->si_flags |= softc->si_flags; /* * Keep a count of how many non-alias devices we have created, * so we can make sure we clean them all up on shutdown. Aliases * are cleaned up when we destroy the device they're an alias for. */ if ((dev->si_flags & SI_ALIAS) == 0) softc->num_devs_to_destroy++; } static void sasysctlinit(void *context, int pending) { struct cam_periph *periph; struct sa_softc *softc; char tmpstr[80], tmpstr2[80]; periph = (struct cam_periph *)context; /* * If the periph is invalid, no need to setup the sysctls. 
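 *
 * (Background, added for clarity: this function runs from
 * taskqueue_thread, so the periph may have been invalidated
 * between the taskqueue_enqueue() in saregister() and execution
 * here; the reference taken at enqueue time keeps the structure
 * itself alive and is dropped at bailout below.)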
*/ if (periph->flags & CAM_PERIPH_INVALID) goto bailout; softc = (struct sa_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM SA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%u", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= SA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_sa), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) goto bailout; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "allow_io_split", CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &softc->allow_io_split, 0, "Allow Splitting I/O"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "maxio", CTLFLAG_RD, &softc->maxio, 0, "Maximum I/O size"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "cpi_maxio", CTLFLAG_RD, &softc->cpi_maxio, 0, "Maximum Controller I/O size"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "inject_eom", CTLFLAG_RW, &softc->inject_eom, 0, "Queue EOM for the next write/read"); bailout: /* * Release the reference that was held when this task was enqueued. */ cam_periph_release(periph); } static cam_status saregister(struct cam_periph *periph, void *arg) { struct sa_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; caddr_t match; char tmpstr[80]; int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("saregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = (struct sa_softc *) malloc(sizeof (*softc), M_SCSISA, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("saregister: Unable to probe new device. " "Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->scsi_rev = SID_ANSI_REV(&cgd->inq_data); softc->state = SA_STATE_NORMAL; softc->fileno = (daddr_t) -1; softc->blkno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->bop = -1; softc->eop = -1; softc->bpew = -1; bioq_init(&softc->bio_queue); softc->periph = periph; periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)sa_quirk_table, nitems(sa_quirk_table), sizeof(*sa_quirk_table), scsi_inquiry_match); if (match != NULL) { softc->quirks = ((struct sa_quirk_entry *)match)->quirks; softc->last_media_blksize = ((struct sa_quirk_entry *)match)->prefblk; } else softc->quirks = SA_QUIRK_NONE; /* * Long format data for READ POSITION was introduced in SSC, which * was after SCSI-2. (Roughly equivalent to SCSI-3.) If the drive * reports that it is SCSI-2 or older, it is unlikely to support * long position data, but it might. Some drives from that era * claim to be SCSI-2, but do support long position information. * So, instead of immediately disabling long position information * for SCSI-2 devices, we'll try one pass through sagetpos(), and * then disable long position information if we get an error. 
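 *
 * (Note, added for clarity: SCSI_REV_CCS is older than
 * SCSI_REV_2, so the test below only pre-disables long position
 * data for pre-SCSI-2 devices; SCSI-2 devices still get the one
 * probing pass described above.)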
*/ if (cgd->inq_data.version <= SCSI_REV_CCS) softc->quirks |= SA_QUIRK_NO_LONG_POS; if (cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) { struct ccb_dev_advinfo cdai; struct scsi_vpd_extended_inquiry_data ext_inq; bzero(&ext_inq, sizeof(ext_inq)); xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.buftype = CDAI_TYPE_EXT_INQ; cdai.bufsiz = sizeof(ext_inq); cdai.buf = (uint8_t *)&ext_inq; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if ((cdai.ccb_h.status == CAM_REQ_CMP) && (ext_inq.flags1 & SVPD_EID_SA_SPT_LBP)) softc->flags |= SA_FLAG_PROTECT_SUPP; } bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * The SA driver supports a blocksize, but we don't know the * blocksize until media is inserted. So, set a flag to * indicate that the blocksize is unavailable right now. */ cam_periph_unlock(periph); softc->device_stats = devstat_new_entry("sa", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_TAPE); /* * Load the default value that is either compiled in, or loaded * in the global kern.cam.sa.allow_io_split tunable. */ softc->allow_io_split = sa_allow_io_split; /* * Load a per-instance tunable, if it exists. NOTE that this * tunable WILL GO AWAY in FreeBSD 11.0. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.sa.%u.allow_io_split", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->allow_io_split); /* * If maxio isn't set, we fall back to DFLTPHYS. Otherwise we take * the smaller of cpi.maxio or MAXPHYS. */ if (cpi.maxio == 0) softc->maxio = DFLTPHYS; else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; else softc->maxio = cpi.maxio; /* * Record the controller's maximum I/O size so we can report it to * the user later. */ softc->cpi_maxio = cpi.maxio; /* * By default we tell physio that we do not want our I/O split. * The user needs to have a 1:1 mapping between the size of his * write to a tape character device and the size of the write * that actually goes down to the drive. */ if (softc->allow_io_split == 0) softc->si_flags = SI_NOSPLIT; else softc->si_flags = 0; TASK_INIT(&softc->sysctl_task, 0, sasysctlinit, periph); /* * If the SIM supports unmapped I/O, let physio know that we can * handle unmapped buffers. */ if (cpi.hba_misc & PIM_UNMAPPED) softc->si_flags |= SI_UNMAPPED; /* * Acquire a reference to the periph before we create the devfs * instances for it. We'll release this reference once the devfs * instances have been freed. */
*/ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } make_dev_args_init(&args); args.mda_devsw = &sa_cdevsw; args.mda_si_drv1 = softc->periph; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0660; args.mda_unit = SAMINOR(SA_CTLDEV, SA_ATYPE_R); error = make_dev_s(&args, &softc->devs.ctl_dev, "%s%d.ctl", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.ctl_dev); args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_R); error = make_dev_s(&args, &softc->devs.r_dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.r_dev); args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_NR); error = make_dev_s(&args, &softc->devs.nr_dev, "n%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.nr_dev); args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_ER); error = make_dev_s(&args, &softc->devs.er_dev, "e%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } sasetupdev(softc, softc->devs.er_dev); cam_periph_lock(periph); softc->density_type_bits[0] = 0; softc->density_type_bits[1] = SRDS_MEDIA; softc->density_type_bits[2] = SRDS_MEDIUM_TYPE; softc->density_type_bits[3] = SRDS_MEDIUM_TYPE | SRDS_MEDIA; /* * Bump the peripheral refcount for the sysctl thread, in case we * get invalidated before the thread has a chance to run. */ cam_periph_acquire(periph); taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, saasync, periph, periph->path); xpt_announce_periph(periph, NULL); xpt_announce_quirks(periph, softc->quirks, SA_QUIRK_BIT_STRING); return (CAM_REQ_CMP); } static void sastart(struct cam_periph *periph, union ccb *start_ccb) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sastart\n")); switch (softc->state) { case SA_STATE_NORMAL: { /* Pull a buffer from the queue and get going on it */ struct bio *bp; /* * See if there is a buf with work for us to do.. */ bp = bioq_first(&softc->bio_queue); if (bp == NULL) { xpt_release_ccb(start_ccb); } else if (((softc->flags & SA_FLAG_ERR_PENDING) != 0) || (softc->inject_eom != 0)) { struct bio *done_bp; if (softc->inject_eom != 0) { softc->flags |= SA_FLAG_EOM_PENDING; softc->inject_eom = 0; /* * If we're injecting EOM for writes, we * need to keep PEWS set for 3 queries * to cover 2 position requests from the * kernel via sagetpos(), and then allow * for one for the user to see the BPEW * flag (e.g. via mt status). After that, * it will be cleared. */ if (bp->bio_cmd == BIO_WRITE) softc->set_pews_status = 3; else softc->set_pews_status = 1; } again: softc->queue_count--; bioq_remove(&softc->bio_queue, bp); bp->bio_resid = bp->bio_bcount; done_bp = bp; if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) { /* * We have two different behaviors for * writes when we hit either Early Warning * or the PEWZ (Programmable Early Warning * Zone). 
The default behavior is that * for all writes that are currently * queued after the write where we saw the * early warning, we will return the write * with the residual equal to the count. * i.e. tell the application that 0 bytes * were written. * * The alternate behavior, which is enabled * when eot_warn is set, is that in * addition to setting the residual equal * to the count, we will set the error * to ENOSPC. * * In either case, once queued writes are * cleared out, we clear the error flag * (see below) and the application is free to * attempt to write more. */ if (softc->eot_warn != 0) { bp->bio_flags |= BIO_ERROR; bp->bio_error = ENOSPC; } else bp->bio_error = 0; } else if ((softc->flags & SA_FLAG_EOF_PENDING) != 0) { /* * This can only happen if we're reading * in fixed length mode. In this case, * we dump the rest of the list the * same way. */ bp->bio_error = 0; if (bioq_first(&softc->bio_queue) != NULL) { biodone(done_bp); goto again; } } else if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; } bp = bioq_first(&softc->bio_queue); /* * Only if we have no other buffers queued up * do we clear the pending error flag. */ if (bp == NULL) softc->flags &= ~SA_FLAG_ERR_PENDING; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastart- ERR_PENDING now 0x%x, bp is %sNULL, " "%d more buffers queued up\n", (softc->flags & SA_FLAG_ERR_PENDING), (bp != NULL)? "not " : " ", softc->queue_count)); xpt_release_ccb(start_ccb); biodone(done_bp); } else { u_int32_t length; bioq_remove(&softc->bio_queue, bp); softc->queue_count--; length = bp->bio_bcount; if ((softc->flags & SA_FLAG_FIXED) != 0) { if (softc->blk_shift != 0) { length = length >> softc->blk_shift; } else if (softc->media_blksize != 0) { length = length / softc->media_blksize; } else { bp->bio_error = EIO; xpt_print(periph->path, "zero blocksize" " for FIXED length writes?\n"); biodone(bp); break; } #if 0 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("issuing a %d fixed record %s\n", length, (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif } else { #if 0 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("issuing a %d variable byte %s\n", length, (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif } devstat_start_transaction_bio(softc->device_stats, bp); /* * Some people have theorized that we should * suppress illegal length indication if we are * running in variable block mode so that we don't * have to request sense every time our requested * block size is larger than the written block. * The residual information from the ccb allows * us to identify this situation anyway. The only * problem with this is that we will not get * information about blocks that are larger than * our read buffer unless we set the block size * in the mode page to something other than 0. * * I believe that this is a non-issue. If user apps * don't adjust their read size to match our record * size, that's just life. Anyway, the typical usage * would be to issue, e.g., 64KB reads and occasionally * have to deal with 512 byte or 1KB intermediate * records. * * That said, though, we now support setting the * SILI bit on reads, and we set the blocksize to 4 * bytes when we do that. This gives us * compatibility with software that wants this, * although the only real difference between that * and not setting the SILI bit on reads is that we * won't get a check condition on reads where our * request size is larger than the block on tape.
* That probably only makes a real difference in * non-packetized SCSI, where you have to go back * to the drive to request sense and thus incur * more latency. */ softc->dsreg = (bp->bio_cmd == BIO_READ)? MTIO_DSREG_RD : MTIO_DSREG_WR; scsi_sa_read_write(&start_ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, (bp->bio_cmd == BIO_READ ? SCSI_RW_READ : SCSI_RW_WRITE) | ((bp->bio_flags & BIO_UNMAPPED) != 0 ? SCSI_RW_BIO : 0), softc->sili, (softc->flags & SA_FLAG_FIXED) != 0, length, (bp->bio_flags & BIO_UNMAPPED) != 0 ? (void *)bp : bp->bio_data, bp->bio_bcount, SSD_FULL_SIZE, IO_TIMEOUT); start_ccb->ccb_h.ccb_pflags &= ~SA_POSITION_UPDATED; start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case SA_STATE_ABNORMAL: default: panic("state 0x%x in sastart", softc->state); break; } } static void sadone(struct cam_periph *periph, union ccb *done_ccb) { struct sa_softc *softc; struct ccb_scsiio *csio; struct bio *bp; int error; softc = (struct sa_softc *)periph->softc; csio = &done_ccb->csio; softc->dsreg = MTIO_DSREG_REST; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if ((error = saerror(done_ccb, 0, 0)) == ERESTART) { /* * A retry was scheduled, so just return. */ return; } } if (error == EIO) { /* * Catastrophic error. Mark the tape as frozen * (we no longer know tape position). * * Return all queued I/O with EIO, and unfreeze * our queue so that future transactions that * attempt to fix this problem can get to the * device. * */ softc->flags |= SA_FLAG_TAPE_FROZEN; bioq_flush(&softc->bio_queue, NULL, EIO); } if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; /* * In the error case, position is updated in saerror. */ } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (csio->resid != 0) { bp->bio_flags |= BIO_ERROR; } if (bp->bio_cmd == BIO_WRITE) { softc->flags |= SA_FLAG_TAPE_WRITTEN; softc->filemarks = 0; } if (!(csio->ccb_h.ccb_pflags & SA_POSITION_UPDATED) && (softc->blkno != (daddr_t) -1)) { if ((softc->flags & SA_FLAG_FIXED) != 0) { u_int32_t l; if (softc->blk_shift != 0) { l = bp->bio_bcount >> softc->blk_shift; } else { l = bp->bio_bcount / softc->media_blksize; } softc->blkno += (daddr_t) l; } else { softc->blkno++; } } } /* * If we had an error (immediate or pending), * release the device queue now. */ if (error || (softc->flags & SA_FLAG_ERR_PENDING)) cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); if (error || bp->bio_resid) { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("error %d resid %ld count %ld\n", error, bp->bio_resid, bp->bio_bcount)); } biofinish(bp, softc->device_stats, 0); xpt_release_ccb(done_ccb); } /* * Mount the tape (make sure it's ready for I/O). */ static int samount(struct cam_periph *periph, int oflags, struct cdev *dev) { struct sa_softc *softc; union ccb *ccb; int error; /* * oflags can be checked for 'kind' of open (read-only check) - later * dev can be checked for a control-mode or compression open - later */ UNUSED_PARAMETER(oflags); UNUSED_PARAMETER(dev); softc = (struct sa_softc *)periph->softc; /* * This should determine if something has happened since the last * open/mount that would invalidate the mount. We do *not* want * to retry this command- we just want the status. 
But we only * do this if we're mounted already- if we're not mounted, * we don't care about the unit read state and can instead use * this opportunity to attempt to reserve the tape unit. */ if (softc->flags & SA_FLAG_TAPE_MOUNTED) { ccb = cam_periph_getccb(periph, 1); scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error == ENXIO) { softc->flags &= ~SA_FLAG_TAPE_MOUNTED; scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } else if (error) { /* * We don't need to freeze the tape because we * will now attempt to rewind/load it. */ softc->flags &= ~SA_FLAG_TAPE_MOUNTED; if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { xpt_print(periph->path, "error %d on TUR in samount\n", error); } } } else { error = sareservereleaseunit(periph, TRUE); if (error) { return (error); } ccb = cam_periph_getccb(periph, 1); scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { struct scsi_read_block_limits_data *rblim = NULL; int comp_enabled, comp_supported; u_int8_t write_protect, guessing = 0; /* * Clear out old state. */ softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN| SA_FLAG_ERR_PENDING|SA_FLAG_COMPRESSION); softc->filemarks = 0; /* * *Very* first off, make sure we're loaded to BOT. */ scsi_load_unload(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, FALSE, 1, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); /* * In case this doesn't work, do a REWIND instead */ if (error) { scsi_rewind(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } if (error) { xpt_release_ccb(ccb); goto exit; } /* * Do a dummy test read to force access to the * media so that the drive will really know what's * there. We actually don't really care what the * blocksize on tape is and don't expect to really * read a full record. */ rblim = (struct scsi_read_block_limits_data *) malloc(8192, M_SCSISA, M_NOWAIT); if (rblim == NULL) { xpt_print(periph->path, "no memory for test read\n"); xpt_release_ccb(ccb); error = ENOMEM; goto exit; } if ((softc->quirks & SA_QUIRK_NODREAD) == 0) { scsi_sa_read_write(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, 1, FALSE, 0, 8192, (void *) rblim, 8192, SSD_FULL_SIZE, IO_TIMEOUT); (void) cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); scsi_rewind(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, CAM_RETRY_SELTO, SF_NO_PRINT | SF_RETRY_UA, softc->device_stats); if (error) { xpt_print(periph->path, "unable to rewind after test read\n"); xpt_release_ccb(ccb); goto exit; } } /* * Next off, determine block limits. */ scsi_read_block_limits(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, rblim, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, CAM_RETRY_SELTO, SF_NO_PRINT | SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); if (error != 0) { /* * If it's less than SCSI-2, READ BLOCK LIMITS is not * a MANDATORY command. Anyway- it doesn't matter- * we can proceed anyway. 
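 *
 * (Illustrative aside, not part of the driver: on failure we fall
 *  back to the widest possible limits,
 *
 *      blk_gran = 0;     no granularity restriction
 *      max_blk  = ~0;    effectively unlimited record size
 *      min_blk  = 0;     no minimum record size
 *
 *  so the MODE SENSE in sagetparams(), not this command, ends up
 *  deciding fixed versus variable mode.)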
*/ softc->blk_gran = 0; softc->max_blk = ~0; softc->min_blk = 0; } else { if (softc->scsi_rev >= SCSI_REV_SPC) { softc->blk_gran = RBL_GRAN(rblim); } else { softc->blk_gran = 0; } /* * We take max_blk == min_blk to mean a default to * fixed mode- but note that whatever we get out of * sagetparams below will actually determine whether * we are actually *in* fixed mode. */ softc->max_blk = scsi_3btoul(rblim->maximum); softc->min_blk = scsi_2btoul(rblim->minimum); } /* * Next, perform a mode sense to determine * current density, blocksize, compression etc. */ error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize, &softc->media_density, &softc->media_numblks, &softc->buffer_mode, &write_protect, &softc->speed, &comp_supported, &comp_enabled, &softc->comp_algorithm, NULL, NULL, 0, 0); if (error != 0) { /* * We could work a little harder here. We could * adjust our attempts to get information. It * might be an ancient tape drive. If someone * nudges us, we'll do that. */ goto exit; } /* * If no quirk has determined that this is a device that is * preferred to be in fixed or variable mode, now is the time * to find out. */ if ((softc->quirks & (SA_QUIRK_FIXED|SA_QUIRK_VARIABLE)) == 0) { guessing = 1; /* * This could be expensive to find out. Luckily we * only need to do this once. If we start out in * 'default' mode, try and set ourselves to one * of the densities that would determine a wad * of other stuff. Go from highest to lowest. */ if (softc->media_density == SCSI_DEFAULT_DENSITY) { int i; static u_int8_t ctry[] = { SCSI_DENSITY_HALFINCH_PE, SCSI_DENSITY_HALFINCH_6250C, SCSI_DENSITY_HALFINCH_6250, SCSI_DENSITY_HALFINCH_1600, SCSI_DENSITY_HALFINCH_800, SCSI_DENSITY_QIC_4GB, SCSI_DENSITY_QIC_2GB, SCSI_DENSITY_QIC_525_320, SCSI_DENSITY_QIC_150, SCSI_DENSITY_QIC_120, SCSI_DENSITY_QIC_24, SCSI_DENSITY_QIC_11_9TRK, SCSI_DENSITY_QIC_11_4TRK, SCSI_DENSITY_QIC_1320, SCSI_DENSITY_QIC_3080, 0 }; for (i = 0; ctry[i]; i++) { error = sasetparams(periph, SA_PARAM_DENSITY, 0, ctry[i], 0, SF_NO_PRINT); if (error == 0) { softc->media_density = ctry[i]; break; } } } switch (softc->media_density) { case SCSI_DENSITY_QIC_11_4TRK: case SCSI_DENSITY_QIC_11_9TRK: case SCSI_DENSITY_QIC_24: case SCSI_DENSITY_QIC_120: case SCSI_DENSITY_QIC_150: case SCSI_DENSITY_QIC_525_320: case SCSI_DENSITY_QIC_1320: case SCSI_DENSITY_QIC_3080: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_FIXED|SA_QUIRK_1FM; softc->last_media_blksize = 512; break; case SCSI_DENSITY_QIC_4GB: case SCSI_DENSITY_QIC_2GB: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_FIXED|SA_QUIRK_1FM; softc->last_media_blksize = 1024; break; default: softc->last_media_blksize = softc->media_blksize; softc->quirks |= SA_QUIRK_VARIABLE; break; } } /* * If no quirk has determined that this is a device that needs * to have 2 Filemarks at EOD, now is the time to find out. */ if ((softc->quirks & SA_QUIRK_2FM) == 0) { switch (softc->media_density) { case SCSI_DENSITY_HALFINCH_800: case SCSI_DENSITY_HALFINCH_1600: case SCSI_DENSITY_HALFINCH_6250: case SCSI_DENSITY_HALFINCH_6250C: case SCSI_DENSITY_HALFINCH_PE: softc->quirks &= ~SA_QUIRK_1FM; softc->quirks |= SA_QUIRK_2FM; break; default: break; } } /* * Now validate that some info we got makes sense. 
*/ if ((softc->max_blk < softc->media_blksize) || (softc->min_blk > softc->media_blksize && softc->media_blksize)) { xpt_print(periph->path, "BLOCK LIMITS (%d..%d) could not match current " "block settings (%d)- adjusting\n", softc->min_blk, softc->max_blk, softc->media_blksize); softc->max_blk = softc->min_blk = softc->media_blksize; } /* * Now put ourselves into the right frame of mind based * upon quirks... */ tryagain: /* * If we want to be in FIXED mode and our current blocksize * is not equal to our last blocksize (if nonzero), try and * set ourselves to this last blocksize (as the 'preferred' * block size). The initial quirkmatch at registry sets the * initial 'last' blocksize. If, for whatever reason, this * 'last' blocksize is zero, set the blocksize to 512, * or min_blk if that's larger. */ if ((softc->quirks & SA_QUIRK_FIXED) && (softc->quirks & SA_QUIRK_NO_MODESEL) == 0 && (softc->media_blksize != softc->last_media_blksize)) { softc->media_blksize = softc->last_media_blksize; if (softc->media_blksize == 0) { softc->media_blksize = 512; if (softc->media_blksize < softc->min_blk) { softc->media_blksize = softc->min_blk; } } error = sasetparams(periph, SA_PARAM_BLOCKSIZE, softc->media_blksize, 0, 0, SF_NO_PRINT); if (error) { xpt_print(periph->path, "unable to set fixed blocksize to %d\n", softc->media_blksize); goto exit; } } if ((softc->quirks & SA_QUIRK_VARIABLE) && (softc->media_blksize != 0)) { softc->last_media_blksize = softc->media_blksize; softc->media_blksize = 0; error = sasetparams(periph, SA_PARAM_BLOCKSIZE, 0, 0, 0, SF_NO_PRINT); if (error) { /* * If this fails and we were guessing, just * assume that we got it wrong and go try * fixed block mode. Don't even check against * density code at this point. */ if (guessing) { softc->quirks &= ~SA_QUIRK_VARIABLE; softc->quirks |= SA_QUIRK_FIXED; if (softc->last_media_blksize == 0) softc->last_media_blksize = 512; goto tryagain; } xpt_print(periph->path, "unable to set variable blocksize\n"); goto exit; } } /* * Now that we have the current block size, * set up some parameters for sastart's usage. */ if (softc->media_blksize) { softc->flags |= SA_FLAG_FIXED; if (powerof2(softc->media_blksize)) { softc->blk_shift = ffs(softc->media_blksize) - 1; softc->blk_mask = softc->media_blksize - 1; } else { softc->blk_mask = ~0; softc->blk_shift = 0; } } else { /* * The SCSI-3 spec allows 0 to mean "unspecified". * The SCSI-1 spec allows 0 to mean 'infinite'. * * Either works here. 
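 *
 * (Illustrative aside, not part of the driver: for the common
 *  power-of-2 fixed block size, the conversion set up above is pure
 *  shift arithmetic, e.g. with media_blksize == 512,
 *
 *      blk_shift = ffs(512) - 1 = 9
 *      blk_mask  = 512 - 1     = 0x1ff
 *      nblocks   = bio_bcount >> blk_shift
 *
 *  while non-power-of-2 sizes fall back to division in sastart().)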
*/ if (softc->max_blk == 0) { softc->max_blk = ~0; } softc->blk_shift = 0; if (softc->blk_gran != 0) { softc->blk_mask = softc->blk_gran - 1; } else { softc->blk_mask = 0; } } if (write_protect) softc->flags |= SA_FLAG_TAPE_WP; if (comp_supported) { if (softc->saved_comp_algorithm == 0) softc->saved_comp_algorithm = softc->comp_algorithm; softc->flags |= SA_FLAG_COMP_SUPP; if (comp_enabled) softc->flags |= SA_FLAG_COMP_ENABLED; } else softc->flags |= SA_FLAG_COMP_UNSUPP; if ((softc->buffer_mode == SMH_SA_BUF_MODE_NOBUF) && (softc->quirks & SA_QUIRK_NO_MODESEL) == 0) { error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0, SF_NO_PRINT); if (error == 0) { softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF; } else { xpt_print(periph->path, "unable to set buffered mode\n"); } error = 0; /* not an error */ } if (error == 0) { softc->flags |= SA_FLAG_TAPE_MOUNTED; } exit: if (rblim != NULL) free(rblim, M_SCSISA); if (error != 0) { softc->dsreg = MTIO_DSREG_NIL; } else { softc->fileno = softc->blkno = 0; softc->rep_fileno = softc->rep_blkno = -1; softc->partition = 0; softc->dsreg = MTIO_DSREG_REST; } #ifdef SA_1FM_AT_EOD if ((softc->quirks & SA_QUIRK_2FM) == 0) softc->quirks |= SA_QUIRK_1FM; #else if ((softc->quirks & SA_QUIRK_1FM) == 0) softc->quirks |= SA_QUIRK_2FM; #endif } else xpt_release_ccb(ccb); /* * If we return an error, we're not mounted any more, * so release any device reservation. */ if (error != 0) { (void) sareservereleaseunit(periph, FALSE); } else { /* * Clear I/O residual. */ softc->last_io_resid = 0; softc->last_ctl_resid = 0; } return (error); } /* * How many filemarks do we need to write if we were to terminate the * tape session right now? Note that this can be a negative number. */ static int samarkswanted(struct cam_periph *periph) { int markswanted; struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; markswanted = 0; if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) { markswanted++; if (softc->quirks & SA_QUIRK_2FM) markswanted++; } markswanted -= softc->filemarks; return (markswanted); } static int sacheckeod(struct cam_periph *periph) { int error; int markswanted; markswanted = samarkswanted(periph); if (markswanted > 0) { error = sawritefilemarks(periph, markswanted, FALSE, FALSE); } else { error = 0; } return (error); } static int saerror(union ccb *ccb, u_int32_t cflgs, u_int32_t sflgs) { static const char *toobig = "%d-byte tape record bigger than supplied buffer\n"; struct cam_periph *periph; struct sa_softc *softc; struct ccb_scsiio *csio; struct scsi_sense_data *sense; uint64_t resid = 0; int64_t info = 0; cam_status status; int error_code, sense_key, asc, ascq, error, aqvalid, stream_valid; int sense_len; uint8_t stream_bits; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct sa_softc *)periph->softc; csio = &ccb->csio; sense = &csio->sense_data; sense_len = csio->sense_len - csio->sense_resid; scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); if (asc != -1 && ascq != -1) aqvalid = 1; else aqvalid = 0; if (scsi_get_stream_info(sense, sense_len, NULL, &stream_bits) == 0) stream_valid = 1; else stream_valid = 0; error = 0; status = csio->ccb_h.status & CAM_STATUS_MASK; /* * Calculate/latch up any residuals... We do this in a funny 2-step * so we can print stuff here if we have CAM_DEBUG enabled for this * unit.
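 *
 * (Illustrative aside, not part of the driver: in fixed block mode
 *  the sense INFORMATION field counts blocks rather than bytes, which
 *  is why the code below scales it,
 *
 *      resid_bytes = info_blocks * media_blksize
 *
 *  whereas in variable mode the field is already a byte count.)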
*/ if (status == CAM_SCSI_STATUS_ERROR) { if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &resid, &info) == 0) { if ((softc->flags & SA_FLAG_FIXED) != 0) resid *= softc->media_blksize; } else { resid = csio->dxfer_len; info = resid; if ((softc->flags & SA_FLAG_FIXED) != 0) { if (softc->media_blksize) info /= softc->media_blksize; } } if (csio->cdb_io.cdb_bytes[0] == SA_READ || csio->cdb_io.cdb_bytes[0] == SA_WRITE) { bcopy((caddr_t) sense, (caddr_t) &softc->last_io_sense, sizeof (struct scsi_sense_data)); bcopy(csio->cdb_io.cdb_bytes, softc->last_io_cdb, (int) csio->cdb_len); softc->last_io_resid = resid; softc->last_resid_was_io = 1; } else { bcopy((caddr_t) sense, (caddr_t) &softc->last_ctl_sense, sizeof (struct scsi_sense_data)); bcopy(csio->cdb_io.cdb_bytes, softc->last_ctl_cdb, (int) csio->cdb_len); softc->last_ctl_resid = resid; softc->last_resid_was_io = 0; } CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("CDB[0]=0x%x Key 0x%x " "ASC/ASCQ 0x%x/0x%x CAM STATUS 0x%x flags 0x%x resid %jd " "dxfer_len %d\n", csio->cdb_io.cdb_bytes[0] & 0xff, sense_key, asc, ascq, status, (stream_valid) ? stream_bits : 0, (intmax_t)resid, csio->dxfer_len)); } else { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Cam Status 0x%x\n", status)); } switch (status) { case CAM_REQ_CMP: return (0); case CAM_SCSI_STATUS_ERROR: /* * If a read/write command, we handle it here. */ if (csio->cdb_io.cdb_bytes[0] == SA_READ || csio->cdb_io.cdb_bytes[0] == SA_WRITE) { break; } /* * If this was just EOM/EOP, Filemark, Setmark or ILI detected * on a non read/write command, we assume it's not an error * and propagate the residual and return. */ if ((aqvalid && asc == 0 && ascq > 0 && ascq <= 5) || (aqvalid == 0 && sense_key == SSD_KEY_NO_SENSE)) { csio->resid = resid; QFRLS(ccb); return (0); } /* * Otherwise, we let the common code handle this. */ return (cam_periph_error(ccb, cflgs, sflgs, &softc->saved_ccb)); /* * XXX: To Be Fixed * We cannot depend upon CAM honoring retry counts for these. */ case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: if (ccb->ccb_h.retry_count <= 0) { return (EIO); } /* FALLTHROUGH */ default: return (cam_periph_error(ccb, cflgs, sflgs, &softc->saved_ccb)); } /* * Handle filemark, end of tape, mismatched record sizes.... * From this point out, we're only handling read/write cases. * Handle writes && reads differently. */ if (csio->cdb_io.cdb_bytes[0] == SA_WRITE) { if (sense_key == SSD_KEY_VOLUME_OVERFLOW) { csio->resid = resid; error = ENOSPC; } else if ((stream_valid != 0) && (stream_bits & SSD_EOM)) { softc->flags |= SA_FLAG_EOM_PENDING; /* * Grotesque as it seems, the few times * I've actually seen a non-zero resid, * the tape drive actually lied and had * written all the data! */ csio->resid = 0; } } else { csio->resid = resid; if (sense_key == SSD_KEY_BLANK_CHECK) { if (softc->quirks & SA_QUIRK_1FM) { error = 0; softc->flags |= SA_FLAG_EOM_PENDING; } else { error = EIO; } } else if ((stream_valid != 0) && (stream_bits & SSD_FILEMARK)){ if (softc->flags & SA_FLAG_FIXED) { error = -1; softc->flags |= SA_FLAG_EOF_PENDING; } /* * Unconditionally, if we detected a filemark on a read, * mark that we've moved a file ahead. */ if (softc->fileno != (daddr_t) -1) { softc->fileno++; softc->blkno = 0; csio->ccb_h.ccb_pflags |= SA_POSITION_UPDATED; } } } /* * Incorrect Length usually applies to read, but can apply to writes.
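 *
 * (Illustrative aside, not part of the driver: with ILI set the
 *  INFORMATION field holds requested length minus actual length, so
 *
 *      info < 0   the record was larger than the supplied buffer
 *      info > 0   the record was shorter than requested
 *
 *  which is why the overrun branch below prints the record size as
 *  dxfer_len - info.)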
*/ if (error == 0 && (stream_valid != 0) && (stream_bits & SSD_ILI)) { if (info < 0) { xpt_print(csio->ccb_h.path, toobig, csio->dxfer_len - info); csio->resid = csio->dxfer_len; error = EIO; } else { csio->resid = resid; if (softc->flags & SA_FLAG_FIXED) { softc->flags |= SA_FLAG_EIO_PENDING; } /* * Bump the block number if we hadn't seen a filemark. * Do this independent of errors (we've moved anyway). */ if ((stream_valid == 0) || (stream_bits & SSD_FILEMARK) == 0) { if (softc->blkno != (daddr_t) -1) { softc->blkno++; csio->ccb_h.ccb_pflags |= SA_POSITION_UPDATED; } } } } if (error <= 0) { /* * Unfreeze the queue if frozen as we're not returning anything * to our waiters that would indicate an I/O error has occurred * (yet). */ QFRLS(ccb); error = 0; } return (error); } static int sagetparams(struct cam_periph *periph, sa_params params_to_get, u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, sa_comp_t *tcs, struct scsi_control_data_prot_subpage *prot_page, int dp_size, int prot_changeable) { union ccb *ccb; void *mode_buffer; struct scsi_mode_header_6 *mode_hdr; struct scsi_mode_blk_desc *mode_blk; int mode_buffer_len; struct sa_softc *softc; u_int8_t cpage; int error; cam_status status; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); if (softc->quirks & SA_QUIRK_NO_CPAGE) cpage = SA_DEVICE_CONFIGURATION_PAGE; else cpage = SA_DATA_COMPRESSION_PAGE; retry: mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); if (params_to_get & SA_PARAM_COMPRESSION) { if (softc->quirks & SA_QUIRK_NOCOMP) { *comp_supported = FALSE; params_to_get &= ~SA_PARAM_COMPRESSION; } else mode_buffer_len += sizeof (sa_comp_t); } /* XXX Fix M_NOWAIT */ mode_buffer = malloc(mode_buffer_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode_buffer == NULL) { xpt_release_ccb(ccb); return (ENOMEM); } mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; /* it is safe to retry this */ scsi_mode_sense(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, SMS_PAGE_CTRL_CURRENT, (params_to_get & SA_PARAM_COMPRESSION) ? cpage : SMS_VENDOR_SPECIFIC_PAGE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); status = ccb->ccb_h.status & CAM_STATUS_MASK; if (error == EINVAL && (params_to_get & SA_PARAM_COMPRESSION) != 0) { /* * Hmm. Let's see if we can try another page... * If we've already done that, give up on compression * for this device and remember this for the future * and attempt the request without asking for compression * info. */ if (cpage == SA_DATA_COMPRESSION_PAGE) { cpage = SA_DEVICE_CONFIGURATION_PAGE; goto retry; } softc->quirks |= SA_QUIRK_NOCOMP; free(mode_buffer, M_SCSISA); goto retry; } else if (status == CAM_SCSI_STATUS_ERROR) { /* Tell the user about the fatal error. */ scsi_sense_print(&ccb->csio); goto sagetparamsexit; } /* * If the user only wants the compression information, and * the device doesn't send back the block descriptor, it's * no big deal. If the user wants more than just * compression, though, and the device doesn't pass back the * block descriptor, we need to send another mode sense to * get the block descriptor. 
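 *
 * (Illustrative aside, not part of the driver: the MODE SENSE(6)
 *  buffer parsed below is laid out as
 *
 *      struct scsi_mode_header_6  hdr;   4 bytes
 *      struct scsi_mode_blk_desc  blk;   8 bytes, optional
 *      sa_comp_t                  page;  compression page, if requested
 *
 *  and hdr.blk_desc_len says whether the block descriptor is present.)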
*/ if ((mode_hdr->blk_desc_len == 0) && (params_to_get & SA_PARAM_COMPRESSION) && (params_to_get & ~(SA_PARAM_COMPRESSION))) { /* * Decrease the mode buffer length by the size of * the compression page, to make sure the data * there doesn't get overwritten. */ mode_buffer_len -= sizeof (sa_comp_t); /* * Now move the compression page that we presumably * got back down the memory chunk a little bit so * it doesn't get spammed. */ bcopy(&mode_hdr[0], &mode_hdr[1], sizeof (sa_comp_t)); bzero(&mode_hdr[0], sizeof (mode_hdr[0])); /* * Now, we issue another mode sense and just ask * for the block descriptor, etc. */ scsi_mode_sense(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SMS_PAGE_CTRL_CURRENT, SMS_VENDOR_SPECIFIC_PAGE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error != 0) goto sagetparamsexit; } if (params_to_get & SA_PARAM_BLOCKSIZE) *blocksize = scsi_3btoul(mode_blk->blklen); if (params_to_get & SA_PARAM_NUMBLOCKS) *numblocks = scsi_3btoul(mode_blk->nblocks); if (params_to_get & SA_PARAM_BUFF_MODE) *buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK; if (params_to_get & SA_PARAM_DENSITY) *density = mode_blk->density; if (params_to_get & SA_PARAM_WP) *write_protect = (mode_hdr->dev_spec & SMH_SA_WP)? TRUE : FALSE; if (params_to_get & SA_PARAM_SPEED) *speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK; if (params_to_get & SA_PARAM_COMPRESSION) { sa_comp_t *ntcs = (sa_comp_t *) &mode_blk[1]; if (cpage == SA_DATA_COMPRESSION_PAGE) { struct scsi_data_compression_page *cp = &ntcs->dcomp; *comp_supported = (cp->dce_and_dcc & SA_DCP_DCC)? TRUE : FALSE; *comp_enabled = (cp->dce_and_dcc & SA_DCP_DCE)? TRUE : FALSE; *comp_algorithm = scsi_4btoul(cp->comp_algorithm); } else { struct scsi_dev_conf_page *cp = &ntcs->dconf; /* * We don't really know whether this device supports * Data Compression if the algorithm field is * zero. Just say we do. */ *comp_supported = TRUE; *comp_enabled = (cp->sel_comp_alg != SA_COMP_NONE)? TRUE : FALSE; *comp_algorithm = cp->sel_comp_alg; } if (tcs != NULL) bcopy(ntcs, tcs, sizeof (sa_comp_t)); } if ((params_to_get & SA_PARAM_DENSITY_EXT) && (softc->scsi_rev >= SCSI_REV_SPC)) { int i; for (i = 0; i < SA_DENSITY_TYPES; i++) { scsi_report_density_support(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*media*/ softc->density_type_bits[i] & SRDS_MEDIA, /*medium_type*/ softc->density_type_bits[i] & SRDS_MEDIUM_TYPE, /*data_ptr*/ softc->density_info[i], /*length*/ sizeof(softc->density_info[i]), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ REP_DENSITY_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); status = ccb->ccb_h.status & CAM_STATUS_MASK; /* * Some tape drives won't support this command at * all, but hopefully we'll minimize that with the * check for SPC or greater support above. If they * don't support the default report (neither the * MEDIA nor MEDIUM_TYPE bits set), then there is * really no point in continuing on to look for * other reports. */ if ((error != 0) || (status != CAM_REQ_CMP)) { error = 0; softc->density_info_valid[i] = 0; if (softc->density_type_bits[i] == 0) break; else continue; } softc->density_info_valid[i] = ccb->csio.dxfer_len - ccb->csio.resid; } } /* * Get logical block protection parameters if the drive supports it.
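 *
 * (Illustrative aside, not part of the driver: the density loop above
 *  issues REPORT DENSITY SUPPORT in its four flavors, driven by
 *  density_type_bits[],
 *
 *      media=0, medium_type=0   densities the drive supports
 *      media=1, medium_type=0   density of the mounted medium
 *      media=0, medium_type=1   medium types the drive supports
 *      media=1, medium_type=1   medium type of the mounted medium
 *
 *  recording the number of valid bytes in density_info_valid[].)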
*/ if ((params_to_get & SA_PARAM_LBP) && (softc->flags & SA_FLAG_PROTECT_SUPP)) { struct scsi_mode_header_10 *mode10_hdr; struct scsi_control_data_prot_subpage *dp_page; struct scsi_mode_sense_10 *cdb; struct sa_prot_state *prot; int dp_len, returned_len; if (dp_size == 0) dp_size = sizeof(*dp_page); dp_len = sizeof(*mode10_hdr) + dp_size; mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_hdr == NULL) { error = ENOMEM; goto sagetparamsexit; } scsi_mode_sense_len(&ccb->csio, /*retries*/ 5, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*dbd*/ TRUE, /*page_code*/ (prot_changeable == 0) ? SMS_PAGE_CTRL_CURRENT : SMS_PAGE_CTRL_CHANGEABLE, /*page*/ SMS_CONTROL_MODE_PAGE, /*param_buf*/ (uint8_t *)mode10_hdr, /*param_len*/ dp_len, /*minimum_cmd_size*/ 10, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); /* * XXX KDM we need to be able to set the subpage in the * fill function. */ cdb = (struct scsi_mode_sense_10 *)ccb->csio.cdb_io.cdb_bytes; cdb->subpage = SA_CTRL_DP_SUBPAGE_CODE; error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error != 0) { free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } /* * The returned data length at least has to be long enough * for us to look at length in the mode page header. */ returned_len = ccb->csio.dxfer_len - ccb->csio.resid; if (returned_len < sizeof(mode10_hdr->data_length)) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } returned_len = min(returned_len, sizeof(mode10_hdr->data_length) + scsi_2btoul(mode10_hdr->data_length)); dp_page = (struct scsi_control_data_prot_subpage *) &mode10_hdr[1]; /* * We also have to have enough data to include the prot_bits * in the subpage. */ if (returned_len < (sizeof(*mode10_hdr) + __offsetof(struct scsi_control_data_prot_subpage, prot_bits) + sizeof(dp_page->prot_bits))) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } prot = &softc->prot_info.cur_prot_state; prot->prot_method = dp_page->prot_method; prot->pi_length = dp_page->pi_length & SA_CTRL_DP_PI_LENGTH_MASK; prot->lbp_w = (dp_page->prot_bits & SA_CTRL_DP_LBP_W) ? 1 :0; prot->lbp_r = (dp_page->prot_bits & SA_CTRL_DP_LBP_R) ? 1 :0; prot->rbdp = (dp_page->prot_bits & SA_CTRL_DP_RBDP) ? 1 :0; prot->initialized = 1; if (prot_page != NULL) bcopy(dp_page, prot_page, min(sizeof(*prot_page), sizeof(*dp_page))); free(mode10_hdr, M_SCSISA); } if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { int idx; char *xyz = mode_buffer; xpt_print_path(periph->path); printf("Mode Sense Data="); for (idx = 0; idx < mode_buffer_len; idx++) printf(" 0x%02x", xyz[idx] & 0xff); printf("\n"); } sagetparamsexit: xpt_release_ccb(ccb); free(mode_buffer, M_SCSISA); return (error); } /* * Set protection information to the pending protection information stored * in the softc. */ static int sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot) { struct sa_softc *softc; struct scsi_control_data_prot_subpage *dp_page, *dp_changeable; struct scsi_mode_header_10 *mode10_hdr, *mode10_changeable; union ccb *ccb; uint8_t current_speed; size_t dp_size, dp_page_length; int dp_len, buff_mode; int error; softc = (struct sa_softc *)periph->softc; mode10_hdr = NULL; mode10_changeable = NULL; ccb = NULL; /* * Start off with the size set to the actual length of the page * that we have defined. 
*/ dp_size = sizeof(*dp_changeable); dp_page_length = dp_size - __offsetof(struct scsi_control_data_prot_subpage, prot_method); retry_length: dp_len = sizeof(*mode10_changeable) + dp_size; mode10_changeable = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_changeable == NULL) { error = ENOMEM; goto bailout; } dp_changeable = (struct scsi_control_data_prot_subpage *)&mode10_changeable[1]; /* * First get the data protection page changeable parameters mask. * We need to know which parameters the drive supports changing. * We also need to know what the drive claims that its page length * is. The reason is that IBM drives in particular are very picky * about the page length. They want it (the length set in the * page structure itself) to be 28 bytes, and they want the * parameter list length specified in the mode select header to be * 40 bytes. So, to work with IBM drives as well as any other tape * drive, find out what the drive claims the page length is, and * make sure that we match that. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL, NULL, NULL, dp_changeable, dp_size, /*prot_changeable*/ 1); if (error != 0) goto bailout; if (scsi_2btoul(dp_changeable->length) > dp_page_length) { dp_page_length = scsi_2btoul(dp_changeable->length); dp_size = dp_page_length + __offsetof(struct scsi_control_data_prot_subpage, prot_method); free(mode10_changeable, M_SCSISA); mode10_changeable = NULL; goto retry_length; } mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_hdr == NULL) { error = ENOMEM; goto bailout; } dp_page = (struct scsi_control_data_prot_subpage *)&mode10_hdr[1]; /* * Now grab the actual current settings in the page. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL, NULL, NULL, dp_page, dp_size, /*prot_changeable*/ 0); if (error != 0) goto bailout; /* These two fields need to be 0 for MODE SELECT */ scsi_ulto2b(0, mode10_hdr->data_length); mode10_hdr->medium_type = 0; /* We are not including a block descriptor */ scsi_ulto2b(0, mode10_hdr->blk_desc_len); mode10_hdr->dev_spec = current_speed; /* if set, set single-initiator buffering mode */ if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) { mode10_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; } /* * For each field, make sure that the drive allows changing it * before bringing in the user's setting.
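 *
 * (Illustrative aside, not part of the driver: the changeable-values
 *  MODE SENSE acts as a write mask, so each update below follows the
 *  pattern
 *
 *      if (changeable & BIT) {
 *              cur &= ~BIT;
 *              cur |= (new & BIT);
 *      }
 *
 *  leaving any bit the drive reports as read-only untouched.)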
*/ if (dp_changeable->prot_method != 0) dp_page->prot_method = new_prot->prot_method; if (dp_changeable->pi_length & SA_CTRL_DP_PI_LENGTH_MASK) { dp_page->pi_length &= ~SA_CTRL_DP_PI_LENGTH_MASK; dp_page->pi_length |= (new_prot->pi_length & SA_CTRL_DP_PI_LENGTH_MASK); } if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_W) { if (new_prot->lbp_w) dp_page->prot_bits |= SA_CTRL_DP_LBP_W; else dp_page->prot_bits &= ~SA_CTRL_DP_LBP_W; } if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_R) { if (new_prot->lbp_r) dp_page->prot_bits |= SA_CTRL_DP_LBP_R; else dp_page->prot_bits &= ~SA_CTRL_DP_LBP_R; } if (dp_changeable->prot_bits & SA_CTRL_DP_RBDP) { if (new_prot->rbdp) dp_page->prot_bits |= SA_CTRL_DP_RBDP; else dp_page->prot_bits &= ~SA_CTRL_DP_RBDP; } ccb = cam_periph_getccb(periph, 1); scsi_mode_select_len(&ccb->csio, /*retries*/ 5, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*scsi_page_fmt*/ TRUE, /*save_pages*/ FALSE, /*param_buf*/ (uint8_t *)mode10_hdr, /*param_len*/ dp_len, /*minimum_cmd_size*/ 10, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); if (error != 0) goto bailout; if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = EINVAL; goto bailout; } /* * The operation was successful. We could just copy the settings * the user requested, but just in case the drive ignored some of * our settings, let's ask for status again. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, &current_speed, NULL, NULL, NULL, NULL, dp_page, dp_size, 0); bailout: if (ccb != NULL) xpt_release_ccb(ccb); free(mode10_hdr, M_SCSISA); free(mode10_changeable, M_SCSISA); return (error); } /* * The purpose of this function is to set one of four different parameters * for a tape drive: * - blocksize * - density * - compression / compression algorithm * - buffering mode * * The assumption is that this will be called from saioctl(), and therefore * from a process context. Thus the waiting malloc calls below. If that * assumption ever changes, the malloc calls should be changed to be * NOWAIT mallocs. * * Any or all of the four parameters may be set when this function is * called. It should handle setting more than one parameter at once. */ static int sasetparams(struct cam_periph *periph, sa_params params_to_set, u_int32_t blocksize, u_int8_t density, u_int32_t calg, u_int32_t sense_flags) { struct sa_softc *softc; u_int32_t current_blocksize; u_int32_t current_calg; u_int8_t current_density; u_int8_t current_speed; int comp_enabled, comp_supported; void *mode_buffer; int mode_buffer_len; struct scsi_mode_header_6 *mode_hdr; struct scsi_mode_blk_desc *mode_blk; sa_comp_t *ccomp, *cpage; int buff_mode; union ccb *ccb = NULL; int error; softc = (struct sa_softc *)periph->softc; ccomp = malloc(sizeof (sa_comp_t), M_SCSISA, M_NOWAIT); if (ccomp == NULL) return (ENOMEM); /* * Since it doesn't make sense to set the number of blocks, or * write protection, we won't try to get the current value. We * always want to get the blocksize, so we can set it back to the * proper value.
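 *
 * (Illustrative aside, not part of the driver: sagetparams() takes one
 *  output pointer per parameter, and callers pass NULL for anything
 *  they did not ask for; only the pointers whose bits are set in
 *  params_to_get are written, e.g.
 *
 *      sagetparams(periph, SA_PARAM_BLOCKSIZE | SA_PARAM_SPEED,
 *          &blksize, NULL, NULL, NULL, NULL, &speed,
 *          NULL, NULL, NULL, NULL, NULL, 0, 0);)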
*/ error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE | SA_PARAM_SPEED, &current_blocksize, &current_density, NULL, &buff_mode, NULL, &current_speed, &comp_supported, &comp_enabled, &current_calg, ccomp, NULL, 0, 0); if (error != 0) { free(ccomp, M_SCSISA); return (error); } mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); if (params_to_set & SA_PARAM_COMPRESSION) mode_buffer_len += sizeof (sa_comp_t); mode_buffer = malloc(mode_buffer_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode_buffer == NULL) { free(ccomp, M_SCSISA); return (ENOMEM); } mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; ccb = cam_periph_getccb(periph, 1); retry: if (params_to_set & SA_PARAM_COMPRESSION) { if (mode_blk) { cpage = (sa_comp_t *)&mode_blk[1]; } else { cpage = (sa_comp_t *)&mode_hdr[1]; } bcopy(ccomp, cpage, sizeof (sa_comp_t)); cpage->hdr.pagecode &= ~0x80; } else cpage = NULL; /* * If the caller wants us to set the blocksize, use the one they * pass in. Otherwise, use the blocksize we got back from the * mode sense above. */ if (mode_blk) { if (params_to_set & SA_PARAM_BLOCKSIZE) scsi_ulto3b(blocksize, mode_blk->blklen); else scsi_ulto3b(current_blocksize, mode_blk->blklen); /* * Set density if requested, else preserve old density. * SCSI_SAME_DENSITY only applies to SCSI-2 or better * devices, else density we've latched up in our softc. */ if (params_to_set & SA_PARAM_DENSITY) { mode_blk->density = density; } else if (softc->scsi_rev > SCSI_REV_CCS) { mode_blk->density = SCSI_SAME_DENSITY; } else { mode_blk->density = softc->media_density; } } /* * For mode selects, these two fields must be zero. */ mode_hdr->data_length = 0; mode_hdr->medium_type = 0; /* set the speed to the current value */ mode_hdr->dev_spec = current_speed; /* if set, set single-initiator buffering mode */ if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) { mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; } if (mode_blk) mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc); else mode_hdr->blk_desc_len = 0; /* * First, if the user wants us to set the compression algorithm or * just turn compression on, check to make sure that this drive * supports compression. */ if (params_to_set & SA_PARAM_COMPRESSION) { /* * If the compression algorithm is 0, disable compression. * If the compression algorithm is non-zero, enable * compression and set the compression type to the * specified compression algorithm, unless the algorithm is * MT_COMP_ENABLE. In that case, we look at the * compression algorithm that is currently set and if it is * non-zero, we leave it as-is. If it is zero, and we have * saved a compression algorithm from a time when * compression was enabled before, set the compression to * the saved value. */ switch (ccomp->hdr.pagecode & ~0x80) { case SA_DEVICE_CONFIGURATION_PAGE: { struct scsi_dev_conf_page *dcp = &cpage->dconf; if (calg == 0) { dcp->sel_comp_alg = SA_COMP_NONE; break; } if (calg != MT_COMP_ENABLE) { dcp->sel_comp_alg = calg; } else if (dcp->sel_comp_alg == SA_COMP_NONE && softc->saved_comp_algorithm != 0) { dcp->sel_comp_alg = softc->saved_comp_algorithm; } break; } case SA_DATA_COMPRESSION_PAGE: if (ccomp->dcomp.dce_and_dcc & SA_DCP_DCC) { struct scsi_data_compression_page *dcp = &cpage->dcomp; if (calg == 0) { /* * Disable compression, but leave the * decompression and the capability bit * alone.
*/ dcp->dce_and_dcc = SA_DCP_DCC; dcp->dde_and_red |= SA_DCP_DDE; break; } /* enable compression && decompression */ dcp->dce_and_dcc = SA_DCP_DCE | SA_DCP_DCC; dcp->dde_and_red |= SA_DCP_DDE; /* * If there, use compression algorithm from caller. * Otherwise, if there's a saved compression algorithm * and there is no current algorithm, use the saved * algorithm. Else parrot back what we got and hope * for the best. */ if (calg != MT_COMP_ENABLE) { scsi_ulto4b(calg, dcp->comp_algorithm); scsi_ulto4b(calg, dcp->decomp_algorithm); } else if (scsi_4btoul(dcp->comp_algorithm) == 0 && softc->saved_comp_algorithm != 0) { scsi_ulto4b(softc->saved_comp_algorithm, dcp->comp_algorithm); scsi_ulto4b(softc->saved_comp_algorithm, dcp->decomp_algorithm); } break; } /* * Compression does not appear to be supported- * at least via the DATA COMPRESSION page. It * would be too much to ask us to believe that * the page itself is supported, but incorrectly * reports an ability to manipulate data compression, * so we'll assume that this device doesn't support * compression. We can just fall through for that. */ /* FALLTHROUGH */ default: /* * The drive doesn't seem to support compression, * so turn off the set compression bit. */ params_to_set &= ~SA_PARAM_COMPRESSION; xpt_print(periph->path, "device does not seem to support compression\n"); /* * If that was the only thing the user wanted us to set, * clean up allocated resources and return with * 'operation not supported'. */ if (params_to_set == SA_PARAM_NONE) { free(mode_buffer, M_SCSISA); xpt_release_ccb(ccb); return (ENODEV); } /* * That wasn't the only thing the user wanted us to set. * So, decrease the stated mode buffer length by the * size of the compression mode page. */ mode_buffer_len -= sizeof(sa_comp_t); } } /* It is safe to retry this operation */ scsi_mode_select(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, (params_to_set & SA_PARAM_COMPRESSION)? TRUE : FALSE, FALSE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, sense_flags, softc->device_stats); if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { int idx; char *xyz = mode_buffer; xpt_print_path(periph->path); printf("Err%d, Mode Select Data=", error); for (idx = 0; idx < mode_buffer_len; idx++) printf(" 0x%02x", xyz[idx] & 0xff); printf("\n"); } if (error) { /* * If we can, try without setting density/blocksize. */ if (mode_blk) { if ((params_to_set & (SA_PARAM_DENSITY|SA_PARAM_BLOCKSIZE)) == 0) { mode_blk = NULL; goto retry; } } else { mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; cpage = (sa_comp_t *)&mode_blk[1]; } /* * If we were setting the blocksize, and that failed, we * want to set it to its original value. If we weren't * setting the blocksize, we don't want to change it. */ scsi_ulto3b(current_blocksize, mode_blk->blklen); /* * Set density if requested, else preserve old density. * SCSI_SAME_DENSITY only applies to SCSI-2 or better * devices, else density we've latched up in our softc. */ if (params_to_set & SA_PARAM_DENSITY) { mode_blk->density = current_density; } else if (softc->scsi_rev > SCSI_REV_CCS) { mode_blk->density = SCSI_SAME_DENSITY; } else { mode_blk->density = softc->media_density; } if (params_to_set & SA_PARAM_COMPRESSION) bcopy(ccomp, cpage, sizeof (sa_comp_t)); /* * The retry count is the only CCB field that might have been * changed that we care about, so reset it back to 1. 
*/ ccb->ccb_h.retry_count = 1; cam_periph_runccb(ccb, saerror, 0, sense_flags, softc->device_stats); } xpt_release_ccb(ccb); if (ccomp != NULL) free(ccomp, M_SCSISA); if (params_to_set & SA_PARAM_COMPRESSION) { if (error) { softc->flags &= ~SA_FLAG_COMP_ENABLED; /* * Even if we get an error setting compression, * do not say that we don't support it. We could * have been wrong, or it may be media specific. * softc->flags &= ~SA_FLAG_COMP_SUPP; */ softc->saved_comp_algorithm = softc->comp_algorithm; softc->comp_algorithm = 0; } else { softc->flags |= SA_FLAG_COMP_ENABLED; softc->comp_algorithm = calg; } } free(mode_buffer, M_SCSISA); return (error); } static int saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb, struct mtextget *g) { int indent, error; char tmpstr[80]; struct sa_softc *softc; int tmpint; uint32_t maxio_tmp; struct ccb_getdev cgd; softc = (struct sa_softc *)periph->softc; error = 0; error = sagetparams_common(dev, periph); if (error) goto extget_bailout; if (!SA_IS_CTRL(dev) && !softc->open_pending_mount) sagetpos(periph); indent = 0; SASBADDNODE(sb, indent, mtextget); /* * Basic CAM peripheral information. */ SASBADDVARSTR(sb, indent, periph->periph_name, %s, periph_name, strlen(periph->periph_name) + 1); SASBADDUINT(sb, indent, periph->unit_number, %u, unit_number); xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if ((cgd.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Error %#x returned for XPT_GDEV_TYPE CCB", cgd.ccb_h.status); goto extget_bailout; } cam_strvis(tmpstr, cgd.inq_data.vendor, sizeof(cgd.inq_data.vendor), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, vendor, sizeof(cgd.inq_data.vendor) + 1, "SCSI Vendor ID"); cam_strvis(tmpstr, cgd.inq_data.product, sizeof(cgd.inq_data.product), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, product, sizeof(cgd.inq_data.product) + 1, "SCSI Product ID"); cam_strvis(tmpstr, cgd.inq_data.revision, sizeof(cgd.inq_data.revision), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, revision, sizeof(cgd.inq_data.revision) + 1, "SCSI Revision"); if (cgd.serial_num_len > 0) { char *tmpstr2; size_t ts2_len; int ts2_malloc; ts2_len = 0; if (cgd.serial_num_len > sizeof(tmpstr)) { ts2_len = cgd.serial_num_len + 1; ts2_malloc = 1; tmpstr2 = malloc(ts2_len, M_SCSISA, M_NOWAIT | M_ZERO); /* * The 80 characters allocated on the stack above * will handle the vast majority of serial numbers. * If we run into one that is larger than that, and * we can't malloc the length without blocking, * bail out with an out of memory error. */ if (tmpstr2 == NULL) { error = ENOMEM; goto extget_bailout; } } else { ts2_len = sizeof(tmpstr); ts2_malloc = 0; tmpstr2 = tmpstr; } cam_strvis(tmpstr2, cgd.serial_num, cgd.serial_num_len, ts2_len); SASBADDVARSTRDESC(sb, indent, tmpstr2, %s, serial_num, (ssize_t)cgd.serial_num_len + 1, "Serial Number"); if (ts2_malloc != 0) free(tmpstr2, M_SCSISA); } else { /* * We return a serial_num element in any case, but it will * be empty if the device has no serial number. 
*/ tmpstr[0] = '\0'; SASBADDVARSTRDESC(sb, indent, tmpstr, %s, serial_num, (ssize_t)0, "Serial Number"); } SASBADDUINTDESC(sb, indent, softc->maxio, %u, maxio, "Maximum I/O size allowed by driver and controller"); SASBADDUINTDESC(sb, indent, softc->cpi_maxio, %u, cpi_maxio, "Maximum I/O size reported by controller"); SASBADDUINTDESC(sb, indent, softc->max_blk, %u, max_blk, "Maximum block size supported by tape drive and media"); SASBADDUINTDESC(sb, indent, softc->min_blk, %u, min_blk, "Minimum block size supported by tape drive and media"); SASBADDUINTDESC(sb, indent, softc->blk_gran, %u, blk_gran, "Block granularity supported by tape drive and media"); maxio_tmp = min(softc->max_blk, softc->maxio); SASBADDUINTDESC(sb, indent, maxio_tmp, %u, max_effective_iosize, "Maximum possible I/O size"); SASBADDINTDESC(sb, indent, softc->flags & SA_FLAG_FIXED ? 1 : 0, %d, fixed_mode, "Set to 1 for fixed block mode, 0 for variable block"); /* * XXX KDM include SIM, bus, target, LUN? */ if (softc->flags & SA_FLAG_COMP_UNSUPP) tmpint = 0; else tmpint = 1; SASBADDINTDESC(sb, indent, tmpint, %d, compression_supported, "Set to 1 if compression is supported, 0 if not"); if (softc->flags & SA_FLAG_COMP_ENABLED) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, indent, tmpint, %d, compression_enabled, "Set to 1 if compression is enabled, 0 if not"); SASBADDUINTDESC(sb, indent, softc->comp_algorithm, %u, compression_algorithm, "Numeric compression algorithm"); safillprot(softc, &indent, sb); SASBADDUINTDESC(sb, indent, softc->media_blksize, %u, media_blocksize, "Block size reported by drive or set by user"); SASBADDINTDESC(sb, indent, (intmax_t)softc->fileno, %jd, calculated_fileno, "Calculated file number, -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->blkno, %jd, calculated_rel_blkno, "Calculated block number relative to file, " "set to -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_fileno, %jd, reported_fileno, "File number reported by drive, -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_blkno, %jd, reported_blkno, "Block number relative to BOP/BOT reported by " "drive, -1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->partition, %jd, partition, "Current partition number, 0 is the default"); SASBADDINTDESC(sb, indent, softc->bop, %d, bop, "Set to 1 if drive is at the beginning of partition/tape, 0 if " "not, -1 if unknown"); SASBADDINTDESC(sb, indent, softc->eop, %d, eop, "Set to 1 if drive is past early warning, 0 if not, -1 if unknown"); SASBADDINTDESC(sb, indent, softc->bpew, %d, bpew, "Set to 1 if drive is past programmable early warning, 0 if not, " "-1 if unknown"); SASBADDINTDESC(sb, indent, (intmax_t)softc->last_io_resid, %jd, residual, "Residual for the last I/O"); /* * XXX KDM should we send a string with the current driver * status already decoded instead of a numeric value?
*/ SASBADDINTDESC(sb, indent, softc->dsreg, %d, dsreg, "Current state of the driver"); safilldensitysb(softc, &indent, sb); SASBENDNODE(sb, indent, mtextget); extget_bailout: return (error); } static int saparamget(struct sa_softc *softc, struct sbuf *sb) { int indent; indent = 0; SASBADDNODE(sb, indent, mtparamget); SASBADDINTDESC(sb, indent, softc->sili, %d, sili, "Suppress an error on underlength variable reads"); SASBADDINTDESC(sb, indent, softc->eot_warn, %d, eot_warn, "Return an error to warn that end of tape is approaching"); safillprot(softc, &indent, sb); SASBENDNODE(sb, indent, mtparamget); return (0); } static void saprevent(struct cam_periph *periph, int action) { struct sa_softc *softc; union ccb *ccb; int error, sf; softc = (struct sa_softc *)periph->softc; if ((action == PR_ALLOW) && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0) return; if ((action == PR_PREVENT) && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0) return; /* * We can be quiet about illegal requests. */ if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { sf = 0; } else sf = SF_QUIET_IR; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_prevent(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, sf, softc->device_stats); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~SA_FLAG_TAPE_LOCKED; else softc->flags |= SA_FLAG_TAPE_LOCKED; } xpt_release_ccb(ccb); } static int sarewind(struct cam_periph *periph) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_rewind(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); softc->dsreg = MTIO_DSREG_REW; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; softc->rep_fileno = softc->rep_blkno = (daddr_t) 0; } else { softc->fileno = softc->blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; } return (error); } static int saspace(struct cam_periph *periph, int count, scsi_space_code code) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* This cannot be retried */ scsi_space(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, code, count, SSD_FULL_SIZE, SPACE_TIMEOUT); /* * Clear residual because we will be using it. */ softc->last_ctl_resid = 0; softc->dsreg = (count < 0)? MTIO_DSREG_REV : MTIO_DSREG_FWD; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); /* * If a spacing operation has failed, we need to invalidate * this mount. * * If the spacing operation was setmarks or to end of recorded data, * we no longer know our relative position. * * If the spacing operation was spacing files in reverse, we * take account of the residual, but still check against less * than zero- if we've gone negative, we must have hit BOT. * * If the spacing operation was spacing records in reverse and * we have a residual, we've either hit BOT or hit a filemark. * In the former case, we know our new record number (0).
In * the latter case, we have absolutely no idea what the real * record number is- we've stopped between the end of the last * record in the previous file and the filemark that stopped * our spacing backwards. */ if (error) { softc->fileno = softc->blkno = (daddr_t) -1; softc->rep_blkno = softc->partition = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; } else if (code == SS_SETMARKS || code == SS_EOD) { softc->fileno = softc->blkno = (daddr_t) -1; } else if (code == SS_FILEMARKS && softc->fileno != (daddr_t) -1) { softc->fileno += (count - softc->last_ctl_resid); if (softc->fileno < 0) /* we must have hit BOT */ softc->fileno = 0; softc->blkno = 0; } else if (code == SS_BLOCKS && softc->blkno != (daddr_t) -1) { softc->blkno += (count - softc->last_ctl_resid); if (count < 0) { if (softc->last_ctl_resid || softc->blkno < 0) { if (softc->fileno == 0) { softc->blkno = 0; } else { softc->blkno = (daddr_t) -1; } } } } if (error == 0) sagetpos(periph); return (error); } static int sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed) { union ccb *ccb; struct sa_softc *softc; int error, nwm = 0; softc = (struct sa_softc *)periph->softc; if (softc->open_rdonly) return (EBADF); ccb = cam_periph_getccb(periph, 1); /* * Clear residual because we will be using it. */ softc->last_ctl_resid = 0; softc->dsreg = MTIO_DSREG_FMK; /* this *must* not be retried */ scsi_write_filemarks(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, immed, setmarks, nmarks, SSD_FULL_SIZE, IO_TIMEOUT); softc->dsreg = MTIO_DSREG_REST; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); if (error == 0 && nmarks) { struct sa_softc *softc = (struct sa_softc *)periph->softc; nwm = nmarks - softc->last_ctl_resid; softc->filemarks += nwm; } xpt_release_ccb(ccb); /* * Update relative positions (if we're doing that). */ if (error) { softc->fileno = softc->blkno = softc->partition = (daddr_t) -1; } else if (softc->fileno != (daddr_t) -1) { softc->fileno += nwm; softc->blkno = 0; } /* * Ask the tape drive for position information. */ sagetpos(periph); /* * If we got valid position information, since we just wrote a file * mark, we know we're at the file mark and block 0 after that * filemark. */ if (softc->rep_fileno != (daddr_t) -1) { softc->fileno = softc->rep_fileno; softc->blkno = 0; } return (error); } static int sagetpos(struct cam_periph *periph) { union ccb *ccb; struct scsi_tape_position_long_data long_pos; struct sa_softc *softc = (struct sa_softc *)periph->softc; int error; if (softc->quirks & SA_QUIRK_NO_LONG_POS) { softc->rep_fileno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->bop = softc->eop = softc->bpew = -1; return (EOPNOTSUPP); } bzero(&long_pos, sizeof(long_pos)); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_read_position_10(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ SA_RPOS_LONG_FORM, /*data_ptr*/ (uint8_t *)&long_pos, /*length*/ sizeof(long_pos), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); softc->dsreg = MTIO_DSREG_RBSY; error = cam_periph_runccb(ccb, saerror, 0, SF_QUIET_IR, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; if (error == 0) { if (long_pos.flags & SA_RPOS_LONG_MPU) { /* * If the drive doesn't know what file mark it is * on, our calculated filemark isn't going to be * accurate either.
*/ softc->fileno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; } else { softc->fileno = softc->rep_fileno = scsi_8btou64(long_pos.logical_file_num); } if (long_pos.flags & SA_RPOS_LONG_LONU) { softc->partition = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; /* * If the tape drive doesn't know its block * position, we can't claim to know it either. */ softc->blkno = (daddr_t) -1; } else { softc->partition = scsi_4btoul(long_pos.partition); softc->rep_blkno = scsi_8btou64(long_pos.logical_object_num); } if (long_pos.flags & SA_RPOS_LONG_BOP) softc->bop = 1; else softc->bop = 0; if (long_pos.flags & SA_RPOS_LONG_EOP) softc->eop = 1; else softc->eop = 0; if ((long_pos.flags & SA_RPOS_LONG_BPEW) || (softc->set_pews_status != 0)) { softc->bpew = 1; if (softc->set_pews_status > 0) softc->set_pews_status--; } else softc->bpew = 0; } else if (error == EINVAL) { /* * If this drive returned an invalid-request type error, * then it likely doesn't support the long form report. */ softc->quirks |= SA_QUIRK_NO_LONG_POS; } if (error != 0) { softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->bop = softc->eop = softc->bpew = -1; } xpt_release_ccb(ccb); return (error); } static int sardpos(struct cam_periph *periph, int hard, u_int32_t *blkptr) { struct scsi_tape_position_data loc; union ccb *ccb; struct sa_softc *softc = (struct sa_softc *)periph->softc; int error; /* * We try and flush any buffered writes here if we were writing * and we're trying to get hardware block position. It eats * up performance substantially, but I'm wary of drive firmware. * * I think that *logical* block position is probably okay- * but hardware block position might have to wait for data * to hit media to be valid. Caveat Emptor. */ if (hard && (softc->flags & SA_FLAG_TAPE_WRITTEN)) { error = sawritefilemarks(periph, 0, 0, 0); if (error && error != EACCES) return (error); } ccb = cam_periph_getccb(periph, 1); scsi_read_position(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, hard, &loc, SSD_FULL_SIZE, SCSIOP_TIMEOUT); softc->dsreg = MTIO_DSREG_RBSY; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; if (error == 0) { if (loc.flags & SA_RPOS_UNCERTAIN) { error = EINVAL; /* nothing is certain */ } else { *blkptr = scsi_4btoul(loc.firstblk); } } xpt_release_ccb(ccb); return (error); } static int sasetpos(struct cam_periph *periph, int hard, struct mtlocate *locate_info) { union ccb *ccb; struct sa_softc *softc; int locate16; int immed, cp; int error; /* * We used to try and flush any buffered writes here. * Now we push this onto user applications to either * flush the pending writes themselves (via a zero count * WRITE FILEMARKS command) or they can trust their tape * drive to do this correctly for them. */ softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); cp = locate_info->flags & MT_LOCATE_FLAG_CHANGE_PART ? 1 : 0; immed = locate_info->flags & MT_LOCATE_FLAG_IMMED ? 1 : 0; /* * Determine whether we have to use LOCATE or LOCATE16. The hard * bit is only possible with LOCATE, but the new ioctls do not * allow setting that bit. So we can't get into the situation of * having the hard bit set with a block address that is larger than * 32-bits. 
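	 * Concretely, the code below falls back to LOCATE(16) whenever the
	 * destination is not a plain logical object, the block address mode
	 * is explicit rather than implicit, or the logical ID is too large
	 * for the 32-bit LOCATE(10) field (logical_id > SA_SPOS_MAX_BLK).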
*/ if (hard != 0) locate16 = 0; else if ((locate_info->dest_type != MT_LOCATE_DEST_OBJECT) || (locate_info->block_address_mode != MT_LOCATE_BAM_IMPLICIT) || (locate_info->logical_id > SA_SPOS_MAX_BLK)) locate16 = 1; else locate16 = 0; if (locate16 != 0) { scsi_locate_16(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*immed*/ immed, /*cp*/ cp, /*dest_type*/ locate_info->dest_type, /*bam*/ locate_info->block_address_mode, /*partition*/ locate_info->partition, /*logical_id*/ locate_info->logical_id, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SPACE_TIMEOUT); } else { scsi_locate_10(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*immed*/ immed, /*cp*/ cp, /*hard*/ hard, /*partition*/ locate_info->partition, /*block_address*/ locate_info->logical_id, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SPACE_TIMEOUT); } softc->dsreg = MTIO_DSREG_POS; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); /* * We assume the calculated file and block numbers are unknown * unless we have enough information to populate them. */ softc->fileno = softc->blkno = (daddr_t) -1; /* * If the user requested changing the partition and the request * succeeded, note the partition. */ if ((error == 0) && (cp != 0)) softc->partition = locate_info->partition; else softc->partition = (daddr_t) -1; if (error == 0) { switch (locate_info->dest_type) { case MT_LOCATE_DEST_FILE: /* * This is the only case where we can reliably * calculate the file and block numbers. */ softc->fileno = locate_info->logical_id; softc->blkno = 0; break; case MT_LOCATE_DEST_OBJECT: case MT_LOCATE_DEST_SET: case MT_LOCATE_DEST_EOD: default: break; } } /* * Ask the drive for current position information. */ sagetpos(periph); return (error); } static int saretension(struct cam_periph *periph) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_load_unload(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, TRUE, TRUE, SSD_FULL_SIZE, ERASE_TIMEOUT); softc->dsreg = MTIO_DSREG_TEN; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; sagetpos(periph); } else softc->partition = softc->fileno = softc->blkno = (daddr_t) -1; return (error); } static int sareservereleaseunit(struct cam_periph *periph, int reserve) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_reserve_release_unit(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, 0, SSD_FULL_SIZE, SCSIOP_TIMEOUT, reserve); softc->dsreg = MTIO_DSREG_RBSY; error = cam_periph_runccb(ccb, saerror, 0, SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); /* * If the error was Illegal Request, then the device doesn't support * RESERVE/RELEASE. This is not an error. 
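	 * (RESERVE(6) and RELEASE(6) were made obsolete in later SPC
	 * revisions in favor of persistent reservations, so a modern drive
	 * rejecting them with ILLEGAL REQUEST is expected behavior.)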
*/ if (error == EINVAL) { error = 0; } return (error); } static int saloadunload(struct cam_periph *periph, int load) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); /* It is safe to retry this operation */ scsi_load_unload(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, FALSE, load, SSD_FULL_SIZE, REWIND_TIMEOUT); softc->dsreg = (load)? MTIO_DSREG_LD : MTIO_DSREG_UNL; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error || load == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) -1; softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; } else if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; sagetpos(periph); } return (error); } static int saerase(struct cam_periph *periph, int longerase) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; if (softc->open_rdonly) return (EBADF); ccb = cam_periph_getccb(periph, 1); scsi_erase(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, FALSE, longerase, SSD_FULL_SIZE, ERASE_TIMEOUT); softc->dsreg = MTIO_DSREG_ZER; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); return (error); } /* * Fill an sbuf with density data in XML format. This particular macro * works for multi-byte integer fields. * * Note that 1 byte fields aren't supported here. The reason is that the * compiler does not evaluate the sizeof(), and assumes that any of the * sizes are possible for a given field. So passing in a multi-byte * field will result in a warning that the assignment makes an integer * from a pointer without a cast, if there is an assignment in the 1 byte * case. */ #define SAFILLDENSSB(dens_data, sb, indent, field, desc_remain, \ len_to_go, cur_offset, desc){ \ size_t cur_field_len; \ \ cur_field_len = sizeof(dens_data->field); \ if (desc_remain < cur_field_len) { \ len_to_go -= desc_remain; \ cur_offset += desc_remain; \ continue; \ } \ len_to_go -= cur_field_len; \ cur_offset += cur_field_len; \ desc_remain -= cur_field_len; \ \ switch (sizeof(dens_data->field)) { \ case 1: \ KASSERT(1 == 0, ("Programmer error, invalid 1 byte " \ "field width for SAFILLDENSFIELD")); \ break; \ case 2: \ SASBADDUINTDESC(sb, indent, \ scsi_2btoul(dens_data->field), %u, field, desc); \ break; \ case 3: \ SASBADDUINTDESC(sb, indent, \ scsi_3btoul(dens_data->field), %u, field, desc); \ break; \ case 4: \ SASBADDUINTDESC(sb, indent, \ scsi_4btoul(dens_data->field), %u, field, desc); \ break; \ case 8: \ SASBADDUINTDESC(sb, indent, \ (uintmax_t)scsi_8btou64(dens_data->field), %ju, \ field, desc); \ break; \ default: \ break; \ } \ }; /* * Fill an sbuf with density data in XML format. This particular macro * works for strings. */ #define SAFILLDENSSBSTR(dens_data, sb, indent, field, desc_remain, \ len_to_go, cur_offset, desc){ \ size_t cur_field_len; \ char tmpstr[32]; \ \ cur_field_len = sizeof(dens_data->field); \ if (desc_remain < cur_field_len) { \ len_to_go -= desc_remain; \ cur_offset += desc_remain; \ continue; \ } \ len_to_go -= cur_field_len; \ cur_offset += cur_field_len; \ desc_remain -= cur_field_len; \ \ cam_strvis(tmpstr, dens_data->field, \ sizeof(dens_data->field), sizeof(tmpstr)); \ SASBADDVARSTRDESC(sb, indent, tmpstr, %s, field, \ strlen(tmpstr) + 1, desc); \ }; /* * Fill an sbuf with density data descriptors. 
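 *
 * A minimal sketch of the length handling, for illustration only (this
 * helper is not part of the driver): the REPORT DENSITY SUPPORT payload
 * is a scsi_density_hdr followed by descriptors, and the number of
 * descriptor bytes that may be trusted is the smaller of what the header
 * claims and what the drive actually transferred.
 */
static inline uint32_t
sa_density_desc_bytes(const uint8_t *buf, uint32_t buf_len)
{
	const struct scsi_density_hdr *hdr;

	if (buf_len < sizeof(*hdr))
		return (0);
	hdr = (const struct scsi_density_hdr *)buf;
	/* Clamp the advertised length to the bytes actually returned. */
	return (min(scsi_2btoul(hdr->length),
	    buf_len - (uint32_t)sizeof(*hdr)));
}

/*
 * The function below performs this walk for real, emitting one
 * density_entry node per descriptor.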
*/ static void safilldenstypesb(struct sbuf *sb, int *indent, uint8_t *buf, int buf_len, int is_density) { struct scsi_density_hdr *hdr; uint32_t hdr_len; int len_to_go, cur_offset; int length_offset; int num_reports, need_close; /* * We need at least the header length. Note that this isn't an * error, not all tape drives will have every data type. */ if (buf_len < sizeof(*hdr)) goto bailout; hdr = (struct scsi_density_hdr *)buf; hdr_len = scsi_2btoul(hdr->length); len_to_go = min(buf_len - sizeof(*hdr), hdr_len); if (is_density) { length_offset = __offsetof(struct scsi_density_data, bits_per_mm); } else { length_offset = __offsetof(struct scsi_medium_type_data, num_density_codes); } cur_offset = sizeof(*hdr); num_reports = 0; need_close = 0; while (len_to_go > length_offset) { struct scsi_density_data *dens_data; struct scsi_medium_type_data *type_data; int desc_remain; size_t cur_field_len; dens_data = NULL; type_data = NULL; if (is_density) { dens_data =(struct scsi_density_data *)&buf[cur_offset]; if (dens_data->byte2 & SDD_DLV) desc_remain = scsi_2btoul(dens_data->length); else desc_remain = SDD_DEFAULT_LENGTH - length_offset; } else { type_data = (struct scsi_medium_type_data *) &buf[cur_offset]; desc_remain = scsi_2btoul(type_data->length); } len_to_go -= length_offset; desc_remain = min(desc_remain, len_to_go); cur_offset += length_offset; if (need_close != 0) { SASBENDNODE(sb, *indent, density_entry); } SASBADDNODENUM(sb, *indent, density_entry, num_reports); num_reports++; need_close = 1; if (is_density) { SASBADDUINTDESC(sb, *indent, dens_data->primary_density_code, %u, primary_density_code, "Primary Density Code"); SASBADDUINTDESC(sb, *indent, dens_data->secondary_density_code, %u, secondary_density_code, "Secondary Density Code"); SASBADDUINTDESC(sb, *indent, dens_data->byte2 & ~SDD_DLV, %#x, density_flags, "Density Flags"); SAFILLDENSSB(dens_data, sb, *indent, bits_per_mm, desc_remain, len_to_go, cur_offset, "Bits per mm"); SAFILLDENSSB(dens_data, sb, *indent, media_width, desc_remain, len_to_go, cur_offset, "Media width"); SAFILLDENSSB(dens_data, sb, *indent, tracks, desc_remain, len_to_go, cur_offset, "Number of Tracks"); SAFILLDENSSB(dens_data, sb, *indent, capacity, desc_remain, len_to_go, cur_offset, "Capacity"); SAFILLDENSSBSTR(dens_data, sb, *indent, assigning_org, desc_remain, len_to_go, cur_offset, "Assigning Organization"); SAFILLDENSSBSTR(dens_data, sb, *indent, density_name, desc_remain, len_to_go, cur_offset, "Density Name"); SAFILLDENSSBSTR(dens_data, sb, *indent, description, desc_remain, len_to_go, cur_offset, "Description"); } else { int i; SASBADDUINTDESC(sb, *indent, type_data->medium_type, %u, medium_type, "Medium Type"); cur_field_len = __offsetof(struct scsi_medium_type_data, media_width) - __offsetof(struct scsi_medium_type_data, num_density_codes); if (desc_remain < cur_field_len) { len_to_go -= desc_remain; cur_offset += desc_remain; continue; } len_to_go -= cur_field_len; cur_offset += cur_field_len; desc_remain -= cur_field_len; SASBADDINTDESC(sb, *indent, type_data->num_density_codes, %d, num_density_codes, "Number of Density Codes"); SASBADDNODE(sb, *indent, density_code_list); for (i = 0; i < type_data->num_density_codes; i++) { SASBADDUINTDESC(sb, *indent, type_data->primary_density_codes[i], %u, density_code, "Density Code"); } SASBENDNODE(sb, *indent, density_code_list); SAFILLDENSSB(type_data, sb, *indent, media_width, desc_remain, len_to_go, cur_offset, "Media width"); SAFILLDENSSB(type_data, sb, *indent, medium_length, desc_remain, 
len_to_go, cur_offset, "Medium length"); /* * Account for the two reserved bytes. */ cur_field_len = sizeof(type_data->reserved2); if (desc_remain < cur_field_len) { len_to_go -= desc_remain; cur_offset += desc_remain; continue; } len_to_go -= cur_field_len; cur_offset += cur_field_len; desc_remain -= cur_field_len; SAFILLDENSSBSTR(type_data, sb, *indent, assigning_org, desc_remain, len_to_go, cur_offset, "Assigning Organization"); SAFILLDENSSBSTR(type_data, sb, *indent, medium_type_name, desc_remain, len_to_go, cur_offset, "Medium type name"); SAFILLDENSSBSTR(type_data, sb, *indent, description, desc_remain, len_to_go, cur_offset, "Description"); } } if (need_close != 0) { SASBENDNODE(sb, *indent, density_entry); } bailout: return; } /* * Fill an sbuf with density data information */ static void safilldensitysb(struct sa_softc *softc, int *indent, struct sbuf *sb) { int i, is_density; SASBADDNODE(sb, *indent, mtdensity); SASBADDUINTDESC(sb, *indent, softc->media_density, %u, media_density, "Current Medium Density"); is_density = 0; for (i = 0; i < SA_DENSITY_TYPES; i++) { int tmpint; if (softc->density_info_valid[i] == 0) continue; SASBADDNODE(sb, *indent, density_report); if (softc->density_type_bits[i] & SRDS_MEDIUM_TYPE) { tmpint = 1; is_density = 0; } else { tmpint = 0; is_density = 1; } SASBADDINTDESC(sb, *indent, tmpint, %d, medium_type_report, "Medium type report"); if (softc->density_type_bits[i] & SRDS_MEDIA) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, *indent, tmpint, %d, media_report, "Media report"); safilldenstypesb(sb, indent, softc->density_info[i], softc->density_info_valid[i], is_density); SASBENDNODE(sb, *indent, density_report); } SASBENDNODE(sb, *indent, mtdensity); } #endif /* _KERNEL */ /* * Read tape block limits command. */ void scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_block_limits_data *rlimit_buf, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_block_limits *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, (u_int8_t *)rlimit_buf, sizeof(*rlimit_buf), sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_BLOCK_LIMITS; } void scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, int sli, int fixed, u_int32_t length, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sa_rw *scsi_cmd; int read; read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ; scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? SA_READ : SA_WRITE; scsi_cmd->sli_fixed = 0; if (sli && read) scsi_cmd->sli_fixed |= SAR_SLI; if (fixed) scsi_cmd->sli_fixed |= SARW_FIXED; scsi_ulto3b(length, scsi_cmd->length); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, (read ? CAM_DIR_IN : CAM_DIR_OUT) | ((readop & SCSI_RW_BIO) != 0 ? 
CAM_DATA_BIO : 0), tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int eot, int reten, int load, u_int8_t sense_len, u_int32_t timeout) { struct scsi_load_unload *scsi_cmd; scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOAD_UNLOAD; if (immediate) scsi_cmd->immediate = SLU_IMMED; if (eot) scsi_cmd->eot_reten_load |= SLU_EOT; if (reten) scsi_cmd->eot_reten_load |= SLU_RETEN; if (load) scsi_cmd->eot_reten_load |= SLU_LOAD; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, u_int8_t sense_len, u_int32_t timeout) { struct scsi_rewind *scsi_cmd; scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REWIND; if (immediate) scsi_cmd->immediate = SREW_IMMED; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_space(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, scsi_space_code code, u_int32_t count, u_int8_t sense_len, u_int32_t timeout) { struct scsi_space *scsi_cmd; scsi_cmd = (struct scsi_space *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SPACE; scsi_cmd->code = code; scsi_ulto3b(count, scsi_cmd->count); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int setmark, u_int32_t num_marks, u_int8_t sense_len, u_int32_t timeout) { struct scsi_write_filemarks *scsi_cmd; scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = WRITE_FILEMARKS; if (immediate) scsi_cmd->byte2 |= SWFMRK_IMMED; if (setmark) scsi_cmd->byte2 |= SWFMRK_WSMK; scsi_ulto3b(num_marks, scsi_cmd->num_marks); cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } /* * The reserve and release unit commands differ only by their opcodes. 
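 * (RESERVE_UNIT is opcode 0x16 and RELEASE_UNIT is 0x17, per the opcode
 * list in scsi_sa.h, which is why a single CDB builder with a 'reserve'
 * argument covers both.)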
*/ void scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int third_party, int third_party_id, u_int8_t sense_len, u_int32_t timeout, int reserve) { struct scsi_reserve_release_unit *scsi_cmd; scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); if (reserve) scsi_cmd->opcode = RESERVE_UNIT; else scsi_cmd->opcode = RELEASE_UNIT; if (third_party) { scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY; scsi_cmd->lun_thirdparty |= ((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_erase(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int long_erase, u_int8_t sense_len, u_int32_t timeout) { struct scsi_erase *scsi_cmd; scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = ERASE; if (immediate) scsi_cmd->lun_imm_long |= SE_IMMED; if (long_erase) scsi_cmd->lun_imm_long |= SE_LONG; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, NULL, 0, sense_len, sizeof(*scsi_cmd), timeout); } /* * Read Tape Position command. */ void scsi_read_position(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int hardsoft, struct scsi_tape_position_data *sbp, u_int8_t sense_len, u_int32_t timeout) { struct scsi_tape_read_position *scmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, (u_int8_t *)sbp, sizeof (*sbp), sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_read_position *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = READ_POSITION; scmd->byte1 = hardsoft; } /* * Read Tape Position command. */ void scsi_read_position_10(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int service_action, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout) { struct scsi_tape_read_position *scmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_read_position *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = READ_POSITION; scmd->byte1 = service_action; /* * The length is only currently set (as of SSC4r03) if the extended * form is specified. The other forms have fixed lengths. */ if (service_action == SA_RPOS_EXTENDED_FORM) scsi_ulto2b(length, scmd->length); } /* * Set Tape Position command. */ void scsi_set_position(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int hardsoft, u_int32_t blkno, u_int8_t sense_len, u_int32_t timeout) { struct scsi_tape_locate *scmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, (u_int8_t *)NULL, 0, sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = LOCATE; if (hardsoft) scmd->byte1 |= SA_SPOS_BT; scsi_ulto4b(blkno, scmd->blkaddr); } /* * XXX KDM figure out how to make a compatibility function. 
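 *
 * One possible shape for such a compatibility function, sketched here
 * only as an illustration (it is not part of the driver): forward the
 * legacy hardsoft/blkno interface to scsi_locate_10() with no immediate
 * bit and no partition change, which produces the same CDB that
 * scsi_set_position() above encodes by hand.
 */
static inline void
scsi_set_position_compat(struct ccb_scsiio *csio, u_int32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action,
    int hardsoft, u_int32_t blkno, u_int8_t sense_len, u_int32_t timeout)
{
	scsi_locate_10(csio, retries, cbfcnp, tag_action, /*immed*/ 0,
	    /*cp*/ 0, /*hard*/ hardsoft, /*partition*/ 0,
	    /*block_address*/ blkno, sense_len, timeout);
}

/*
 * LOCATE(10) itself: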
*/ void scsi_locate_10(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immed, int cp, int hard, int64_t partition, u_int32_t block_address, int sense_len, u_int32_t timeout) { struct scsi_tape_locate *scmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scmd), timeout); scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes; bzero(scmd, sizeof(*scmd)); scmd->opcode = LOCATE; if (immed) scmd->byte1 |= SA_SPOS_IMMED; if (cp) scmd->byte1 |= SA_SPOS_CP; if (hard) scmd->byte1 |= SA_SPOS_BT; scsi_ulto4b(block_address, scmd->blkaddr); scmd->partition = partition; } void scsi_locate_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immed, int cp, u_int8_t dest_type, int bam, int64_t partition, u_int64_t logical_id, int sense_len, u_int32_t timeout) { struct scsi_locate_16 *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_locate_16 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOCATE_16; if (immed) scsi_cmd->byte1 |= SA_LC_IMMEDIATE; if (cp) scsi_cmd->byte1 |= SA_LC_CP; scsi_cmd->byte1 |= (dest_type << SA_LC_DEST_TYPE_SHIFT); scsi_cmd->byte2 |= bam; scsi_cmd->partition = partition; scsi_u64to8b(logical_id, scsi_cmd->logical_id); } void scsi_report_density_support(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int media, int medium_type, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout) { struct scsi_report_density_support *scsi_cmd; scsi_cmd =(struct scsi_report_density_support *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_DENSITY_SUPPORT; if (media != 0) scsi_cmd->byte1 |= SRDS_MEDIA; if (medium_type != 0) scsi_cmd->byte1 |= SRDS_MEDIUM_TYPE; scsi_ulto2b(length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_set_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int byte1, u_int32_t proportion, u_int32_t sense_len, u_int32_t timeout) { struct scsi_set_capacity *scsi_cmd; scsi_cmd = (struct scsi_set_capacity *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CAPACITY; scsi_cmd->byte1 = byte1; scsi_ulto2b(proportion, scsi_cmd->cap_proportion); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_format_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int byte1, int byte2, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t sense_len, u_int32_t timeout) { struct scsi_format_medium *scsi_cmd; scsi_cmd = (struct scsi_format_medium*)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = FORMAT_MEDIUM; scsi_cmd->byte1 = byte1; scsi_cmd->byte2 = byte2; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/(dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_allow_overwrite(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int allow_overwrite, int partition, u_int64_t logical_id, u_int32_t sense_len, u_int32_t timeout) { struct scsi_allow_overwrite *scsi_cmd; scsi_cmd = (struct scsi_allow_overwrite *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = ALLOW_OVERWRITE; scsi_cmd->allow_overwrite = allow_overwrite; scsi_cmd->partition = partition; scsi_u64to8b(logical_id, scsi_cmd->logical_id); cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_sa.h =================================================================== --- head/sys/cam/scsi/scsi_sa.h (revision 326264) +++ head/sys/cam/scsi/scsi_sa.h (revision 326265) @@ -1,1081 +1,1083 @@ /*- * Structure and function declarations for the * SCSI Sequential Access Peripheral driver for CAM. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1999, 2000 Matthew Jacob * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SCSI_SCSI_SA_H #define _SCSI_SCSI_SA_H 1 #include struct scsi_read_block_limits { u_int8_t opcode; u_int8_t byte2; u_int8_t unused[3]; u_int8_t control; }; struct scsi_read_block_limits_data { u_int8_t gran; #define RBL_GRAN_MASK 0x1F #define RBL_GRAN(rblim) ((rblim)->gran & RBL_GRAN_MASK) u_int8_t maximum[3]; u_int8_t minimum[2]; }; struct scsi_sa_rw { u_int8_t opcode; u_int8_t sli_fixed; #define SAR_SLI 0x02 #define SARW_FIXED 0x01 u_int8_t length[3]; u_int8_t control; }; struct scsi_load_unload { u_int8_t opcode; u_int8_t immediate; #define SLU_IMMED 0x01 u_int8_t reserved[2]; u_int8_t eot_reten_load; #define SLU_EOT 0x04 #define SLU_RETEN 0x02 #define SLU_LOAD 0x01 u_int8_t control; }; struct scsi_rewind { u_int8_t opcode; u_int8_t immediate; #define SREW_IMMED 0x01 u_int8_t reserved[3]; u_int8_t control; }; typedef enum { SS_BLOCKS, SS_FILEMARKS, SS_SEQFILEMARKS, SS_EOD, SS_SETMARKS, SS_SEQSETMARKS } scsi_space_code; struct scsi_space { u_int8_t opcode; u_int8_t code; #define SREW_IMMED 0x01 u_int8_t count[3]; u_int8_t control; }; struct scsi_write_filemarks { u_int8_t opcode; u_int8_t byte2; #define SWFMRK_IMMED 0x01 #define SWFMRK_WSMK 0x02 u_int8_t num_marks[3]; u_int8_t control; }; /* * Reserve and release unit have the same exact cdb format, but different * opcodes. */ struct scsi_reserve_release_unit { u_int8_t opcode; u_int8_t lun_thirdparty; #define SRRU_LUN_MASK 0xE0 #define SRRU_3RD_PARTY 0x10 #define SRRU_3RD_SHAMT 1 #define SRRU_3RD_MASK 0xE u_int8_t reserved[3]; u_int8_t control; }; /* * Erase a tape */ struct scsi_erase { u_int8_t opcode; u_int8_t lun_imm_long; #define SE_LUN_MASK 0xE0 #define SE_LONG 0x1 #define SE_IMMED 0x2 u_int8_t reserved[3]; u_int8_t control; }; /* * Set tape capacity. */ struct scsi_set_capacity { u_int8_t opcode; u_int8_t byte1; #define SA_SSC_IMMED 0x01 u_int8_t reserved; u_int8_t cap_proportion[2]; u_int8_t control; }; /* * Format tape media. The CDB opcode is the same as the disk-specific * FORMAT UNIT command, but the fields are different inside the CDB. Thus * the reason for a separate definition here. */ struct scsi_format_medium { u_int8_t opcode; u_int8_t byte1; #define SFM_IMMED 0x01 #define SFM_VERIFY 0x02 u_int8_t byte2; #define SFM_FORMAT_DEFAULT 0x00 #define SFM_FORMAT_PARTITION 0x01 #define SFM_FORMAT_DEF_PART 0x02 #define SFM_FORMAT_MASK 0x0f u_int8_t length[2]; u_int8_t control; }; struct scsi_allow_overwrite { u_int8_t opcode; u_int8_t reserved1; u_int8_t allow_overwrite; #define SAO_ALLOW_OVERWRITE_DISABLED 0x00 #define SAO_ALLOW_OVERWRITE_CUR_POS 0x01 #define SAO_ALLOW_OVERWRITE_FORMAT 0x02 u_int8_t partition; u_int8_t logical_id[8]; u_int8_t reserved2[3]; u_int8_t control; }; /* * Dev specific mode page masks. */ #define SMH_SA_WP 0x80 #define SMH_SA_BUF_MODE_MASK 0x70 #define SMH_SA_BUF_MODE_NOBUF 0x00 #define SMH_SA_BUF_MODE_SIBUF 0x10 /* Single-Initiator buffering */ #define SMH_SA_BUF_MODE_MIBUF 0x20 /* Multi-Initiator buffering */ #define SMH_SA_SPEED_MASK 0x0F #define SMH_SA_SPEED_DEFAULT 0x00 /* * Sequential-access specific mode page numbers. */ #define SA_DEVICE_CONFIGURATION_PAGE 0x10 #define SA_MEDIUM_PARTITION_PAGE_1 0x11 #define SA_MEDIUM_PARTITION_PAGE_2 0x12 #define SA_MEDIUM_PARTITION_PAGE_3 0x13 #define SA_MEDIUM_PARTITION_PAGE_4 0x14 #define SA_DATA_COMPRESSION_PAGE 0x0f /* SCSI-3 */ /* * Mode page definitions. 
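 * (These SMH_SA_* masks decode the device-specific byte of the mode
 * parameter header: for example, SMH_SA_WP set means the medium is
 * write protected, and a buffer-mode field equal to
 * SMH_SA_BUF_MODE_SIBUF indicates single-initiator buffering.)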
*/ /* See SCSI-II spec 9.3.3.1 */ struct scsi_dev_conf_page { u_int8_t pagecode; /* 0x10 */ u_int8_t pagelength; /* 0x0e */ u_int8_t byte2; /* CAP, CAF, Active Format */ u_int8_t active_partition; u_int8_t wb_full_ratio; u_int8_t rb_empty_ratio; u_int8_t wrdelay_time[2]; u_int8_t byte8; #define SA_DBR 0x80 /* data buffer recovery */ #define SA_BIS 0x40 /* block identifiers supported */ #define SA_RSMK 0x20 /* report setmarks */ #define SA_AVC 0x10 /* automatic velocity control */ #define SA_SOCF_MASK 0x0c /* stop on consecutive formats */ #define SA_RBO 0x02 /* recover buffer order */ #define SA_REW 0x01 /* report early warning */ u_int8_t gap_size; u_int8_t byte10; /* from SCSI-3: SSC-4 Working draft (2/14) 8.3.3 */ #define SA_EOD_DEF_MASK 0xe0 /* EOD defined */ #define SA_EEG 0x10 /* Enable EOD Generation */ #define SA_SEW 0x08 /* Synchronize at Early Warning */ #define SA_SOFT_WP 0x04 /* Software Write Protect */ #define SA_BAML 0x02 /* Block Address Mode Lock */ #define SA_BAM 0x01 /* Block Address Mode */ u_int8_t ew_bufsize[3]; u_int8_t sel_comp_alg; #define SA_COMP_NONE 0x00 #define SA_COMP_DEFAULT 0x01 /* the following is 'reserved' in SCSI-2 but is defined in SSC-r22 */ u_int8_t extra_wp; #define SA_ASOC_WP 0x04 /* Associated Write Protect */ #define SA_PERS_WP 0x02 /* Persistent Write Protect */ #define SA_PERM_WP 0x01 /* Permanent Write Protect */ }; /* from SCSI-3: SSC-Rev10 (6/97) */ struct scsi_data_compression_page { u_int8_t page_code; /* 0x0f */ u_int8_t page_length; /* 0x0e */ u_int8_t dce_and_dcc; #define SA_DCP_DCE 0x80 /* Data compression enable */ #define SA_DCP_DCC 0x40 /* Data compression capable */ u_int8_t dde_and_red; #define SA_DCP_DDE 0x80 /* Data decompression enable */ #define SA_DCP_RED_MASK 0x60 /* Report Exception on Decomp. */ #define SA_DCP_RED_SHAMT 5 #define SA_DCP_RED_0 0x00 #define SA_DCP_RED_1 0x20 #define SA_DCP_RED_2 0x40 u_int8_t comp_algorithm[4]; u_int8_t decomp_algorithm[4]; u_int8_t reserved[4]; }; typedef union { struct { u_int8_t pagecode, pagelength; } hdr; struct scsi_dev_conf_page dconf; struct scsi_data_compression_page dcomp; } sa_comp_t; /* * Control Data Protection subpage. This is as defined in SSC3r03. */ struct scsi_control_data_prot_subpage { uint8_t page_code; #define SA_CTRL_DP_PAGE_CODE 0x0a uint8_t subpage_code; #define SA_CTRL_DP_SUBPAGE_CODE 0xf0 uint8_t length[2]; uint8_t prot_method; #define SA_CTRL_DP_NO_LBP 0x00 #define SA_CTRL_DP_REED_SOLOMON 0x01 #define SA_CTRL_DP_METHOD_MAX 0xff uint8_t pi_length; #define SA_CTRL_DP_PI_LENGTH_MASK 0x3f #define SA_CTRL_DP_RS_LENGTH 4 uint8_t prot_bits; #define SA_CTRL_DP_LBP_W 0x80 #define SA_CTRL_DP_LBP_R 0x40 #define SA_CTRL_DP_RBDP 0x20 uint8_t reserved[]; }; /* * This is the Read/Write Control mode page used on IBM Enterprise Tape * Drives. They are known as 3592, TS, or Jaguar drives. The SCSI inquiry * data will show a Product ID "03592XXX", where XXX is 'J1A', 'E05' (TS1120), * 'E06' (TS1130), 'E07' (TS1140) or 'E08' (TS1150). * * This page definition is current as of the 3592 SCSI Reference v6, * released on December 16th, 2014. 
*/ struct scsi_tape_ibm_rw_control { uint8_t page_code; #define SA_IBM_RW_CTRL_PAGE_CODE 0x25 uint8_t page_length; uint8_t ignore_seq_checks; #define SA_IBM_RW_CTRL_LOC_IGNORE_SEQ 0x04 #define SA_IBM_RW_CTRL_SPC_BLK_IGNORE_SEQ 0x02 #define SA_IBM_RW_CTRL_SPC_FM_IGNORE_SEQ 0x01 uint8_t ignore_data_checks; #define SA_IBM_RW_CTRL_LOC_IGNORE_DATA 0x04 #define SA_IBM_RW_CTRL_SPC_BLK_IGNORE_DATA 0x02 #define SA_IBM_RW_CTRL_SPC_FM_IGNORE_DATA 0x01 uint8_t reserved1; uint8_t leop_method; #define SA_IBM_RW_CTRL_LEOP_DEFAULT 0x00 #define SA_IBM_RW_CTRL_LEOP_MAX_CAP 0x01 #define SA_IBM_RW_CTRL_LEOP_CONST_CAP 0x02 uint8_t leop_ew[2]; uint8_t byte8; #define SA_IBM_RW_CTRL_DISABLE_FASTSYNC 0x80 #define SA_IBM_RW_CTRL_DISABLE_SKIPSYNC 0x40 #define SA_IBM_RW_CTRL_DISABLE_CROSS_EOD 0x08 #define SA_IBM_RW_CTRL_DISABLE_CROSS_PERM_ERR 0x04 #define SA_IBM_RW_CTRL_REPORT_SEG_EW 0x02 #define SA_IBM_RW_CTRL_REPORT_HOUSEKEEPING_ERR 0x01 uint8_t default_write_dens_bop_0; uint8_t pending_write_dens_bop_0; uint8_t reserved2[21]; }; struct scsi_tape_read_position { u_int8_t opcode; /* READ_POSITION */ u_int8_t byte1; /* set LSB to read hardware block pos */ #define SA_RPOS_SHORT_FORM 0x00 #define SA_RPOS_SHORT_VENDOR 0x01 #define SA_RPOS_LONG_FORM 0x06 #define SA_RPOS_EXTENDED_FORM 0x08 u_int8_t reserved[5]; u_int8_t length[2]; u_int8_t control; }; struct scsi_tape_position_data { /* Short Form */ u_int8_t flags; #define SA_RPOS_BOP 0x80 /* Beginning of Partition */ #define SA_RPOS_EOP 0x40 /* End of Partition */ #define SA_RPOS_BCU 0x20 /* Block Count Unknown (SCSI3) */ #define SA_RPOS_BYCU 0x10 /* Byte Count Unknown (SCSI3) */ #define SA_RPOS_BPU 0x04 /* Block Position Unknown */ #define SA_RPOS_PERR 0x02 /* Position Error (SCSI3) */ #define SA_RPOS_BPEW 0x01 /* Beyond Programmable Early Warning */ #define SA_RPOS_UNCERTAIN SA_RPOS_BPU u_int8_t partition; u_int8_t reserved[2]; u_int8_t firstblk[4]; u_int8_t lastblk[4]; u_int8_t reserved2; u_int8_t nbufblk[3]; u_int8_t nbufbyte[4]; }; struct scsi_tape_position_long_data { u_int8_t flags; #define SA_RPOS_LONG_BOP 0x80 /* Beginning of Partition */ #define SA_RPOS_LONG_EOP 0x40 /* End of Partition */ #define SA_RPOS_LONG_MPU 0x08 /* Mark Position Unknown */ #define SA_RPOS_LONG_LONU 0x04 /* Logical Object Number Unknown */ #define SA_RPOS_LONG_BPEW 0x01 /* Beyond Programmable Early Warning */ u_int8_t reserved[3]; u_int8_t partition[4]; u_int8_t logical_object_num[8]; u_int8_t logical_file_num[8]; u_int8_t set_id[8]; }; struct scsi_tape_position_ext_data { u_int8_t flags; #define SA_RPOS_EXT_BOP 0x80 /* Beginning of Partition */ #define SA_RPOS_EXT_EOP 0x40 /* End of Partition */ #define SA_RPOS_EXT_LOCU 0x20 /* Logical Object Count Unknown */ #define SA_RPOS_EXT_BYCU 0x10 /* Byte Count Unknown */ #define SA_RPOS_EXT_LOLU 0x04 /* Logical Object Location Unknown */ #define SA_RPOS_EXT_PERR 0x02 /* Position Error */ #define SA_RPOS_EXT_BPEW 0x01 /* Beyond Programmable Early Warning */ u_int8_t partition; u_int8_t length[2]; u_int8_t reserved; u_int8_t num_objects[3]; u_int8_t first_object[8]; u_int8_t last_object[8]; u_int8_t bytes_in_buffer[8]; }; struct scsi_tape_locate { u_int8_t opcode; u_int8_t byte1; #define SA_SPOS_IMMED 0x01 #define SA_SPOS_CP 0x02 #define SA_SPOS_BT 0x04 u_int8_t reserved1; u_int8_t blkaddr[4]; #define SA_SPOS_MAX_BLK 0xffffffff u_int8_t reserved2; u_int8_t partition; u_int8_t control; }; struct scsi_locate_16 { u_int8_t opcode; u_int8_t byte1; #define SA_LC_IMMEDIATE 0x01 #define SA_LC_CP 0x02 #define SA_LC_DEST_TYPE_MASK 0x38 #define 
SA_LC_DEST_TYPE_SHIFT 3 #define SA_LC_DEST_OBJECT 0x00 #define SA_LC_DEST_FILE 0x01 #define SA_LC_DEST_SET 0x02 #define SA_LC_DEST_EOD 0x03 u_int8_t byte2; #define SA_LC_BAM_IMPLICIT 0x00 #define SA_LC_BAM_EXPLICIT 0x01 u_int8_t partition; u_int8_t logical_id[8]; u_int8_t reserved[3]; u_int8_t control; }; struct scsi_report_density_support { u_int8_t opcode; u_int8_t byte1; #define SRDS_MEDIA 0x01 #define SRDS_MEDIUM_TYPE 0x02 u_int8_t reserved[5]; u_int8_t length[2]; #define SRDS_MAX_LENGTH 0xffff u_int8_t control; }; struct scsi_density_hdr { u_int8_t length[2]; u_int8_t reserved[2]; u_int8_t descriptor[]; }; struct scsi_density_data { u_int8_t primary_density_code; u_int8_t secondary_density_code; u_int8_t byte2; #define SDD_DLV 0x01 #define SDD_DEFLT 0x20 #define SDD_DUP 0x40 #define SDD_WRTOK 0x80 u_int8_t length[2]; #define SDD_DEFAULT_LENGTH 52 u_int8_t bits_per_mm[3]; u_int8_t media_width[2]; u_int8_t tracks[2]; u_int8_t capacity[4]; u_int8_t assigning_org[8]; u_int8_t density_name[8]; u_int8_t description[20]; }; struct scsi_medium_type_data { u_int8_t medium_type; u_int8_t reserved1; u_int8_t length[2]; #define SMTD_DEFAULT_LENGTH 52 u_int8_t num_density_codes; u_int8_t primary_density_codes[9]; u_int8_t media_width[2]; u_int8_t medium_length[2]; u_int8_t reserved2[2]; u_int8_t assigning_org[8]; u_int8_t medium_type_name[8]; u_int8_t description[20]; }; /* * Manufacturer-assigned Serial Number VPD page. * Current as of SSC-5r03, 28 September 2016. */ struct scsi_vpd_mfg_serial_number { u_int8_t device; u_int8_t page_code; #define SVPD_MFG_SERIAL_NUMBER_PAGE_CODE 0xB1 u_int8_t page_length[2]; u_int8_t mfg_serial_num[]; }; /* * Security Protocol Specific values for the Tape Data Encryption protocol * (0x20) used with SECURITY PROTOCOL IN. See below for values used with * SECURITY PROTOCOL OUT. Current as of SSC4r03. */ #define TDE_IN_SUPPORT_PAGE 0x0000 #define TDE_OUT_SUPPORT_PAGE 0x0001 #define TDE_DATA_ENC_CAP_PAGE 0x0010 #define TDE_SUPPORTED_KEY_FORMATS_PAGE 0x0011 #define TDE_DATA_ENC_MAN_CAP_PAGE 0x0012 #define TDE_DATA_ENC_STATUS_PAGE 0x0020 #define TDE_NEXT_BLOCK_ENC_STATUS_PAGE 0x0021 #define TDE_GET_ENC_MAN_ATTR_PAGE 0x0022 #define TDE_RANDOM_NUM_PAGE 0x0030 #define TDE_KEY_WRAP_PK_PAGE 0x0031 /* * Tape Data Encryption protocol pages used with SECURITY PROTOCOL IN and * SECURITY PROTOCOL OUT. */ /* * Tape Data Encryption In Support page (0x0000). */ struct tde_in_support_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t page_codes[]; }; /* * Tape Data Encryption Out Support page (0x0001). */ struct tde_out_support_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t page_codes[]; }; /* * Logical block encryption algorithm descriptor. This is reported in the * Data Encryption Capabilities page. 
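 * (The capabilities page defined below carries these descriptors back
 * to back in its alg_descs[] array; each descriptor is self-sized by
 * its desc_length field, so a reader advances descriptor by descriptor
 * until the page length is exhausted.)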
*/ struct tde_block_enc_alg_desc { uint8_t alg_index; uint8_t reserved1; uint8_t desc_length[2]; uint8_t byte4; #define TDE_BEA_AVFMV 0x80 #define TDE_BEA_SDK_C 0x40 #define TDE_BEA_MAC_C 0x20 #define TDE_BEA_DELB_C 0x10 #define TDE_BEA_DECRYPT_C_MASK 0x0c #define TDE_BEA_DECRYPT_C_EXT 0x0c #define TDE_BEA_DECRYPT_C_HARD 0x08 #define TDE_BEA_DECRYPT_C_SOFT 0x04 #define TDE_BEA_DECRYPT_C_NO_CAP 0x00 #define TDE_BEA_ENCRYPT_C_MASK 0x03 #define TDE_BEA_ENCRYPT_C_EXT 0x03 #define TDE_BEA_ENCRYPT_C_HARD 0x02 #define TDE_BEA_ENCRYPT_C_SOFT 0x01 #define TDE_BEA_ENCRYPT_C_NO_CAP 0x00 uint8_t byte5; #define TDE_BEA_AVFCLP_MASK 0xc0 #define TDE_BEA_AVFCLP_VALID 0x80 #define TDE_BEA_AVFCLP_NOT_VALID 0x40 #define TDE_BEA_AVFCLP_NOT_APP 0x00 #define TDE_BEA_NONCE_C_MASK 0x30 #define TDE_BEA_NONCE_C_SUPPORTED 0x30 #define TDE_BEA_NONCE_C_PROVIDED 0x20 #define TDE_BEA_NONCE_C_GENERATED 0x10 #define TDE_BEA_NONCE_C_NOT_REQUIRED 0x00 #define TDE_BEA_KADF_C 0x08 #define TDE_BEA_VCELB_C 0x04 #define TDE_BEA_UKADF 0x02 #define TDE_BEA_AKADF 0x01 uint8_t max_unauth_key_bytes[2]; uint8_t max_auth_key_bytes[2]; uint8_t lbe_key_size[2]; uint8_t byte12; #define TDE_BEA_DKAD_C_MASK 0xc0 #define TDE_BEA_DKAD_C_CAPABLE 0xc0 #define TDE_BEA_DKAD_C_NOT_ALLOWED 0x80 #define TDE_BEA_DKAD_C_REQUIRED 0x40 #define TDE_BEA_EEMC_C_MASK 0x30 #define TDE_BEA_EEMC_C_ALLOWED 0x20 #define TDE_BEA_EEMC_C_NOT_ALLOWED 0x10 #define TDE_BEA_EEMC_C_NOT_SPECIFIED 0x00 /* * Raw Decryption Mode Control Capabilities (RDMC_C) field. The * descriptions are too complex to represent as a simple name. */ #define TDE_BEA_RDMC_C_MASK 0x0e #define TDE_BEA_RDMC_C_MODE_7 0x0e #define TDE_BEA_RDMC_C_MODE_6 0x0c #define TDE_BEA_RDMC_C_MODE_5 0x0a #define TDE_BEA_RDMC_C_MODE_4 0x08 #define TDE_BEA_RDMC_C_MODE_1 0x02 #define TDE_BEA_EAREM 0x01 uint8_t byte13; #define TDE_BEA_MAX_EEDKS_MASK 0x0f uint8_t msdk_count[2]; uint8_t max_eedk_size[2]; uint8_t reserved2[2]; uint8_t security_algo_code[4]; }; /* * Data Encryption Capabilities page (0x0010). */ struct tde_data_enc_cap_page { uint8_t page_code[2]; uint8_t page_length; uint8_t byte4; #define DATA_ENC_CAP_EXTDECC_MASK 0x0c #define DATA_ENC_CAP_EXTDECC_NOT_REPORTED 0x00 #define DATA_ENC_CAP_EXTDECC_NOT_CAPABLE 0x04 #define DATA_ENC_CAP_EXTDECC_CAPABLE 0x08 #define DATA_ENC_CAP_CFG_P_MASK 0x03 #define DATA_ENC_CAP_CFG_P_NOT_REPORTED 0x00 #define DATA_ENC_CAP_CFG_P_ALLOWED 0x01 #define DATA_ENC_CAP_CFG_P_NOT_ALLOWED 0x02 uint8_t reserved[15]; struct tde_block_enc_alg_desc alg_descs[]; }; /* * Tape Data Encryption Supported Key Formats page (0x0011). */ struct tde_supported_key_formats_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t key_formats_list[]; }; /* * Tape Data Encryption Management Capabilities page (0x0012). */ struct tde_data_enc_man_cap_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t byte4; #define TDE_DEMC_LOCK_C 0x01 uint8_t byte5; #define TDE_DEMC_CKOD_C 0x04 #define TDE_DEMC_CKORP_C 0x02 #define TDE_DEMC_CKORL_C 0x01 uint8_t reserved1; uint8_t byte7; #define TDE_DEMC_AITN_C 0x04 #define TDE_DEMC_LOCAL_C 0x02 #define TDE_DEMC_PUBLIC_C 0x01 uint8_t reserved2[8]; }; /* * Tape Data Encryption Status Page (0x0020). 
*/ struct tde_data_enc_status_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t scope; #define TDE_DES_IT_NEXUS_SCOPE_MASK 0xe0 #define TDE_DES_LBE_SCOPE_MASK 0x07 uint8_t encryption_mode; uint8_t decryption_mode; uint8_t algo_index; uint8_t key_instance_counter[4]; uint8_t byte12; #define TDE_DES_PARAM_CTRL_MASK 0x70 #define TDE_DES_PARAM_CTRL_MGMT 0x40 #define TDE_DES_PARAM_CTRL_CHANGER 0x30 #define TDE_DES_PARAM_CTRL_DRIVE 0x20 #define TDE_DES_PARAM_CTRL_EXT 0x10 #define TDE_DES_PARAM_CTRL_NOT_REPORTED 0x00 #define TDE_DES_VCELB 0x08 #define TDE_DES_CEEMS_MASK 0x06 #define TDE_DES_RDMD 0x01 uint8_t enc_params_kad_format; uint8_t asdk_count[2]; uint8_t reserved[8]; uint8_t key_assoc_data_desc[]; }; /* * Tape Data Encryption Next Block Encryption Status page (0x0021). */ struct tde_next_block_enc_status_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t logical_obj_number[8]; uint8_t status; #define TDE_NBES_COMP_STATUS_MASK 0xf0 #define TDE_NBES_COMP_INCAPABLE 0x00 #define TDE_NBES_COMP_NOT_YET 0x10 #define TDE_NBES_COMP_NOT_A_BLOCK 0x20 #define TDE_NBES_COMP_NOT_COMPRESSED 0x30 #define TDE_NBES_COMP_COMPRESSED 0x40 #define TDE_NBES_ENC_STATUS_MASK 0x0f #define TDE_NBES_ENC_INCAPABLE 0x00 #define TDE_NBES_ENC_NOT_YET 0x01 #define TDE_NBES_ENC_NOT_A_BLOCK 0x02 #define TDE_NBES_ENC_NOT_ENCRYPTED 0x03 #define TDE_NBES_ENC_ALG_NOT_SUPPORTED 0x04 #define TDE_NBES_ENC_SUPPORTED_ALG 0x05 #define TDE_NBES_ENC_NO_KEY 0x06 uint8_t algo_index; uint8_t byte14; #define TDE_NBES_EMES 0x02 #define TDE_NBES_RDMDS 0x01 uint8_t next_block_kad_format; uint8_t key_assoc_data_desc[]; }; /* * Tape Data Encryption Get Encryption Management Attributes page (0x0022). */ struct tde_get_enc_man_attr_page { uint8_t page_code[2]; uint8_t reserved[3]; uint8_t byte5; #define TDE_GEMA_CAOD 0x01 uint8_t page_length[2]; uint8_t enc_mgmt_attr_desc[]; }; /* * Tape Data Encryption Random Number page (0x0030). */ struct tde_random_num_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t random_number[32]; }; /* * Tape Data Encryption Device Server Key Wrapping Public Key page (0x0031). */ struct tde_key_wrap_pk_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t public_key_type[4]; uint8_t public_key_format[4]; uint8_t public_key_length[2]; uint8_t public_key[]; }; /* * Security Protocol Specific values for the Tape Data Encryption protocol * (0x20) used with SECURITY PROTOCOL OUT. See above for values used with * SECURITY PROTOCOL IN. Current as of SSCr03. */ #define TDE_SET_DATA_ENC_PAGE 0x0010 #define TDE_SA_ENCAP_PAGE 0x0011 #define TDE_SET_ENC_MGMT_ATTR_PAGE 0x0022 /* * Tape Data Encryption Set Data Encryption page (0x0010). 
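 * (As an illustration of the fields defined below: an application that
 * wants encryption on would typically set encryption_mode to
 * TDE_SDE_ENC_MODE_ENCRYPT, decryption_mode to TDE_SDE_DEC_MODE_DECRYPT,
 * and hand over the key with lbe_key_format set to TDE_SDE_KEY_PLAINTEXT,
 * subject to what the drive advertises in its capability pages.)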
*/ struct tde_set_data_enc_page { uint8_t page_code[2]; uint8_t page_length[2]; uint8_t byte4; #define TDE_SDE_SCOPE_MASK 0xe0 #define TDE_SDE_SCOPE_ALL_IT_NEXUS 0x80 #define TDE_SDE_SCOPE_LOCAL 0x40 #define TDE_SDE_SCOPE_PUBLIC 0x00 #define TDE_SDE_LOCK 0x01 uint8_t byte5; #define TDE_SDE_CEEM_MASK 0xc0 #define TDE_SDE_CEEM_ENCRYPT 0xc0 #define TDE_SDE_CEEM_EXTERNAL 0x80 #define TDE_SDE_CEEM_NO_CHECK 0x40 #define TDE_SDE_RDMC_MASK 0x30 #define TDE_SDE_RDMC_DISABLED 0x30 #define TDE_SDE_RDMC_ENABLED 0x20 #define TDE_SDE_RDMC_DEFAULT 0x00 #define TDE_SDE_SDK 0x08 #define TDE_SDE_CKOD 0x04 #define TDE_SDE_CKORP 0x02 #define TDE_SDE_CKORL 0x01 uint8_t encryption_mode; #define TDE_SDE_ENC_MODE_DISABLE 0x00 #define TDE_SDE_ENC_MODE_EXTERNAL 0x01 #define TDE_SDE_ENC_MODE_ENCRYPT 0x02 uint8_t decryption_mode; #define TDE_SDE_DEC_MODE_DISABLE 0x00 #define TDE_SDE_DEC_MODE_RAW 0x01 #define TDE_SDE_DEC_MODE_DECRYPT 0x02 #define TDE_SDE_DEC_MODE_MIXED 0x03 uint8_t algo_index; uint8_t lbe_key_format; #define TDE_SDE_KEY_PLAINTEXT 0x00 #define TDE_SDE_KEY_VENDOR_SPEC 0x01 #define TDE_SDE_KEY_PUBLIC_WRAP 0x02 #define TDE_SDE_KEY_ESP_SCSI 0x03 uint8_t kad_format; #define TDE_SDE_KAD_ASCII 0x02 #define TDE_SDE_KAD_BINARY 0x01 #define TDE_SDE_KAD_UNSPECIFIED 0x00 uint8_t reserved[7]; uint8_t lbe_key_length[2]; uint8_t lbe_key[]; }; /* * Used for the Vendor Specific key format (0x01). */ struct tde_key_format_vendor { uint8_t t10_vendor_id[8]; uint8_t vendor_key[]; }; /* * Used for the public key wrapped format (0x02). */ struct tde_key_format_public_wrap { uint8_t parameter_set[2]; #define TDE_PARAM_SET_RSA2048 0x0000 #define TDE_PARAM_SET_ECC521 0x0010 uint8_t label_length[2]; uint8_t label[]; }; /* * Tape Data Encryption SA Encapsulation page (0x0011). */ struct tde_sa_encap_page { uint8_t page_code[2]; uint8_t data_desc[]; }; /* * Tape Data Encryption Set Encryption Management Attributes page (0x0022). */ struct tde_set_enc_mgmt_attr_page { uint8_t page_code[2]; uint8_t reserved[3]; uint8_t byte5; #define TDE_SEMA_CAOD 0x01 uint8_t page_length[2]; uint8_t attr_desc[]; }; /* * Tape Data Encryption descriptor format. * SSC4r03 Section 8.5.4.2.1 Table 197 */ struct tde_data_enc_desc { uint8_t key_desc_type; #define TDE_KEY_DESC_WK_KAD 0x04 #define TDE_KEY_DESC_M_KAD 0x03 #define TDE_KEY_DESC_NONCE_VALUE 0x02 #define TDE_KEY_DESC_A_KAD 0x01 #define TDE_KEY_DESC_U_KAD 0x00 uint8_t byte2; #define TDE_KEY_DESC_AUTH_MASK 0x07 #define TDE_KEY_DESC_AUTH_FAILED 0x04 #define TDE_KEY_DESC_AUTH_SUCCESS 0x03 #define TDE_KEY_DESC_AUTH_NO_ATTEMPT 0x02 #define TDE_KEY_DESC_AUTH_U_KAD 0x01 uint8_t key_desc_length[2]; uint8_t key_desc[]; }; /* * Wrapped Key descriptor format. * SSC4r03 Section 8.5.4.3.1 Table 200 */ struct tde_wrapped_key_desc { uint8_t wrapped_key_type; #define TDE_WRAP_KEY_DESC_LENGTH 0x04 #define TDE_WRAP_KEY_DESC_IDENT 0x03 #define TDE_WRAP_KEY_DESC_INFO 0x02 #define TDE_WRAP_KEY_DESC_ENTITY_ID 0x01 #define TDE_WRAP_KEY_DESC_DEVICE_ID 0x00 uint8_t reserved; uint8_t wrapped_desc_length[2]; uint8_t wrapped_desc[]; }; /* * Encryption management attributes descriptor format. 
* SSC4r03 Section 8.5.4.4.1 Table 202 */ struct tde_enc_mgmt_attr_desc { uint8_t enc_mgmt_attr_type[2]; #define TDE_EMAD_DESIRED_KEY_MGR_OP 0x0000 #define TDE_EMAD_LOG_BLOCK_ENC_KEY_CRIT 0x0001 #define TDE_EMAD_LOG_BLOCK_ENC_KEY_WRAP 0x0002 uint8_t reserved; uint8_t byte2; #define TDE_EMAD_CRIT 0x80 uint8_t attr_length[2]; uint8_t attributes[]; #define TDE_EMAD_DESIRED_KEY_CREATE 0x0001 #define TDE_EMAD_DESIRED_KEY_RESOLVE 0x0002 }; /* * Logical block encryption key selection criteria descriptor format. * SSC4r03 Section 8.5.4.4.3.1 Table 206 */ struct tde_lb_enc_key_sel_desc { uint8_t lbe_key_sel_crit_type[2]; /* * The CRIT bit is the top bit of the first byte of the type. */ #define TDE_LBE_KEY_SEL_CRIT 0x80 #define TDE_LBE_KEY_SEL_ALGO 0x0001 #define TDE_LBE_KEY_SEL_ID 0x0002 uint8_t lbe_key_sel_crit_length[2]; uint8_t lbe_key_sel_crit[]; }; /* * Logical block encryption key wrapping attribute descriptor format. * SSC4r03 Section 8.5.4.4.4.1 Table 209 */ struct tde_lb_enc_key_wrap_desc { uint8_t lbe_key_wrap_type[2]; /* * The CRIT bit is the top bit of the first byte of the type. */ #define TDE_LBE_KEY_WRAP_CRIT 0x80 #define TDE_LBE_KEY_WRAP_KEKS 0x0001 uint8_t lbe_key_wrap_length[2]; uint8_t lbe_key_wrap_attr[]; }; /* * Opcodes */ #define REWIND 0x01 #define FORMAT_MEDIUM 0x04 #define READ_BLOCK_LIMITS 0x05 #define SA_READ 0x08 #define SA_WRITE 0x0A #define SET_CAPACITY 0x0B #define WRITE_FILEMARKS 0x10 #define SPACE 0x11 #define RESERVE_UNIT 0x16 #define RELEASE_UNIT 0x17 #define ERASE 0x19 #define LOAD_UNLOAD 0x1B #define LOCATE 0x2B #define READ_POSITION 0x34 #define REPORT_DENSITY_SUPPORT 0x44 #define ALLOW_OVERWRITE 0x82 #define LOCATE_16 0x92 /* * Tape specific density codes- only enough of them here to recognize * some specific older units so we can choose 2FM@EOD or FIXED blocksize * quirks. 
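 * (For example, a unit reporting SCSI_DENSITY_QIC_24 is an old
 * quarter-inch cartridge drive, exactly the kind of device that may
 * need the fixed-blocksize quirk.)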
*/ #define SCSI_DENSITY_HALFINCH_800 0x01 #define SCSI_DENSITY_HALFINCH_1600 0x02 #define SCSI_DENSITY_HALFINCH_6250 0x03 #define SCSI_DENSITY_HALFINCH_6250C 0xC3 /* HP Compressed 6250 */ #define SCSI_DENSITY_QIC_11_4TRK 0x04 #define SCSI_DENSITY_QIC_11_9TRK 0x84 /* Vendor Unique Emulex */ #define SCSI_DENSITY_QIC_24 0x05 #define SCSI_DENSITY_HALFINCH_PE 0x06 #define SCSI_DENSITY_QIC_120 0x0f #define SCSI_DENSITY_QIC_150 0x10 #define SCSI_DENSITY_QIC_525_320 0x11 #define SCSI_DENSITY_QIC_1320 0x12 #define SCSI_DENSITY_QIC_2GB 0x22 #define SCSI_DENSITY_QIC_4GB 0x26 #define SCSI_DENSITY_QIC_3080 0x29 __BEGIN_DECLS void scsi_read_block_limits(struct ccb_scsiio *, u_int32_t, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t, struct scsi_read_block_limits_data *, u_int8_t , u_int32_t); void scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, int sli, int fixed, u_int32_t length, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, u_int8_t sense_len, u_int32_t timeout); void scsi_space(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, scsi_space_code code, u_int32_t count, u_int8_t sense_len, u_int32_t timeout); void scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int eot, int reten, int load, u_int8_t sense_len, u_int32_t timeout); void scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int setmark, u_int32_t num_marks, u_int8_t sense_len, u_int32_t timeout); void scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int third_party, int third_party_id, u_int8_t sense_len, u_int32_t timeout, int reserve); void scsi_erase(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immediate, int long_erase, u_int8_t sense_len, u_int32_t timeout); void scsi_data_comp_page(struct scsi_data_compression_page *page, u_int8_t dce, u_int8_t dde, u_int8_t red, u_int32_t comp_algorithm, u_int32_t decomp_algorithm); void scsi_read_position(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int hardsoft, struct scsi_tape_position_data *sbp, u_int8_t sense_len, u_int32_t timeout); void scsi_read_position_10(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int service_action, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout); void scsi_set_position(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int hardsoft, u_int32_t blkno, u_int8_t sense_len, u_int32_t timeout); void scsi_locate_10(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int immed, int cp, int hard, int64_t partition, u_int32_t block_address, int sense_len, u_int32_t timeout); void scsi_locate_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb 
*), u_int8_t tag_action, int immed, int cp, u_int8_t dest_type, int bam, int64_t partition, u_int64_t logical_id, int sense_len, u_int32_t timeout); void scsi_report_density_support(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int media, int medium_type, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout); void scsi_set_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int byte1, u_int32_t proportion, u_int32_t sense_len, u_int32_t timeout); void scsi_format_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int byte1, int byte2, u_int8_t *data_ptr, u_int32_t length, u_int32_t sense_len, u_int32_t timeout); void scsi_allow_overwrite(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int allow_overwrite, int partition, u_int64_t logical_id, u_int32_t sense_len, u_int32_t timeout); __END_DECLS #endif /* _SCSI_SCSI_SA_H */ Index: head/sys/cam/scsi/scsi_sg.c =================================================================== --- head/sys/cam/scsi/scsi_sg.c (revision 326264) +++ head/sys/cam/scsi/scsi_sg.c (revision 326265) @@ -1,1018 +1,1020 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2007 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * scsi_sg peripheral driver. This driver is meant to implement the Linux * SG passthrough interface for SCSI. 
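 * (In the Linux interface this emulates, a userland program fills in a
 * struct sg_io_hdr, giving the CDB, data buffer, transfer direction and
 * timeout, and submits it with the SG_IO ioctl; both sg_io_hdr and the
 * older sg_header read/write protocol appear in the request union below.)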
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { SG_FLAG_LOCKED = 0x01, SG_FLAG_INVALID = 0x02 } sg_flags; typedef enum { SG_STATE_NORMAL } sg_state; typedef enum { SG_RDWR_FREE, SG_RDWR_INPROG, SG_RDWR_DONE } sg_rdwr_state; typedef enum { SG_CCB_RDWR_IO } sg_ccb_types; #define ccb_type ppriv_field0 #define ccb_rdwr ppriv_ptr1 struct sg_rdwr { TAILQ_ENTRY(sg_rdwr) rdwr_link; int tag; int state; int buf_len; char *buf; union ccb *ccb; union { struct sg_header hdr; struct sg_io_hdr io_hdr; } hdr; }; struct sg_softc { sg_state state; sg_flags flags; int open_count; u_int maxio; struct devstat *device_stats; TAILQ_HEAD(, sg_rdwr) rdwr_done; struct cdev *dev; int sg_timeout; int sg_user_timeout; uint8_t pd_type; union ccb saved_ccb; }; static d_open_t sgopen; static d_close_t sgclose; static d_ioctl_t sgioctl; static d_write_t sgwrite; static d_read_t sgread; static periph_init_t sginit; static periph_ctor_t sgregister; static periph_oninv_t sgoninvalidate; static periph_dtor_t sgcleanup; static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static void sgdone(struct cam_periph *periph, union ccb *done_ccb); static int sgsendccb(struct cam_periph *periph, union ccb *ccb); static int sgsendrdwr(struct cam_periph *periph, union ccb *ccb); static int sgerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags); static void sg_scsiio_status(struct ccb_scsiio *csio, u_short *hoststat, u_short *drvstat); static int scsi_group_len(u_char cmd); static struct periph_driver sgdriver = { sginit, "sg", TAILQ_HEAD_INITIALIZER(sgdriver.units), /* gen */ 0 }; PERIPHDRIVER_DECLARE(sg, sgdriver); static struct cdevsw sg_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT | D_TRACKCLOSE, .d_open = sgopen, .d_close = sgclose, .d_ioctl = sgioctl, .d_write = sgwrite, .d_read = sgread, .d_name = "sg", }; static int sg_version = 30125; static void sginit(void) { cam_status status; /* * Install a global async callback. This callback will receive async * callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, sgasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sg: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void sgdevgonecb(void *arg) { struct cam_periph *periph; struct sg_softc *softc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct sg_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault.
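 *
 * A condensed sketch of this drop-lock-after-final-release pattern
 * (an editorial illustration, not part of this commit):
 *
 *	struct mtx *mtx;
 *
 *	mtx = cam_periph_mtx(periph);		(save while periph is valid)
 *	mtx_lock(mtx);
 *	cam_periph_release_locked(periph);	(periph may be freed here)
 *	mtx_unlock(mtx);			(uses the saved lock pointer,
 *						 never touches periph again)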
*/ mtx_unlock(mtx); } static void sgoninvalidate(struct cam_periph *periph) { struct sg_softc *softc; softc = (struct sg_softc *)periph->softc; /* * Deregister any async callbacks. */ xpt_register_async(0, sgasync, periph, periph->path); softc->flags |= SG_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, sgdevgonecb, periph); /* * XXX Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ } static void sgcleanup(struct cam_periph *periph) { struct sg_softc *softc; softc = (struct sg_softc *)periph->softc; devstat_remove_entry(softc->device_stats); free(softc, M_DEVBUF); } static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; /* * Allocate a peripheral instance for this device and * start the probe process. */ status = cam_periph_alloc(sgregister, sgoninvalidate, sgcleanup, NULL, "sg", CAM_PERIPH_BIO, path, sgasync, AC_FOUND_DEVICE, cgd); if ((status != CAM_REQ_CMP) && (status != CAM_REQ_INPROG)) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("sgasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status sgregister(struct cam_periph *periph, void *arg) { struct sg_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int no_tags, error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("sgregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = malloc(sizeof(*softc), M_DEVBUF, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("sgregister: Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->state = SG_STATE_NORMAL; softc->pd_type = SID_TYPE(&cgd->inq_data); softc->sg_timeout = SG_DEFAULT_TIMEOUT / SG_DEFAULT_HZ * hz; softc->sg_user_timeout = SG_DEFAULT_TIMEOUT; TAILQ_INIT(&softc->rdwr_done); periph->softc = softc; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ /* * We pass in 0 for all blocksize, since we don't know what the * blocksize of the device is, if it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("sg", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. 
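 *
 * Condensed, the reference's lifecycle is (an editorial sketch, not part
 * of this commit):
 *
 *	sgregister:	cam_periph_acquire(periph)
 *			make_dev_s(&args, &softc->dev, ...)
 *	sgoninvalidate:	destroy_dev_sched_cb(softc->dev, sgdevgonecb, periph)
 *	sgdevgonecb:	cam_periph_release_locked(periph)
 *
 * so the reference taken here is only returned once devfs guarantees no
 * further open or close calls can arrive for the node.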
*/ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &sg_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } if (periph->unit_number < 26) { (void)make_dev_alias(softc->dev, "sg%c", periph->unit_number + 'a'); } else { (void)make_dev_alias(softc->dev, "sg%c%c", ((periph->unit_number / 26) - 1) + 'a', (periph->unit_number % 26) + 'a'); } cam_periph_lock(periph); /* * Add as async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, sgasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return (CAM_REQ_CMP); } static void sgdone(struct cam_periph *periph, union ccb *done_ccb) { struct sg_softc *softc; struct ccb_scsiio *csio; softc = (struct sg_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case SG_CCB_RDWR_IO: { struct sg_rdwr *rdwr; int state; devstat_end_transaction(softc->device_stats, csio->dxfer_len, csio->tag_action & 0xf, ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (csio->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, NULL); rdwr = done_ccb->ccb_h.ccb_rdwr; state = rdwr->state; rdwr->state = SG_RDWR_DONE; wakeup(rdwr); break; } default: panic("unknown sg CCB type"); } } static int sgopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct sg_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release(periph); return (error); } cam_periph_lock(periph); softc = (struct sg_softc *)periph->softc; if (softc->flags & SG_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (ENXIO); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int sgclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct sg_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
*/ mtx_unlock(mtx); return (0); } static int sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union ccb *ccb; struct ccb_scsiio *csio; struct cam_periph *periph; struct sg_softc *softc; struct sg_io_hdr *req; int dir, error; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct sg_softc *)periph->softc; error = 0; switch (cmd) { case SG_GET_VERSION_NUM: { int *version = (int *)arg; *version = sg_version; break; } case SG_SET_TIMEOUT: { u_int user_timeout = *(u_int *)arg; softc->sg_user_timeout = user_timeout; softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz; break; } case SG_GET_TIMEOUT: /* * The value is returned directly to the syscall. */ td->td_retval[0] = softc->sg_user_timeout; error = 0; break; case SG_IO: req = (struct sg_io_hdr *)arg; if (req->cmd_len > IOCDBLEN) { error = EINVAL; break; } if (req->iovec_count != 0) { error = EOPNOTSUPP; break; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = copyin(req->cmdp, &csio->cdb_io.cdb_bytes, req->cmd_len); if (error) { xpt_release_ccb(ccb); break; } switch(req->dxfer_direction) { case SG_DXFER_TO_DEV: dir = CAM_DIR_OUT; break; case SG_DXFER_FROM_DEV: dir = CAM_DIR_IN; break; case SG_DXFER_TO_FROM_DEV: dir = CAM_DIR_IN | CAM_DIR_OUT; break; case SG_DXFER_NONE: default: dir = CAM_DIR_NONE; break; } cam_fill_csio(csio, /*retries*/1, sgdone, dir|CAM_DEV_QFRZDIS, MSG_SIMPLE_Q_TAG, req->dxferp, req->dxfer_len, req->mx_sb_len, req->cmd_len, req->timeout); error = sgsendccb(periph, ccb); if (error) { req->host_status = DID_ERROR; req->driver_status = DRIVER_INVALID; xpt_release_ccb(ccb); break; } req->status = csio->scsi_status; req->masked_status = (csio->scsi_status >> 1) & 0x7f; sg_scsiio_status(csio, &req->host_status, &req->driver_status); req->resid = csio->resid; req->duration = csio->ccb_h.timeout; req->info = 0; if ((csio->ccb_h.status & CAM_AUTOSNS_VALID) && (req->sbp != NULL)) { req->sb_len_wr = req->mx_sb_len - csio->sense_resid; error = copyout(&csio->sense_data, req->sbp, req->sb_len_wr); } xpt_release_ccb(ccb); break; case SG_GET_RESERVED_SIZE: { int *size = (int *)arg; *size = DFLTPHYS; break; } case SG_GET_SCSI_ID: { struct sg_scsi_id *id = (struct sg_scsi_id *)arg; id->host_no = cam_sim_path(xpt_path_sim(periph->path)); id->channel = xpt_path_path_id(periph->path); id->scsi_id = xpt_path_target_id(periph->path); id->lun = xpt_path_lun_id(periph->path); id->scsi_type = softc->pd_type; id->h_cmd_per_lun = 1; id->d_queue_depth = 1; id->unused[0] = 0; id->unused[1] = 0; break; } case SG_GET_SG_TABLESIZE: { int *size = (int *)arg; *size = 0; break; } case SG_EMULATED_HOST: case SG_SET_TRANSFORM: case SG_GET_TRANSFORM: case SG_GET_NUM_WAITING: case SG_SCSI_RESET: case SG_GET_REQUEST_TABLE: case SG_SET_KEEP_ORPHAN: case SG_GET_KEEP_ORPHAN: case SG_GET_ACCESS_COUNT: case SG_SET_FORCE_LOW_DMA: case SG_GET_LOW_DMA: case SG_SET_FORCE_PACK_ID: case SG_GET_PACK_ID: case SG_SET_RESERVED_SIZE: case SG_GET_COMMAND_Q: case SG_SET_COMMAND_Q: case SG_SET_DEBUG: case SG_NEXT_CMD_LEN: default: #ifdef CAMDEBUG printf("sgioctl: rejecting cmd 0x%lx\n", cmd); #endif error = ENODEV; break; } cam_periph_unlock(periph); return (error); } static int sgwrite(struct cdev *dev, struct uio *uio, int ioflag) { union ccb *ccb; struct cam_periph *periph; struct ccb_scsiio *csio; struct sg_softc *sc; struct sg_header *hdr; struct sg_rdwr *rdwr; u_char cdb_cmd; char *buf; int error = 0, cdb_len, buf_len, dir; periph = dev->si_drv1; rdwr = malloc(sizeof(*rdwr), 
M_DEVBUF, M_WAITOK | M_ZERO); hdr = &rdwr->hdr.hdr; /* Copy in the header block and sanity check it */ if (uio->uio_resid < sizeof(*hdr)) { error = EINVAL; goto out_hdr; } error = uiomove(hdr, sizeof(*hdr), uio); if (error) goto out_hdr; /* XXX: We don't support SG 3.x read/write API. */ if (hdr->reply_len < 0) { error = ENODEV; goto out_hdr; } ccb = xpt_alloc_ccb(); if (ccb == NULL) { error = ENOMEM; goto out_hdr; } csio = &ccb->csio; /* * Copy in the CDB block. The designers of the interface didn't * bother to provide a size for this in the header, so we have to * figure it out ourselves. */ if (uio->uio_resid < 1) goto out_ccb; error = uiomove(&cdb_cmd, 1, uio); if (error) goto out_ccb; if (hdr->twelve_byte) cdb_len = 12; else cdb_len = scsi_group_len(cdb_cmd); /* * We've already read the first byte of the CDB and advanced the uio * pointer. Just read the rest. */ csio->cdb_io.cdb_bytes[0] = cdb_cmd; error = uiomove(&csio->cdb_io.cdb_bytes[1], cdb_len - 1, uio); if (error) goto out_ccb; /* * Now set up the data block. Again, the designers didn't bother * to make this reliable. */ buf_len = uio->uio_resid; if (buf_len != 0) { buf = malloc(buf_len, M_DEVBUF, M_WAITOK | M_ZERO); error = uiomove(buf, buf_len, uio); if (error) goto out_buf; dir = CAM_DIR_OUT; } else if (hdr->reply_len != 0) { buf = malloc(hdr->reply_len, M_DEVBUF, M_WAITOK | M_ZERO); buf_len = hdr->reply_len; dir = CAM_DIR_IN; } else { buf = NULL; buf_len = 0; dir = CAM_DIR_NONE; } cam_periph_lock(periph); sc = periph->softc; xpt_setup_ccb(&ccb->ccb_h, periph->path, CAM_PRIORITY_NORMAL); cam_fill_csio(csio, /*retries*/1, sgdone, dir|CAM_DEV_QFRZDIS, MSG_SIMPLE_Q_TAG, buf, buf_len, SG_MAX_SENSE, cdb_len, sc->sg_timeout); /* * Send off the command and hope that it works. This path does not * go through sgstart because the I/O is supposed to be asynchronous. */ rdwr->buf = buf; rdwr->buf_len = buf_len; rdwr->tag = hdr->pack_id; rdwr->ccb = ccb; rdwr->state = SG_RDWR_INPROG; ccb->ccb_h.ccb_rdwr = rdwr; ccb->ccb_h.ccb_type = SG_CCB_RDWR_IO; TAILQ_INSERT_TAIL(&sc->rdwr_done, rdwr, rdwr_link); error = sgsendrdwr(periph, ccb); cam_periph_unlock(periph); return (error); out_buf: free(buf, M_DEVBUF); out_ccb: xpt_free_ccb(ccb); out_hdr: free(rdwr, M_DEVBUF); return (error); } static int sgread(struct cdev *dev, struct uio *uio, int ioflag) { struct ccb_scsiio *csio; struct cam_periph *periph; struct sg_softc *sc; struct sg_header *hdr; struct sg_rdwr *rdwr; u_short hstat, dstat; int error, pack_len, reply_len, pack_id; periph = dev->si_drv1; /* XXX The pack len field needs to be updated and written out instead * of discarded. Not sure how to do that. 
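 *
 * For context, the userland side of this legacy (SG v2) read/write
 * protocol looks roughly like the following editorial sketch (not part
 * of this commit; the device path and CDB are examples).  A command is
 * submitted with write(2) as { struct sg_header, CDB bytes, out-data }
 * and collected with read(2) as { struct sg_header, in-data }:
 *
 *	struct sg_header hdr;
 *	char cmd[sizeof(hdr) + 6], reply[sizeof(hdr) + 36];
 *	int fd = open("/dev/sg0", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.reply_len = 36;		(in-data length, as consumed by
 *					 sgwrite() above)
 *	hdr.pack_id = 1;		(tag pairing the write with the read)
 *	memcpy(cmd, &hdr, sizeof(hdr));
 *	memset(cmd + sizeof(hdr), 0, 6);
 *	cmd[sizeof(hdr)] = 0x12;	(INQUIRY, 6-byte CDB)
 *	cmd[sizeof(hdr) + 4] = 36;	(allocation length)
 *	write(fd, cmd, sizeof(cmd));	(queues the command)
 *
 *	memcpy(reply, &hdr, sizeof(hdr));  (pack_id is consumed back below)
 *	read(fd, reply, sizeof(reply));	   (blocks until completion)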
*/ uio->uio_rw = UIO_WRITE; if ((error = uiomove(&pack_len, 4, uio)) != 0) return (error); if ((error = uiomove(&reply_len, 4, uio)) != 0) return (error); if ((error = uiomove(&pack_id, 4, uio)) != 0) return (error); uio->uio_rw = UIO_READ; cam_periph_lock(periph); sc = periph->softc; search: TAILQ_FOREACH(rdwr, &sc->rdwr_done, rdwr_link) { if (rdwr->tag == pack_id) break; } if ((rdwr == NULL) || (rdwr->state != SG_RDWR_DONE)) { if (cam_periph_sleep(periph, rdwr, PCATCH, "sgread", 0) == ERESTART) return (EAGAIN); goto search; } TAILQ_REMOVE(&sc->rdwr_done, rdwr, rdwr_link); cam_periph_unlock(periph); hdr = &rdwr->hdr.hdr; csio = &rdwr->ccb->csio; sg_scsiio_status(csio, &hstat, &dstat); hdr->host_status = hstat; hdr->driver_status = dstat; hdr->target_status = csio->scsi_status >> 1; switch (hstat) { case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: case DID_ERROR: default: hdr->result = EIO; break; } if (dstat == DRIVER_SENSE) { bcopy(&csio->sense_data, hdr->sense_buffer, min(csio->sense_len, SG_MAX_SENSE)); #ifdef CAMDEBUG scsi_sense_print(csio); #endif } error = uiomove(&hdr->result, sizeof(*hdr) - offsetof(struct sg_header, result), uio); if ((error == 0) && (hdr->result == 0)) error = uiomove(rdwr->buf, rdwr->buf_len, uio); cam_periph_lock(periph); xpt_free_ccb(rdwr->ccb); cam_periph_unlock(periph); free(rdwr->buf, M_DEVBUF); free(rdwr, M_DEVBUF); return (error); } static int sgsendccb(struct cam_periph *periph, union ccb *ccb) { struct sg_softc *softc; struct cam_periph_map_info mapinfo; int error; softc = periph->softc; bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. * The only CCB opcode that is possible here is XPT_SCSI_IO, no * need for additional checks. 
*/ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio); cam_periph_lock(periph); if (error) return (error); error = cam_periph_runccb(ccb, sgerror, CAM_RETRY_SELTO, SF_RETRY_UA, softc->device_stats); cam_periph_unmapmem(ccb, &mapinfo); return (error); } static int sgsendrdwr(struct cam_periph *periph, union ccb *ccb) { struct sg_softc *softc; softc = periph->softc; devstat_start_transaction(softc->device_stats, NULL); xpt_action(ccb); return (0); } static int sgerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags) { struct cam_periph *periph; struct sg_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct sg_softc *)periph->softc; return (cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static void sg_scsiio_status(struct ccb_scsiio *csio, u_short *hoststat, u_short *drvstat) { int status; status = csio->ccb_h.status; switch (status & CAM_STATUS_MASK) { case CAM_REQ_CMP: *hoststat = DID_OK; *drvstat = 0; break; case CAM_REQ_CMP_ERR: *hoststat = DID_ERROR; *drvstat = 0; break; case CAM_REQ_ABORTED: *hoststat = DID_ABORT; *drvstat = 0; break; case CAM_REQ_INVALID: *hoststat = DID_ERROR; *drvstat = DRIVER_INVALID; break; case CAM_DEV_NOT_THERE: *hoststat = DID_BAD_TARGET; *drvstat = 0; break; case CAM_SEL_TIMEOUT: *hoststat = DID_NO_CONNECT; *drvstat = 0; break; case CAM_CMD_TIMEOUT: *hoststat = DID_TIME_OUT; *drvstat = 0; break; case CAM_SCSI_STATUS_ERROR: *hoststat = DID_ERROR; *drvstat = 0; break; case CAM_SCSI_BUS_RESET: *hoststat = DID_RESET; *drvstat = 0; break; case CAM_UNCOR_PARITY: *hoststat = DID_PARITY; *drvstat = 0; break; case CAM_SCSI_BUSY: *hoststat = DID_BUS_BUSY; *drvstat = 0; break; default: *hoststat = DID_ERROR; *drvstat = DRIVER_ERROR; } if (status & CAM_AUTOSNS_VALID) *drvstat = DRIVER_SENSE; } static int scsi_group_len(u_char cmd) { int len[] = {6, 10, 10, 12, 12, 12, 10, 10}; int group; group = (cmd >> 5) & 0x7; return (len[group]); } Index: head/sys/cam/scsi/scsi_targ_bh.c =================================================================== --- head/sys/cam/scsi/scsi_targ_bh.c (revision 326264) +++ head/sys/cam/scsi/scsi_targ_bh.c (revision 326265) @@ -1,765 +1,767 @@ /*- * Implementation of the Target Mode 'Black Hole device' for CAM. * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1999 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers"); typedef enum { TARGBH_STATE_NORMAL, TARGBH_STATE_EXCEPTION, TARGBH_STATE_TEARDOWN } targbh_state; typedef enum { TARGBH_FLAG_NONE = 0x00, TARGBH_FLAG_LUN_ENABLED = 0x01 } targbh_flags; typedef enum { TARGBH_CCB_WORKQ } targbh_ccb_types; #define MAX_ACCEPT 8 #define MAX_IMMEDIATE 16 #define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */ /* Offsets into our private CCB area for storing accept information */ #define ccb_type ppriv_field0 #define ccb_descr ppriv_ptr1 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */ #define ccb_atio ppriv_ptr1 TAILQ_HEAD(ccb_queue, ccb_hdr); struct targbh_softc { struct ccb_queue pending_queue; struct ccb_queue work_queue; struct ccb_queue unknown_atio_queue; struct devstat device_stats; targbh_state state; targbh_flags flags; u_int init_level; u_int inq_data_len; struct ccb_accept_tio *accept_tio_list; struct ccb_hdr_slist immed_notify_slist; }; struct targbh_cmd_desc { struct ccb_accept_tio* atio_link; u_int data_resid; /* How much left to transfer */ u_int data_increment;/* Amount to send before next disconnect */ void* data; /* The data. 
Can be from backing_store or not */ void* backing_store;/* Backing store allocated for this descriptor*/ u_int max_size; /* Size of backing_store */ u_int32_t timeout; u_int8_t status; /* Status to return to initiator */ }; static struct scsi_inquiry_data no_lun_inq_data = { T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0, /* version */2, /* format version */2 }; static struct scsi_sense_data_fixed no_lun_sense_data = { SSD_CURRENT_ERROR|SSD_ERRCODE_VALID, 0, SSD_KEY_NOT_READY, { 0, 0, 0, 0 }, /*extra_len*/offsetof(struct scsi_sense_data_fixed, fru) - offsetof(struct scsi_sense_data_fixed, extra_len), { 0, 0, 0, 0 }, /* Logical Unit Not Supported */ /*ASC*/0x25, /*ASCQ*/0 }; static const int request_sense_size = offsetof(struct scsi_sense_data_fixed, fru); static periph_init_t targbhinit; static void targbhasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static cam_status targbhenlun(struct cam_periph *periph); static cam_status targbhdislun(struct cam_periph *periph); static periph_ctor_t targbhctor; static periph_dtor_t targbhdtor; static periph_start_t targbhstart; static void targbhdone(struct cam_periph *periph, union ccb *done_ccb); #ifdef NOTYET static int targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); #endif static struct targbh_cmd_desc* targbhallocdescr(void); static void targbhfreedescr(struct targbh_cmd_desc *buf); static struct periph_driver targbhdriver = { targbhinit, "targbh", TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(targbh, targbhdriver); static void targbhinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new path registered". */ status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED, targbhasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("targbh: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void targbhasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_path *new_path; struct ccb_pathinq *cpi; path_id_t bus_path_id; cam_status status; cpi = (struct ccb_pathinq *)arg; if (code == AC_PATH_REGISTERED) bus_path_id = cpi->ccb_h.path_id; else bus_path_id = xpt_path_path_id(path); /* * Allocate a peripheral instance for * this target instance. 
*/ status = xpt_create_path(&new_path, NULL, bus_path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("targbhasync: Unable to create path " "due to status 0x%x\n", status); return; } switch (code) { case AC_PATH_REGISTERED: { /* Only attach to controllers that support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) break; status = cam_periph_alloc(targbhctor, NULL, targbhdtor, targbhstart, "targbh", CAM_PERIPH_BIO, new_path, targbhasync, AC_PATH_REGISTERED, cpi); break; } case AC_PATH_DEREGISTERED: { struct cam_periph *periph; if ((periph = cam_periph_find(new_path, "targbh")) != NULL) cam_periph_invalidate(periph); break; } default: break; } xpt_free_path(new_path); } /* Attempt to enable our lun */ static cam_status targbhenlun(struct cam_periph *periph) { union ccb immed_ccb; struct targbh_softc *softc; cam_status status; int i; softc = (struct targbh_softc *)periph->softc; if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0) return (CAM_REQ_CMP); xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); immed_ccb.ccb_h.func_code = XPT_EN_LUN; /* Don't need support for any vendor specific commands */ immed_ccb.cel.grp6_len = 0; immed_ccb.cel.grp7_len = 0; immed_ccb.cel.enable = 1; xpt_action(&immed_ccb); status = immed_ccb.ccb_h.status; if (status != CAM_REQ_CMP) { xpt_print(periph->path, "targbhenlun - Enable Lun Rejected with status 0x%x\n", status); return (status); } softc->flags |= TARGBH_FLAG_LUN_ENABLED; /* * Build up a buffer of accept target I/O * operations for incoming selections. */ for (i = 0; i < MAX_ACCEPT; i++) { struct ccb_accept_tio *atio; atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_SCSIBH, M_NOWAIT); if (atio == NULL) { status = CAM_RESRC_UNAVAIL; break; } atio->ccb_h.ccb_descr = targbhallocdescr(); if (atio->ccb_h.ccb_descr == NULL) { free(atio, M_SCSIBH); status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL); atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; atio->ccb_h.cbfcnp = targbhdone; ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link = softc->accept_tio_list; softc->accept_tio_list = atio; xpt_action((union ccb *)atio); status = atio->ccb_h.status; if (status != CAM_REQ_INPROG) break; } if (i == 0) { xpt_print(periph->path, "targbhenlun - Could not allocate accept tio CCBs: status " "= 0x%x\n", status); targbhdislun(periph); return (CAM_REQ_CMP_ERR); } /* * Build up a buffer of immediate notify CCBs * so the SIM can tell us of asynchronous target mode events. 
*/ for (i = 0; i < MAX_ACCEPT; i++) { struct ccb_immediate_notify *inot; inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot), M_SCSIBH, M_NOWAIT); if (inot == NULL) { status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL); inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; inot->ccb_h.cbfcnp = targbhdone; SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h, periph_links.sle); xpt_action((union ccb *)inot); status = inot->ccb_h.status; if (status != CAM_REQ_INPROG) break; } if (i == 0) { xpt_print(periph->path, "targbhenlun - Could not allocate immediate notify " "CCBs: status = 0x%x\n", status); targbhdislun(periph); return (CAM_REQ_CMP_ERR); } return (CAM_REQ_CMP); } static cam_status targbhdislun(struct cam_periph *periph) { union ccb ccb; struct targbh_softc *softc; struct ccb_accept_tio* atio; struct ccb_hdr *ccb_h; softc = (struct targbh_softc *)periph->softc; if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0) return CAM_REQ_CMP; /* XXX Block for Continue I/O completion */ /* Kill off all ACCEPT and IMMEDIATE CCBs */ while ((atio = softc->accept_tio_list) != NULL) { softc->accept_tio_list = ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link; xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.cab.ccb_h.func_code = XPT_ABORT; ccb.cab.abort_ccb = (union ccb *)atio; xpt_action(&ccb); } while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) { SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle); xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.cab.ccb_h.func_code = XPT_ABORT; ccb.cab.abort_ccb = (union ccb *)ccb_h; xpt_action(&ccb); } /* * Disable this lun. */ xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.cel.ccb_h.func_code = XPT_EN_LUN; ccb.cel.enable = 0; xpt_action(&ccb); if (ccb.cel.ccb_h.status != CAM_REQ_CMP) printf("targbhdislun - Disabling lun on controller failed " "with status 0x%x\n", ccb.cel.ccb_h.status); else softc->flags &= ~TARGBH_FLAG_LUN_ENABLED; return (ccb.cel.ccb_h.status); } static cam_status targbhctor(struct cam_periph *periph, void *arg) { struct targbh_softc *softc; /* Allocate our per-instance private storage */ softc = (struct targbh_softc *)malloc(sizeof(*softc), M_SCSIBH, M_NOWAIT); if (softc == NULL) { printf("targctor: unable to malloc softc\n"); return (CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); TAILQ_INIT(&softc->pending_queue); TAILQ_INIT(&softc->work_queue); softc->accept_tio_list = NULL; SLIST_INIT(&softc->immed_notify_slist); softc->state = TARGBH_STATE_NORMAL; periph->softc = softc; softc->init_level++; if (targbhenlun(periph) != CAM_REQ_CMP) cam_periph_invalidate(periph); return (CAM_REQ_CMP); } static void targbhdtor(struct cam_periph *periph) { struct targbh_softc *softc; softc = (struct targbh_softc *)periph->softc; softc->state = TARGBH_STATE_TEARDOWN; targbhdislun(periph); switch (softc->init_level) { case 0: panic("targdtor - impossible init level"); case 1: /* FALLTHROUGH */ default: /* XXX Wait for callback of targbhdislun() */ cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2); free(softc, M_SCSIBH); break; } } static void targbhstart(struct cam_periph *periph, union ccb *start_ccb) { struct targbh_softc *softc; struct ccb_hdr *ccbh; struct ccb_accept_tio *atio; struct targbh_cmd_desc *desc; struct ccb_scsiio *csio; ccb_flags flags; softc = (struct targbh_softc *)periph->softc; ccbh = TAILQ_FIRST(&softc->work_queue); if (ccbh == NULL) { xpt_release_ccb(start_ccb); } else {
TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh, periph_links.tqe); atio = (struct ccb_accept_tio*)ccbh; desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr; /* Is this a tagged request? */ flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); csio = &start_ccb->csio; /* * If we are done with the transaction, tell the * controller to send status and perform a CMD_CMPLT. * If we have associated sense data, see if we can * send that too. */ if (desc->data_resid == desc->data_increment) { flags |= CAM_SEND_STATUS; if (atio->sense_len) { csio->sense_len = atio->sense_len; csio->sense_data = atio->sense_data; flags |= CAM_SEND_SENSE; } } cam_fill_ctio(csio, /*retries*/2, targbhdone, flags, (flags & CAM_TAG_ACTION_VALID)? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, desc->status, /*data_ptr*/desc->data_increment == 0 ? NULL : desc->data, /*dxfer_len*/desc->data_increment, /*timeout*/desc->timeout); /* Override our wildcard attachment */ start_ccb->ccb_h.target_id = atio->ccb_h.target_id; start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun; start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ; start_ccb->ccb_h.ccb_atio = atio; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Sending a CTIO\n")); xpt_action(start_ccb); /* * If the queue was frozen waiting for the response * to this ATIO (for instance disconnection was disallowed), * then release it now that our response has been queued. */ if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); atio->ccb_h.status &= ~CAM_DEV_QFRZN; } ccbh = TAILQ_FIRST(&softc->work_queue); } if (ccbh != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } static void targbhdone(struct cam_periph *periph, union ccb *done_ccb) { struct targbh_softc *softc; softc = (struct targbh_softc *)periph->softc; switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { struct ccb_accept_tio *atio; struct targbh_cmd_desc *descr; u_int8_t *cdb; int priority; atio = &done_ccb->atio; descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr; cdb = atio->cdb_io.cdb_bytes; if (softc->state == TARGBH_STATE_TEARDOWN || atio->ccb_h.status == CAM_REQ_ABORTED) { targbhfreedescr(descr); xpt_free_ccb(done_ccb); return; } /* * Determine the type of incoming command and * setup our buffer for a response. */ switch (cdb[0]) { case INQUIRY: { struct scsi_inquiry *inq; inq = (struct scsi_inquiry *)cdb; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Saw an inquiry!\n")); /* * Validate the command. We don't * support any VPD pages, so complain * if EVPD is set. */ if ((inq->byte2 & SI_EVPD) != 0 || inq->page_code != 0) { atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; /* * This needs to have other than a * no_lun_sense_data response. */ bcopy(&no_lun_sense_data, &atio->sense_data, min(sizeof(no_lun_sense_data), sizeof(atio->sense_data))); atio->sense_len = sizeof(no_lun_sense_data); descr->data_resid = 0; descr->data_increment = 0; descr->status = SCSI_STATUS_CHECK_COND; break; } /* * Direction is always relative * to the initiator.
*/ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_IN; descr->data = &no_lun_inq_data; descr->data_resid = MIN(sizeof(no_lun_inq_data), scsi_2btoul(inq->length)); descr->data_increment = descr->data_resid; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_OK; break; } case REQUEST_SENSE: { struct scsi_request_sense *rsense; rsense = (struct scsi_request_sense *)cdb; /* Refer to static sense data */ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_IN; descr->data = &no_lun_sense_data; descr->data_resid = request_sense_size; descr->data_resid = MIN(descr->data_resid, SCSI_CDB6_LEN(rsense->length)); descr->data_increment = descr->data_resid; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_OK; break; } default: /* Constant CA, tell initiator */ /* Direction is always relative to the initiator */ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; bcopy(&no_lun_sense_data, &atio->sense_data, min(sizeof(no_lun_sense_data), sizeof(atio->sense_data))); atio->sense_len = sizeof (no_lun_sense_data); descr->data_resid = 0; descr->data_increment = 0; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_CHECK_COND; break; } /* Queue us up to receive a Continue Target I/O ccb. */ if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) { TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.tqe); priority = 0; } else { TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, periph_links.tqe); priority = CAM_PRIORITY_NORMAL; } xpt_schedule(periph, priority); break; } case XPT_CONT_TARGET_IO: { struct ccb_accept_tio *atio; struct targbh_cmd_desc *desc; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Received completed CTIO\n")); atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio; desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr; TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h, periph_links.tqe); /* * We could check for CAM_SENT_SENSE being set here, * but since we're not maintaining any CA/UA state, * there's no point. */ atio->sense_len = 0; done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE; done_ccb->ccb_h.status &= ~CAM_SENT_SENSE; /* * Any errors will not change the data we return, * so make sure the queue is not left frozen. * XXX - At some point there may be errors that * leave us in a connected state with the * initiator... */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { printf("Releasing Queue\n"); cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } desc->data_resid -= desc->data_increment; xpt_release_ccb(done_ccb); if (softc->state != TARGBH_STATE_TEARDOWN) { /* * Send the original accept TIO back to the * controller to handle more work.
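 *
 * The steady-state flow, condensed (an editorial sketch, not part of
 * this commit):
 *
 *	ATIO completes	-> targbhdone() builds a response descriptor
 *			   and queues it on work_queue
 *	targbhstart()	-> cam_fill_ctio() -> xpt_action() sends the CTIO
 *	CTIO completes	-> targbhdone() restores the wildcard target/lun
 *			   and xpt_action()s the ATIO to rearm it for the
 *			   next initiator command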
*/ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Returning ATIO to target\n")); /* Restore wildcards */ atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; xpt_action((union ccb *)atio); break; } else { targbhfreedescr(desc); free(atio, M_SCSIBH); } break; } case XPT_IMMEDIATE_NOTIFY: { int frozen; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; if (softc->state == TARGBH_STATE_TEARDOWN || done_ccb->ccb_h.status == CAM_REQ_ABORTED) { printf("Freed an immediate notify\n"); xpt_free_ccb(done_ccb); } else { /* Requeue for another immediate event */ xpt_action(done_ccb); } if (frozen != 0) cam_release_devq(periph->path, /*relsim_flags*/0, /*opening reduction*/0, /*timeout*/0, /*getcount_only*/0); break; } default: panic("targbhdone: Unexpected ccb opcode"); break; } } #ifdef NOTYET static int targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { return 0; } #endif static struct targbh_cmd_desc* targbhallocdescr() { struct targbh_cmd_desc* descr; /* Allocate the targbh_descr structure */ descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr), M_SCSIBH, M_NOWAIT); if (descr == NULL) return (NULL); bzero(descr, sizeof(*descr)); /* Allocate buffer backing store */ descr->backing_store = malloc(MAX_BUF_SIZE, M_SCSIBH, M_NOWAIT); if (descr->backing_store == NULL) { free(descr, M_SCSIBH); return (NULL); } descr->max_size = MAX_BUF_SIZE; return (descr); } static void targbhfreedescr(struct targbh_cmd_desc *descr) { free(descr->backing_store, M_SCSIBH); free(descr, M_SCSIBH); } Index: head/sys/cam/scsi/scsi_target.c =================================================================== --- head/sys/cam/scsi/scsi_target.c (revision 326264) +++ head/sys/cam/scsi/scsi_target.c (revision 326265) @@ -1,1161 +1,1163 @@ /*- * Generic SCSI Target Kernel Mode Driver * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2002 Nate Lawson. * Copyright (c) 1998, 1999, 2001, 2002 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* Includes to support callout */ #include #include #include #include #include #include #include #include /* Transaction information attached to each CCB sent by the user */ struct targ_cmd_descr { struct cam_periph_map_info mapinfo; TAILQ_ENTRY(targ_cmd_descr) tqe; union ccb *user_ccb; int priority; int func_code; }; /* Offset into the private CCB area for storing our descriptor */ #define targ_descr periph_priv.entries[1].ptr TAILQ_HEAD(descr_queue, targ_cmd_descr); typedef enum { TARG_STATE_RESV = 0x00, /* Invalid state */ TARG_STATE_OPENED = 0x01, /* Device opened, softc initialized */ TARG_STATE_LUN_ENABLED = 0x02 /* Device enabled for a path */ } targ_state; /* Per-instance device software context */ struct targ_softc { /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */ struct ccb_queue pending_ccb_queue; /* Command descriptors awaiting CTIO resources from the XPT */ struct descr_queue work_queue; /* Command descriptors that have been aborted back to the user. */ struct descr_queue abort_queue; /* * Queue of CCBs that have been copied out to userland, but our * userland daemon has not yet seen. */ struct ccb_queue user_ccb_queue; struct cam_periph *periph; struct cam_path *path; targ_state state; u_int maxio; struct selinfo read_select; struct devstat device_stats; }; static d_open_t targopen; static d_read_t targread; static d_write_t targwrite; static d_ioctl_t targioctl; static d_poll_t targpoll; static d_kqfilter_t targkqfilter; static void targreadfiltdetach(struct knote *kn); static int targreadfilt(struct knote *kn, long hint); static struct filterops targread_filtops = { .f_isfd = 1, .f_detach = targreadfiltdetach, .f_event = targreadfilt, }; static struct cdevsw targ_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = targopen, .d_read = targread, .d_write = targwrite, .d_ioctl = targioctl, .d_poll = targpoll, .d_name = "targ", .d_kqfilter = targkqfilter }; static cam_status targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len); static cam_status targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len, int grp7_len); static cam_status targdisable(struct targ_softc *softc); static periph_ctor_t targctor; static periph_dtor_t targdtor; static periph_start_t targstart; static int targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr, union ccb *ccb); static int targsendccb(struct targ_softc *softc, union ccb *ccb, struct targ_cmd_descr *descr); static void targdone(struct cam_periph *periph, union ccb *done_ccb); static int targreturnccb(struct targ_softc *softc, union ccb *ccb); static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type, int priority); static void targfreeccb(struct targ_softc *softc, union ccb *ccb); static struct targ_cmd_descr * targgetdescr(struct targ_softc *softc); static periph_init_t targinit; static void targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void abort_all_pending(struct targ_softc *softc); static void notify_user(struct targ_softc *softc); static int targcamstatus(cam_status status); static size_t targccblen(xpt_opcode func_code); static struct periph_driver targdriver = { targinit, "targ", TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(targ, targdriver); static MALLOC_DEFINE(M_TARG, "TARG", "TARG data"); /* Disable LUN if enabled and 
teardown softc */ static void targcdevdtor(void *data) { struct targ_softc *softc; struct cam_periph *periph; softc = data; if (softc->periph == NULL) { printf("%s: destroying non-enabled target\n", __func__); free(softc, M_TARG); return; } /* * Acquire a hold on the periph so that it doesn't go away before * we are ready at the end of the function. */ periph = softc->periph; cam_periph_acquire(periph); cam_periph_lock(periph); (void)targdisable(softc); if (softc->periph != NULL) { cam_periph_invalidate(softc->periph); softc->periph = NULL; } cam_periph_unlock(periph); cam_periph_release(periph); free(softc, M_TARG); } /* * Create softc and initialize it. There is no locking here because a * periph doesn't get created until an ioctl is issued to do so, and * that can't happen until this method returns. */ static int targopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct targ_softc *softc; /* Allocate its softc, initialize it */ softc = malloc(sizeof(*softc), M_TARG, M_WAITOK | M_ZERO); softc->state = TARG_STATE_OPENED; softc->periph = NULL; softc->path = NULL; TAILQ_INIT(&softc->pending_ccb_queue); TAILQ_INIT(&softc->work_queue); TAILQ_INIT(&softc->abort_queue); TAILQ_INIT(&softc->user_ccb_queue); knlist_init_mtx(&softc->read_select.si_note, NULL); devfs_set_cdevpriv(softc, targcdevdtor); return (0); } /* Enable/disable LUNs, set debugging level */ static int targioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct targ_softc *softc; cam_status status; devfs_get_cdevpriv((void **)&softc); switch (cmd) { case TARGIOCENABLE: { struct ioc_enable_lun *new_lun; struct cam_path *path; new_lun = (struct ioc_enable_lun *)addr; status = xpt_create_path(&path, /*periph*/NULL, new_lun->path_id, new_lun->target_id, new_lun->lun_id); if (status != CAM_REQ_CMP) { printf("Couldn't create path, status %#x\n", status); break; } xpt_path_lock(path); status = targenable(softc, path, new_lun->grp6_len, new_lun->grp7_len); xpt_path_unlock(path); xpt_free_path(path); break; } case TARGIOCDISABLE: if (softc->periph == NULL) { status = CAM_DEV_NOT_THERE; break; } cam_periph_lock(softc->periph); status = targdisable(softc); cam_periph_unlock(softc->periph); break; case TARGIOCDEBUG: { struct ccb_debug cdbg; /* If no periph available, disallow debugging changes */ if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) { status = CAM_DEV_NOT_THERE; break; } bzero(&cdbg, sizeof cdbg); if (*((int *)addr) != 0) cdbg.flags = CAM_DEBUG_PERIPH; else cdbg.flags = CAM_DEBUG_NONE; xpt_setup_ccb(&cdbg.ccb_h, softc->path, CAM_PRIORITY_NORMAL); cdbg.ccb_h.func_code = XPT_DEBUG; cdbg.ccb_h.cbfcnp = targdone; xpt_action((union ccb *)&cdbg); status = cdbg.ccb_h.status & CAM_STATUS_MASK; break; } default: status = CAM_PROVIDE_FAIL; break; } return (targcamstatus(status)); } /* Writes are always ready, reads wait for user_ccb_queue or abort_queue */ static int targpoll(struct cdev *dev, int poll_events, struct thread *td) { struct targ_softc *softc; int revents; devfs_get_cdevpriv((void **)&softc); /* Poll for write() is always ok. */ revents = poll_events & (POLLOUT | POLLWRNORM); if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { /* Poll for read() depends on user and abort queues. */ cam_periph_lock(softc->periph); if (!TAILQ_EMPTY(&softc->user_ccb_queue) || !TAILQ_EMPTY(&softc->abort_queue)) { revents |= poll_events & (POLLIN | POLLRDNORM); } cam_periph_unlock(softc->periph); /* Only sleep if the user didn't poll for write. 
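 *
 * The userland counterpart, as an editorial sketch (not part of this
 * commit; targ_fd is a hypothetical descriptor for the targ node): a
 * target-mode daemon typically poll(2)s for readability and then collects
 * completed user CCB pointers with read(2), one pointer per
 * sizeof(union ccb *):
 *
 *	struct pollfd pfd = { .fd = targ_fd, .events = POLLIN };
 *	union ccb *ccbp;
 *
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN) != 0)
 *		read(targ_fd, &ccbp, sizeof(ccbp));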
*/ if (revents == 0) selrecord(td, &softc->read_select); } return (revents); } static int targkqfilter(struct cdev *dev, struct knote *kn) { struct targ_softc *softc; devfs_get_cdevpriv((void **)&softc); kn->kn_hook = (caddr_t)softc; kn->kn_fop = &targread_filtops; knlist_add(&softc->read_select.si_note, kn, 0); return (0); } static void targreadfiltdetach(struct knote *kn) { struct targ_softc *softc; softc = (struct targ_softc *)kn->kn_hook; knlist_remove(&softc->read_select.si_note, kn, 0); } /* Notify the user's kqueue when the user queue or abort queue gets a CCB */ static int targreadfilt(struct knote *kn, long hint) { struct targ_softc *softc; int retval; softc = (struct targ_softc *)kn->kn_hook; cam_periph_lock(softc->periph); retval = !TAILQ_EMPTY(&softc->user_ccb_queue) || !TAILQ_EMPTY(&softc->abort_queue); cam_periph_unlock(softc->periph); return (retval); } /* Send the HBA the enable/disable message */ static cam_status targendislun(struct cam_path *path, int enable, int grp6_len, int grp7_len) { struct ccb_en_lun en_ccb; cam_status status; /* Tell the lun to begin answering selects */ xpt_setup_ccb(&en_ccb.ccb_h, path, CAM_PRIORITY_NORMAL); en_ccb.ccb_h.func_code = XPT_EN_LUN; /* Don't need support for any vendor specific commands */ en_ccb.grp6_len = grp6_len; en_ccb.grp7_len = grp7_len; en_ccb.enable = enable ? 1 : 0; xpt_action((union ccb *)&en_ccb); status = en_ccb.ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { xpt_print(path, "%sable lun CCB rejected, status %#x\n", enable ? "en" : "dis", status); } return (status); } /* Enable target mode on a LUN, given its path */ static cam_status targenable(struct targ_softc *softc, struct cam_path *path, int grp6_len, int grp7_len) { struct cam_periph *periph; struct ccb_pathinq cpi; cam_status status; if ((softc->state & TARG_STATE_LUN_ENABLED) != 0) return (CAM_LUN_ALRDY_ENA); /* Make sure SIM supports target mode */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); status = cpi.ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { printf("pathinq failed, status %#x\n", status); goto enable_fail; } if ((cpi.target_sprt & PIT_PROCESSOR) == 0) { printf("controller does not support target mode\n"); status = CAM_FUNC_NOTAVAIL; goto enable_fail; } if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ /* Destroy any periph on our path if it is disabled */ periph = cam_periph_find(path, "targ"); if (periph != NULL) { struct targ_softc *del_softc; del_softc = (struct targ_softc *)periph->softc; if ((del_softc->state & TARG_STATE_LUN_ENABLED) == 0) { cam_periph_invalidate(del_softc->periph); del_softc->periph = NULL; } else { printf("Requested path still in use by targ%d\n", periph->unit_number); status = CAM_LUN_ALRDY_ENA; goto enable_fail; } } /* Create a periph instance attached to this path */ status = cam_periph_alloc(targctor, NULL, targdtor, targstart, "targ", CAM_PERIPH_BIO, path, targasync, 0, softc); if (status != CAM_REQ_CMP) { printf("cam_periph_alloc failed, status %#x\n", status); goto enable_fail; } /* Ensure that the periph now exists. 
*/ if (cam_periph_find(path, "targ") == NULL) { panic("targenable: succeeded but no periph?"); /* NOTREACHED */ } /* Send the enable lun message */ status = targendislun(path, /*enable*/1, grp6_len, grp7_len); if (status != CAM_REQ_CMP) { printf("enable lun failed, status %#x\n", status); goto enable_fail; } softc->state |= TARG_STATE_LUN_ENABLED; enable_fail: return (status); } /* Disable this softc's target instance if enabled */ static cam_status targdisable(struct targ_softc *softc) { cam_status status; if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) return (CAM_REQ_CMP); CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targdisable\n")); /* Abort any ccbs pending on the controller */ abort_all_pending(softc); /* Disable this lun */ status = targendislun(softc->path, /*enable*/0, /*grp6_len*/0, /*grp7_len*/0); if (status == CAM_REQ_CMP) softc->state &= ~TARG_STATE_LUN_ENABLED; else printf("Disable lun failed, status %#x\n", status); return (status); } /* Initialize a periph (called from cam_periph_alloc) */ static cam_status targctor(struct cam_periph *periph, void *arg) { struct targ_softc *softc; /* Store pointer to softc for periph-driven routines */ softc = (struct targ_softc *)arg; periph->softc = softc; softc->periph = periph; softc->path = periph->path; return (CAM_REQ_CMP); } static void targdtor(struct cam_periph *periph) { struct targ_softc *softc; struct ccb_hdr *ccb_h; struct targ_cmd_descr *descr; softc = (struct targ_softc *)periph->softc; /* * targdisable() aborts CCBs back to the user and leaves them * on user_ccb_queue and abort_queue in case the user is still * interested in them. We free them now. */ while ((ccb_h = TAILQ_FIRST(&softc->user_ccb_queue)) != NULL) { TAILQ_REMOVE(&softc->user_ccb_queue, ccb_h, periph_links.tqe); targfreeccb(softc, (union ccb *)ccb_h); } while ((descr = TAILQ_FIRST(&softc->abort_queue)) != NULL) { TAILQ_REMOVE(&softc->abort_queue, descr, tqe); free(descr, M_TARG); } softc->periph = NULL; softc->path = NULL; periph->softc = NULL; } /* Receive CCBs from user mode proc and send them to the HBA */ static int targwrite(struct cdev *dev, struct uio *uio, int ioflag) { union ccb *user_ccb; struct targ_softc *softc; struct targ_cmd_descr *descr; int write_len, error; int func_code, priority; devfs_get_cdevpriv((void **)&softc); write_len = error = 0; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("write - uio_resid %zd\n", uio->uio_resid)); while (uio->uio_resid >= sizeof(user_ccb) && error == 0) { union ccb *ccb; error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio); if (error != 0) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("write - uiomove failed (%d)\n", error)); break; } priority = fuword32(&user_ccb->ccb_h.pinfo.priority); if (priority == CAM_PRIORITY_NONE) { error = EINVAL; break; } func_code = fuword32(&user_ccb->ccb_h.func_code); switch (func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: cam_periph_lock(softc->periph); ccb = targgetccb(softc, func_code, priority); descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr; descr->user_ccb = user_ccb; descr->func_code = func_code; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Sent ATIO/INOT (%p)\n", user_ccb)); xpt_action(ccb); TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, &ccb->ccb_h, periph_links.tqe); cam_periph_unlock(softc->periph); break; default: cam_periph_lock(softc->periph); if ((func_code & XPT_FC_QUEUED) != 0) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Sending queued ccb %#x (%p)\n", func_code, user_ccb)); descr = targgetdescr(softc); 
descr->user_ccb = user_ccb; descr->priority = priority; descr->func_code = func_code; TAILQ_INSERT_TAIL(&softc->work_queue, descr, tqe); xpt_schedule(softc->periph, priority); } else { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Sending inline ccb %#x (%p)\n", func_code, user_ccb)); ccb = targgetccb(softc, func_code, priority); descr = (struct targ_cmd_descr *) ccb->ccb_h.targ_descr; descr->user_ccb = user_ccb; descr->priority = priority; descr->func_code = func_code; if (targusermerge(softc, descr, ccb) != EFAULT) targsendccb(softc, ccb, descr); targreturnccb(softc, ccb); } cam_periph_unlock(softc->periph); break; } write_len += sizeof(user_ccb); } /* * If we've successfully taken in some amount of * data, return success for that data first. If * an error is persistent, it will be reported * on the next write. */ if (error != 0 && write_len == 0) return (error); if (write_len == 0 && uio->uio_resid != 0) return (ENOSPC); return (0); } /* Process requests (descrs) via the periph-supplied CCBs */ static void targstart(struct cam_periph *periph, union ccb *start_ccb) { struct targ_softc *softc; struct targ_cmd_descr *descr, *next_descr; int error; softc = (struct targ_softc *)periph->softc; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targstart %p\n", start_ccb)); descr = TAILQ_FIRST(&softc->work_queue); if (descr == NULL) { xpt_release_ccb(start_ccb); } else { TAILQ_REMOVE(&softc->work_queue, descr, tqe); next_descr = TAILQ_FIRST(&softc->work_queue); /* Initiate a transaction using the descr and supplied CCB */ error = targusermerge(softc, descr, start_ccb); if (error == 0) error = targsendccb(softc, start_ccb, descr); if (error != 0) { xpt_print(periph->path, "targsendccb failed, err %d\n", error); xpt_release_ccb(start_ccb); suword(&descr->user_ccb->ccb_h.status, CAM_REQ_CMP_ERR); TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe); notify_user(softc); } /* If we have more work to do, stay scheduled */ if (next_descr != NULL) xpt_schedule(periph, next_descr->priority); } } static int targusermerge(struct targ_softc *softc, struct targ_cmd_descr *descr, union ccb *ccb) { struct ccb_hdr *u_ccbh, *k_ccbh; size_t ccb_len; int error; u_ccbh = &descr->user_ccb->ccb_h; k_ccbh = &ccb->ccb_h; /* * There are some fields in the CCB header that need to be * preserved, the rest we get from the user ccb. 
(See xpt_merge_ccb) */ xpt_setup_ccb(k_ccbh, softc->path, descr->priority); k_ccbh->retry_count = fuword32(&u_ccbh->retry_count); k_ccbh->func_code = descr->func_code; k_ccbh->flags = fuword32(&u_ccbh->flags); k_ccbh->timeout = fuword32(&u_ccbh->timeout); ccb_len = targccblen(k_ccbh->func_code) - sizeof(struct ccb_hdr); error = copyin(u_ccbh + 1, k_ccbh + 1, ccb_len); if (error != 0) { k_ccbh->status = CAM_REQ_CMP_ERR; return (error); } /* Translate usermode abort_ccb pointer to its kernel counterpart */ if (k_ccbh->func_code == XPT_ABORT) { struct ccb_abort *cab; struct ccb_hdr *ccb_h; cab = (struct ccb_abort *)ccb; TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) { struct targ_cmd_descr *ab_descr; ab_descr = (struct targ_cmd_descr *)ccb_h->targ_descr; if (ab_descr->user_ccb == cab->abort_ccb) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Changing abort for %p to %p\n", cab->abort_ccb, ccb_h)); cab->abort_ccb = (union ccb *)ccb_h; break; } } /* CCB not found, set appropriate status */ if (ccb_h == NULL) { k_ccbh->status = CAM_PATH_INVALID; error = ESRCH; } } return (error); } /* Build and send a kernel CCB formed from descr->user_ccb */ static int targsendccb(struct targ_softc *softc, union ccb *ccb, struct targ_cmd_descr *descr) { struct cam_periph_map_info *mapinfo; struct ccb_hdr *ccb_h; int error; ccb_h = &ccb->ccb_h; mapinfo = &descr->mapinfo; mapinfo->num_bufs_used = 0; /* * There's no way for the user to have a completion * function, so we put our own completion function in here. * We also stash in a reference to our descriptor so targreturnccb() * can find our mapping info. */ ccb_h->cbfcnp = targdone; ccb_h->targ_descr = descr; if ((ccb_h->func_code == XPT_CONT_TARGET_IO) || (ccb_h->func_code == XPT_DEV_MATCH)) { error = cam_periph_mapmem(ccb, mapinfo, softc->maxio); /* * cam_periph_mapmem returned an error, we can't continue. * Return the error to the user. */ if (error) { ccb_h->status = CAM_REQ_CMP_ERR; mapinfo->num_bufs_used = 0; return (error); } } /* * Once queued on the pending CCB list, this CCB will be protected * by our error recovery handler. 
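 * (abort_all_pending() walks pending_ccb_queue and cancels each entry
 * with XPT_ABORT if the LUN is disabled before the SIM completes it.)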
*/ CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("sendccb %p\n", ccb)); if (XPT_FC_IS_QUEUED(ccb)) { TAILQ_INSERT_TAIL(&softc->pending_ccb_queue, ccb_h, periph_links.tqe); } xpt_action(ccb); return (0); } /* Completion routine for CCBs (called at splsoftcam) */ static void targdone(struct cam_periph *periph, union ccb *done_ccb) { struct targ_softc *softc; cam_status status; CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, ("targdone %p\n", done_ccb)); softc = (struct targ_softc *)periph->softc; TAILQ_REMOVE(&softc->pending_ccb_queue, &done_ccb->ccb_h, periph_links.tqe); status = done_ccb->ccb_h.status & CAM_STATUS_MASK; /* If we're no longer enabled, throw away CCB */ if ((softc->state & TARG_STATE_LUN_ENABLED) == 0) { targfreeccb(softc, done_ccb); return; } /* abort_all_pending() waits for pending queue to be empty */ if (TAILQ_EMPTY(&softc->pending_ccb_queue)) wakeup(&softc->pending_ccb_queue); switch (done_ccb->ccb_h.func_code) { /* All FC_*_QUEUED CCBs go back to userland */ case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: case XPT_ACCEPT_TARGET_IO: case XPT_CONT_TARGET_IO: TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h, periph_links.tqe); cam_periph_unlock(softc->periph); notify_user(softc); cam_periph_lock(softc->periph); break; default: panic("targdone: impossible xpt opcode %#x", done_ccb->ccb_h.func_code); /* NOTREACHED */ } } /* Return CCBs to the user from the user queue and abort queue */ static int targread(struct cdev *dev, struct uio *uio, int ioflag) { struct descr_queue *abort_queue; struct targ_cmd_descr *user_descr; struct targ_softc *softc; struct ccb_queue *user_queue; struct ccb_hdr *ccb_h; union ccb *user_ccb; int read_len, error; error = 0; read_len = 0; devfs_get_cdevpriv((void **)&softc); user_queue = &softc->user_ccb_queue; abort_queue = &softc->abort_queue; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread\n")); /* If no data is available, wait or return immediately */ cam_periph_lock(softc->periph); ccb_h = TAILQ_FIRST(user_queue); user_descr = TAILQ_FIRST(abort_queue); while (ccb_h == NULL && user_descr == NULL) { if ((ioflag & IO_NDELAY) == 0) { error = cam_periph_sleep(softc->periph, user_queue, PRIBIO | PCATCH, "targrd", 0); ccb_h = TAILQ_FIRST(user_queue); user_descr = TAILQ_FIRST(abort_queue); if (error != 0) { if (error == ERESTART) { continue; } else { goto read_fail; } } } else { cam_periph_unlock(softc->periph); return (EAGAIN); } } /* Data is available so fill the user's buffer */ while (ccb_h != NULL) { struct targ_cmd_descr *descr; if (uio->uio_resid < sizeof(user_ccb)) break; TAILQ_REMOVE(user_queue, ccb_h, periph_links.tqe); descr = (struct targ_cmd_descr *)ccb_h->targ_descr; user_ccb = descr->user_ccb; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread ccb %p (%p)\n", ccb_h, user_ccb)); error = targreturnccb(softc, (union ccb *)ccb_h); if (error != 0) goto read_fail; cam_periph_unlock(softc->periph); error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio); cam_periph_lock(softc->periph); if (error != 0) goto read_fail; read_len += sizeof(user_ccb); ccb_h = TAILQ_FIRST(user_queue); } /* Flush out any aborted descriptors */ while (user_descr != NULL) { if (uio->uio_resid < sizeof(user_ccb)) break; TAILQ_REMOVE(abort_queue, user_descr, tqe); user_ccb = user_descr->user_ccb; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targread aborted descr %p (%p)\n", user_descr, user_ccb)); suword(&user_ccb->ccb_h.status, CAM_REQ_ABORTED); cam_periph_unlock(softc->periph); error = uiomove((caddr_t)&user_ccb, sizeof(user_ccb), uio); 
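/*
 * The periph lock is dropped across uiomove() because the copy to
 * user space may fault and sleep; the queue head is re-read after
 * the lock is retaken.
 */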
cam_periph_lock(softc->periph); if (error != 0) goto read_fail; read_len += sizeof(user_ccb); user_descr = TAILQ_FIRST(abort_queue); } /* * If we've successfully read some amount of data, don't report an * error. If the error is persistent, it will be reported on the * next read(). */ if (read_len == 0 && uio->uio_resid != 0) error = ENOSPC; read_fail: cam_periph_unlock(softc->periph); return (error); } /* Copy completed ccb back to the user */ static int targreturnccb(struct targ_softc *softc, union ccb *ccb) { struct targ_cmd_descr *descr; struct ccb_hdr *u_ccbh; size_t ccb_len; int error; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("targreturnccb %p\n", ccb)); descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr; u_ccbh = &descr->user_ccb->ccb_h; /* Copy out the central portion of the ccb_hdr */ copyout(&ccb->ccb_h.retry_count, &u_ccbh->retry_count, offsetof(struct ccb_hdr, periph_priv) - offsetof(struct ccb_hdr, retry_count)); /* Copy out the rest of the ccb (after the ccb_hdr) */ ccb_len = targccblen(ccb->ccb_h.func_code) - sizeof(struct ccb_hdr); if (descr->mapinfo.num_bufs_used != 0) cam_periph_unmapmem(ccb, &descr->mapinfo); error = copyout(&ccb->ccb_h + 1, u_ccbh + 1, ccb_len); if (error != 0) { xpt_print(softc->path, "targreturnccb - CCB copyout failed (%d)\n", error); } /* Free CCB or send back to devq. */ targfreeccb(softc, ccb); return (error); } static union ccb * targgetccb(struct targ_softc *softc, xpt_opcode type, int priority) { union ccb *ccb; int ccb_len; ccb_len = targccblen(type); ccb = malloc(ccb_len, M_TARG, M_NOWAIT); CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("getccb %p\n", ccb)); if (ccb == NULL) { return (ccb); } xpt_setup_ccb(&ccb->ccb_h, softc->path, priority); ccb->ccb_h.func_code = type; ccb->ccb_h.cbfcnp = targdone; ccb->ccb_h.targ_descr = targgetdescr(softc); if (ccb->ccb_h.targ_descr == NULL) { free (ccb, M_TARG); ccb = NULL; } return (ccb); } static void targfreeccb(struct targ_softc *softc, union ccb *ccb) { CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("targfreeccb descr %p and\n", ccb->ccb_h.targ_descr)); free(ccb->ccb_h.targ_descr, M_TARG); switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb)); free(ccb, M_TARG); break; default: /* Send back CCB if we got it from the periph */ if (XPT_FC_IS_QUEUED(ccb)) { CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("returning queued ccb %p\n", ccb)); xpt_release_ccb(ccb); } else { CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb)); free(ccb, M_TARG); } break; } } static struct targ_cmd_descr * targgetdescr(struct targ_softc *softc) { struct targ_cmd_descr *descr; descr = malloc(sizeof(*descr), M_TARG, M_NOWAIT); if (descr) { descr->mapinfo.num_bufs_used = 0; } return (descr); } static void targinit(void) { struct cdev *dev; /* Add symbolic link to targ0 for compatibility. */ dev = make_dev(&targ_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "targ"); make_dev_alias(dev, "targ0"); } static void targasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { /* All events are handled in usermode by INOTs */ panic("targasync() called, should be an INOT instead"); } /* Cancel all pending requests and CCBs awaiting work. 
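 * Descriptors still parked on work_queue only move to abort_queue;
 * CCBs already sent to the SIM are each cancelled with an XPT_ABORT
 * request and come back through targdone() and user_ccb_queue.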
*/ static void abort_all_pending(struct targ_softc *softc) { struct targ_cmd_descr *descr; struct ccb_abort cab; struct ccb_hdr *ccb_h; CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("abort_all_pending\n")); /* First abort the descriptors awaiting resources */ while ((descr = TAILQ_FIRST(&softc->work_queue)) != NULL) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Aborting descr from workq %p\n", descr)); TAILQ_REMOVE(&softc->work_queue, descr, tqe); TAILQ_INSERT_TAIL(&softc->abort_queue, descr, tqe); } /* * Then abort all pending CCBs. * targdone() will return the aborted CCB via user_ccb_queue */ xpt_setup_ccb(&cab.ccb_h, softc->path, CAM_PRIORITY_NORMAL); cab.ccb_h.func_code = XPT_ABORT; cab.ccb_h.status = CAM_REQ_CMP_ERR; TAILQ_FOREACH(ccb_h, &softc->pending_ccb_queue, periph_links.tqe) { CAM_DEBUG(softc->path, CAM_DEBUG_PERIPH, ("Aborting pending CCB %p\n", ccb_h)); cab.abort_ccb = (union ccb *)ccb_h; xpt_action((union ccb *)&cab); if (cab.ccb_h.status != CAM_REQ_CMP) { xpt_print(cab.ccb_h.path, "Unable to abort CCB, status %#x\n", cab.ccb_h.status); } } /* If we aborted at least one pending CCB ok, wait for it. */ if (cab.ccb_h.status == CAM_REQ_CMP) { cam_periph_sleep(softc->periph, &softc->pending_ccb_queue, PRIBIO | PCATCH, "tgabrt", 0); } /* If we aborted anything from the work queue, wakeup user. */ if (!TAILQ_EMPTY(&softc->user_ccb_queue) || !TAILQ_EMPTY(&softc->abort_queue)) { cam_periph_unlock(softc->periph); notify_user(softc); cam_periph_lock(softc->periph); } } /* Notify the user that data is ready */ static void notify_user(struct targ_softc *softc) { /* * Notify users sleeping via poll(), kqueue(), and * blocking read(). */ selwakeuppri(&softc->read_select, PRIBIO); KNOTE_UNLOCKED(&softc->read_select.si_note, 0); wakeup(&softc->user_ccb_queue); } /* Convert CAM status to errno values */ static int targcamstatus(cam_status status) { switch (status & CAM_STATUS_MASK) { case CAM_REQ_CMP: /* CCB request completed without error */ return (0); case CAM_REQ_INPROG: /* CCB request is in progress */ return (EINPROGRESS); case CAM_REQ_CMP_ERR: /* CCB request completed with an error */ return (EIO); case CAM_PROVIDE_FAIL: /* Unable to provide requested capability */ return (ENOTTY); case CAM_FUNC_NOTAVAIL: /* The requested function is not available */ return (ENOTSUP); case CAM_LUN_ALRDY_ENA: /* LUN is already enabled for target mode */ return (EADDRINUSE); case CAM_PATH_INVALID: /* Supplied Path ID is invalid */ case CAM_DEV_NOT_THERE: /* SCSI Device Not Installed/there */ return (ENOENT); case CAM_REQ_ABORTED: /* CCB request aborted by the host */ return (ECANCELED); case CAM_CMD_TIMEOUT: /* Command timeout */ return (ETIMEDOUT); case CAM_REQUEUE_REQ: /* Requeue to preserve transaction ordering */ return (EAGAIN); case CAM_REQ_INVALID: /* CCB request was invalid */ return (EINVAL); case CAM_RESRC_UNAVAIL: /* Resource Unavailable */ return (ENOMEM); case CAM_BUSY: /* CAM subsystem is busy */ case CAM_UA_ABORT: /* Unable to abort CCB request */ return (EBUSY); default: return (ENXIO); } } static size_t targccblen(xpt_opcode func_code) { int len; /* Codes we expect to see as a target */ switch (func_code) { case XPT_CONT_TARGET_IO: case XPT_SCSI_IO: len = sizeof(struct ccb_scsiio); break; case XPT_ACCEPT_TARGET_IO: len = sizeof(struct ccb_accept_tio); break; case XPT_IMMED_NOTIFY: len = sizeof(struct ccb_immed_notify); break; case XPT_IMMEDIATE_NOTIFY: len = sizeof(struct ccb_immediate_notify); break; case XPT_REL_SIMQ: len = sizeof(struct ccb_relsim); break; case XPT_PATH_INQ: len = 
sizeof(struct ccb_pathinq); break; case XPT_DEBUG: len = sizeof(struct ccb_debug); break; case XPT_ABORT: len = sizeof(struct ccb_abort); break; case XPT_EN_LUN: len = sizeof(struct ccb_en_lun); break; default: len = sizeof(union ccb); break; } return (len); } Index: head/sys/cam/scsi/scsi_targetio.h =================================================================== --- head/sys/cam/scsi/scsi_targetio.h (revision 326264) +++ head/sys/cam/scsi/scsi_targetio.h (revision 326265) @@ -1,77 +1,79 @@ /*- * Ioctl definitions for the SCSI Target Driver * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2002 Nate Lawson. * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_SCSI_SCSI_TARGETIO_H_ #define _CAM_SCSI_SCSI_TARGETIO_H_ #ifndef _KERNEL #include #endif #include #include #include /* * CCBs (ATIO, CTIO, INOT, REL_SIMQ) are sent to the kernel driver * by writing one or more pointers. The user receives notification * of CCB completion through poll/select/kqueue and then calls * read(2) which outputs pointers to the completed CCBs. */ /* * Enable and disable a target mode instance. For enabling, the path_id, * target_id, and lun_id fields must be set. The grp6/7_len fields * specify the length of vendor-specific CDBs the target expects and * should normally be set to 0. On successful completion * of enable, the specified target instance will answer selection. * Disable causes the target instance to abort any outstanding commands * and stop accepting new ones. The aborted CCBs will be returned to * the user via read(2) or discarded if the user closes the device. * The user can then re-enable the device for a new path. 
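 *
 * As a sketch of the full cycle from user space (error handling and
 * CCB setup elided; targ_fd is an open descriptor for this driver's
 * device node, ccb points at an initialized CCB in process memory,
 * and path_id/target_id/lun_id are placeholders):
 *
 *	struct ioc_enable_lun enable = { path_id, target_id, lun_id,
 *					 0, 0 };
 *	union ccb *ccb;
 *
 *	ioctl(targ_fd, TARGIOCENABLE, &enable);
 *	write(targ_fd, &ccb, sizeof(ccb));
 *	read(targ_fd, &ccb, sizeof(ccb));
 *	ioctl(targ_fd, TARGIOCDISABLE);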
*/ struct ioc_enable_lun { path_id_t path_id; target_id_t target_id; lun_id_t lun_id; int grp6_len; int grp7_len; }; #define TARGIOCENABLE _IOW('C', 5, struct ioc_enable_lun) #define TARGIOCDISABLE _IO('C', 6) /* * Set/clear debugging for this target mode instance */ #define TARGIOCDEBUG _IOW('C', 7, int) TAILQ_HEAD(ccb_queue, ccb_hdr); #endif /* _CAM_SCSI_SCSI_TARGETIO_H_ */ Index: head/sys/cam/scsi/scsi_xpt.c =================================================================== --- head/sys/cam/scsi/scsi_xpt.c (revision 326264) +++ head/sys/cam/scsi/scsi_xpt.c (revision 326265) @@ -1,3242 +1,3244 @@ /*- * Implementation of the SCSI Transport * + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct scsi_quirk_entry { struct scsi_inquiry_pattern inq_pat; u_int8_t quirks; #define CAM_QUIRK_NOLUNS 0x01 #define CAM_QUIRK_NOVPDS 0x02 #define CAM_QUIRK_HILUNS 0x04 #define CAM_QUIRK_NOHILUNS 0x08 #define CAM_QUIRK_NORPTLUNS 0x10 u_int mintags; u_int maxtags; }; #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk)) static int cam_srch_hi = 0; static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RWTUN, 0, 0, sysctl_cam_search_luns, "I", "allow search above LUN 7 for SCSI3 and greater devices"); #define CAM_SCSI2_MAXLUN 8 #define CAM_CAN_GET_SIMPLE_LUN(x, i) \ ((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) || \ (((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_FLAT)) #define CAM_GET_SIMPLE_LUN(lp, i, lval) \ if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) { \ (lval) = (lp)->luns[(i)].lundata[1]; \ } else { \ (lval) = (lp)->luns[(i)].lundata[0]; \ (lval) &= RPL_LUNDATA_FLAT_LUN_MASK; \ (lval) <<= 8; \ (lval) |= (lp)->luns[(i)].lundata[1]; \ } #define CAM_GET_LUN(lp, i, lval) \ (lval) = scsi_8btou64((lp)->luns[(i)].lundata); \ (lval) = CAM_EXTLUN_BYTE_SWIZZLE(lval); /* * If we're not quirked to search <= the first 8 luns * and we are either quirked to search above lun 8, * or we're > SCSI-2 and we've enabled hilun searching, * or we're > SCSI-2 and the last lun was a success, * we can look for luns above lun 8. 
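 * For example, an unquirked SCSI-3 disk is probed past LUN 7 only if
 * the kern.cam.cam_srch_hi sysctl is set (the sparse case) or the
 * preceding luns kept answering (the dense case).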
*/ #define CAN_SRCH_HI_SPARSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi))) #define CAN_SRCH_HI_DENSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))) static periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "probe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(probe, probe_driver); typedef enum { PROBE_TUR, PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */ PROBE_FULL_INQUIRY, PROBE_REPORT_LUNS, PROBE_MODE_SENSE, PROBE_SUPPORTED_VPD_LIST, PROBE_DEVICE_ID, PROBE_EXTENDED_INQUIRY, PROBE_SERIAL_NUM, PROBE_TUR_FOR_NEGOTIATION, PROBE_INQUIRY_BASIC_DV1, PROBE_INQUIRY_BASIC_DV2, PROBE_DV_EXIT, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_TUR", "PROBE_INQUIRY", "PROBE_FULL_INQUIRY", "PROBE_REPORT_LUNS", "PROBE_MODE_SENSE", "PROBE_SUPPORTED_VPD_LIST", "PROBE_DEVICE_ID", "PROBE_EXTENDED_INQUIRY", "PROBE_SERIAL_NUM", "PROBE_TUR_FOR_NEGOTIATION", "PROBE_INQUIRY_BASIC_DV1", "PROBE_INQUIRY_BASIC_DV2", "PROBE_DV_EXIT", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { PROBE_INQUIRY_CKSUM = 0x01, PROBE_SERIAL_CKSUM = 0x02, PROBE_NO_ANNOUNCE = 0x04, PROBE_EXTLUN = 0x08 } probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; probe_action action; union ccb saved_ccb; probe_flags flags; MD5_CTX context; u_int8_t digest[16]; struct cam_periph *periph; } probe_softc; static const char quantum[] = "QUANTUM"; static const char sony[] = "SONY"; static const char west_digital[] = "WDIGTL"; static const char samsung[] = "SAMSUNG"; static const char seagate[] = "SEAGATE"; static const char microp[] = "MICROP"; static struct scsi_quirk_entry scsi_quirk_table[] = { { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Unfortunately, the Quantum Atlas III has the same * problem as the Atlas II drives above. * Reported by: "Johan Granlund" * * For future reference, the drive with the problem was: * QUANTUM QM39100TD-SW N1B0 * * It's possible that Quantum will fix the problem in later * firmware revisions. If that happens, the quirk entry * will need to be made specific to the firmware revisions * with the problem. 
* */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * 18 Gig Atlas III, same problem as the 9G version. * Reported by: Andre Albsmeier * * * For future reference, the drive with the problem was: * QUANTUM QM318000TD-S N491 */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * Broken tagged queuing drive * Reported by: Bret Ford * and: Martin Renters */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, /* * The Seagate Medalist Pro drives have very poor write * performance with anything more than 2 tags. * * Reported by: Paul van der Zwan * Drive: * * Reported by: Jeremy Lea * Drive: * * No one has actually reported that the 9G version * (ST39140*) of the Medalist Pro has the same problem, but * we're assuming that it does because the 4G and 6.5G * versions of the drive are broken. */ { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { /* * Experiences command timeouts under load with a * tag count higher than 55. */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST3146855LW", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/55 }, { /* * Slow when tagged queueing is enabled. Write performance * steadily drops off with more and more concurrent * transactions. Best sequential write performance with * tagged queueing turned off and write caching turned on. * * PR: kern/10398 * Submitted by: Hideaki Okada * Drive: DCAS-34330 w/ "S65A" firmware. * * The drive with the problem had the "S65A" firmware * revision, and has also been reported (by Stephen J. * Roznowski ) for a drive with the "S61A" * firmware revision. * * Although no one has reported problems with the 2 gig * version of the DCAS drive, the assumption is that it * has the same problems as the 4 gig version. Therefore * this quirk entries disables tagged queueing for all * DCAS drives. */ { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* This does not support other than LUN 0 */ { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * Broken tagged queuing drive. * Submitted by: * NAKAJI Hiroyuki * in PR kern/9535 */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.) * Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.) 
* Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Doesn't handle queue full condition correctly, * so we need to limit maxtags to what the device * can handle instead of determining this automatically. */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/32 }, { /* Really only one LUN */ { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* I can't believe we need a quirk for DPT volumes. */ { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/255 }, { /* * Many Sony CDROM drives don't like multi-LUN probing. */ { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * This drive doesn't like multiple LUN probing. * Submitted by: Parag Patel */ { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * The 8200 doesn't like multi-lun probing, and probably * doesn't like serial number requests either. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "EXB-8200*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Let's try the same as above, but for a drive that says * it's an IPL-6860 but is actually an EXB 8200. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "IPL-6860*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These Hitachi drives don't like multi-lun probing. * The PR submitter has a DK319H, but says that the Linux * kernel has a similar work-around for the DK312 and DK314, * so all DK31* drives are quirked here. * PR: misc/18793 * Submitted by: Paul Haddad */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * The Hitachi CJ series with J8A8 firmware apparently has * problems with tagged commands. * PR: 23536 * Reported by: amagai@nue.org */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These are the large storage arrays. * Submitted by: William Carrel */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" }, CAM_QUIRK_HILUNS, 2, 1024 }, { /* * This old revision of the TDC3600 is also SCSI-1, and * hangs upon serial number probing. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:" }, CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER", "CP150", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for.
*/ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "96X2*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* TeraSolutions special settings for TRC-22 RAID */ { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" }, /*quirks*/0, /*mintags*/55, /*maxtags*/255 }, { /* Veritas Storage Appliance */ { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" }, CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024 }, { /* * Would respond to all LUNs. Device type and removable * flag are jumper-selectable. */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix", "Tahiti 1", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* EasyRAID E5A aka. areca ARC-6010 */ { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" }, CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255 }, { { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Garmin", "*", "*" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "STORAGE DEVICE*", "120?" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "MassStorageClass", "1533" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { /* Default tagged queuing parameters for all devices */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/255 }, }; static cam_status proberegister(struct cam_periph *periph, void *arg); static void probeschedule(struct cam_periph *probe_periph); static void probestart(struct cam_periph *periph, union ccb *start_ccb); static void proberequestdefaultnegotiation(struct cam_periph *periph); static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device); static void probedone(struct cam_periph *periph, union ccb *done_ccb); static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags); static void probecleanup(struct cam_periph *periph); static void scsi_find_quirk(struct cam_ed *device); static void scsi_scan_bus(struct cam_periph *periph, union ccb *ccb); static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void scsi_devise_transport(struct cam_path *path); static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update); static void scsi_toggle_tags(struct cam_path *path); static void scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void scsi_action(union ccb *start_ccb); static void scsi_announce_periph(struct cam_periph *periph); static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void scsi_proto_announce(struct cam_ed *device); static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void scsi_proto_denounce(struct cam_ed *device); static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static 
void scsi_proto_debug_out(union ccb *ccb); static void _scsi_announce_periph(struct cam_periph *, u_int *, u_int *, struct ccb_trans_settings *); static struct xpt_xport_ops scsi_xport_ops = { .alloc_device = scsi_alloc_device, .action = scsi_action, .async = scsi_dev_async, .announce = scsi_announce_periph, .announce_sbuf = scsi_announce_periph_sbuf, }; #define SCSI_XPT_XPORT(x, X) \ static struct xpt_xport scsi_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &scsi_xport_ops, \ }; \ CAM_XPT_XPORT(scsi_xport_ ## x); SCSI_XPT_XPORT(spi, SPI); SCSI_XPT_XPORT(sas, SAS); SCSI_XPT_XPORT(fc, FC); SCSI_XPT_XPORT(usb, USB); SCSI_XPT_XPORT(iscsi, ISCSI); SCSI_XPT_XPORT(srp, SRP); SCSI_XPT_XPORT(ppb, PPB); #undef SCSI_XPORT_XPORT static struct xpt_proto_ops scsi_proto_ops = { .announce = scsi_proto_announce, .announce_sbuf = scsi_proto_announce_sbuf, .denounce = scsi_proto_denounce, .denounce_sbuf = scsi_proto_denounce_sbuf, .debug_out = scsi_proto_debug_out, }; static struct xpt_proto scsi_proto = { .proto = PROTO_SCSI, .name = "scsi", .ops = &scsi_proto_ops, }; CAM_XPT_PROTO(scsi_proto); static void probe_periph_init() { } static cam_status proberegister(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ cam_status status; probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("proberegister: no probe CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; status = cam_periph_acquire(periph); if (status != CAM_REQ_CMP) { return (status); } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); scsi_devise_transport(periph->path); /* * Ensure we've waited at least a bus settle * delay before attempting to probe the device. * For HBAs that don't do bus resets, this won't make a difference. */ cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, scsi_delay); probeschedule(periph); return(CAM_REQ_CMP); } static void probeschedule(struct cam_periph *periph) { struct ccb_pathinq cpi; union ccb *ccb; probe_softc *softc; softc = (probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * If a device has gone away and another device, or the same one, * is back in the same place, it should have a unit attention * condition pending. It will not report the unit attention in * response to an inquiry, which may leave invalid transfer * negotiations in effect. The TUR will reveal the unit attention * condition. Only send the TUR for lun 0, since some devices * will get confused by commands other than inquiry to non-existent * luns. If you think a device has gone away start your scan from * lun 0. This will insure that any bogus transfer settings are * invalidated. * * If we haven't seen the device before and the controller supports * some kind of transfer negotiation, negotiate with the first * sent command if no bus reset was performed at startup. 
This * ensures that the device is not confused by transfer negotiation * settings left over by loader or BIOS action. */ if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && (ccb->ccb_h.target_lun == 0)) { PROBE_SET_ACTION(softc, PROBE_TUR); } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { proberequestdefaultnegotiation(periph); PROBE_SET_ACTION(softc, PROBE_INQUIRY); } else { PROBE_SET_ACTION(softc, PROBE_INQUIRY); } if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= PROBE_NO_ANNOUNCE; else softc->flags &= ~PROBE_NO_ANNOUNCE; if (cpi.hba_misc & PIM_EXTLUNS) softc->flags |= PROBE_EXTLUN; else softc->flags &= ~PROBE_EXTLUN; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void probestart(struct cam_periph *periph, union ccb *start_ccb) { /* Probe the device that our peripheral driver points to */ struct ccb_scsiio *csio; probe_softc *softc; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); softc = (probe_softc *)periph->softc; csio = &start_ccb->csio; again: switch (softc->action) { case PROBE_TUR: case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: { scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; /* * If the device is currently configured, we calculate an * MD5 checksum of the inquiry data, and if the serial number * length is greater than 0, add the serial number data * into the checksum as well. Once the inquiry and the * serial number check finish, we attempt to figure out * whether we still have the same device. */ if (((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && ((softc->flags & PROBE_INQUIRY_CKSUM) == 0)) { MD5Init(&softc->context); MD5Update(&softc->context, (unsigned char *)inq_buf, sizeof(struct scsi_inquiry_data)); softc->flags |= PROBE_INQUIRY_CKSUM; if (periph->path->device->serial_num_len > 0) { MD5Update(&softc->context, periph->path->device->serial_num, periph->path->device->serial_num_len); softc->flags |= PROBE_SERIAL_CKSUM; } MD5Final(softc->digest, &softc->context); } if (softc->action == PROBE_INQUIRY) inquiry_len = SHORT_INQUIRY_LENGTH; else inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); /* * Some parallel SCSI devices fail to send an * ignore wide residue message when dealing with * odd length inquiry requests. Round up to be * safe. 
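 * For example, a 37-byte inquiry would be issued as roundup2(37, 2)
 * == 38 bytes.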
*/ inquiry_len = roundup2(inquiry_len, 2); if (softc->action == PROBE_INQUIRY_BASIC_DV1 || softc->action == PROBE_INQUIRY_BASIC_DV2) { inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT); } if (inq_buf == NULL) { xpt_print(periph->path, "malloc failure- skipping Basic " "Domain Validation\n"); PROBE_SET_ACTION(softc, PROBE_DV_EXIT); scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } case PROBE_REPORT_LUNS: { void *rp; rp = malloc(periph->path->target->rpl_size, M_CAMXPT, M_NOWAIT | M_ZERO); if (rp == NULL) { struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; xpt_print(periph->path, "Unable to alloc report luns storage\n"); if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); goto again; } scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG, RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size, SSD_FULL_SIZE, 60000); break; } case PROBE_MODE_SENSE: { void *mode_buf; int mode_buf_len; mode_buf_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct scsi_control_page); mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT); if (mode_buf != NULL) { scsi_mode_sense(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SMS_CONTROL_MODE_PAGE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60000); break; } xpt_print(periph->path, "Unable to mode sense control page - " "malloc failure\n"); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); } /* FALLTHROUGH */ case PROBE_SUPPORTED_VPD_LIST: { struct scsi_vpd_supported_page_list *vpd_list; struct cam_ed *device; vpd_list = NULL; device = periph->path->device; if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0) vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT, M_NOWAIT | M_ZERO); if (vpd_list != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)vpd_list, sizeof(*vpd_list), /*evpd*/TRUE, SVPD_SUPPORTED_PAGE_LIST, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } done: /* * We'll have to do without, let our probedone * routine finish up for us. */ start_ccb->csio.data_ptr = NULL; cam_freeze_devq(periph->path); cam_periph_doacquire(periph); probedone(periph, start_ccb); return; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; devid = NULL; if (scsi_vpd_supported_page(periph, SVPD_DEVICE_ID)) devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT, M_NOWAIT | M_ZERO); if (devid != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)devid, SVPD_DEVICE_ID_MAX_SIZE, /*evpd*/TRUE, SVPD_DEVICE_ID, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; ext_inq = NULL; if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA)) ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT, M_NOWAIT | M_ZERO); if (ext_inq != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)ext_inq, sizeof(*ext_inq), /*evpd*/TRUE, SVPD_EXTENDED_INQUIRY_DATA, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } /* * We'll have to do without, let our probedone * routine finish up for us.
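 * (The shared done: label above clears data_ptr, freezes the device
 * queue and calls probedone() directly, so probedone() sees a NULL
 * buffer and treats the command as never sent.)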
*/ goto done; } case PROBE_SERIAL_NUM: { struct scsi_vpd_unit_serial_number *serial_buf; struct cam_ed* device; serial_buf = NULL; device = periph->path->device; if (device->serial_num != NULL) { free(device->serial_num, M_CAMXPT); device->serial_num = NULL; device->serial_num_len = 0; } if (scsi_vpd_supported_page(periph, SVPD_UNIT_SERIAL_NUMBER)) serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO); if (serial_buf != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (u_int8_t *)serial_buf, sizeof(*serial_buf), /*evpd*/TRUE, SVPD_UNIT_SERIAL_NUMBER, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } default: panic("probestart: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; cam_periph_doacquire(periph); xpt_action(start_ccb); } static void proberequestdefaultnegotiation(struct cam_periph *periph) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { return; } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); } /* * Backoff Negotiation Code- only pertinent for SPI devices. */ static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device) { struct ccb_trans_settings cts; struct ccb_trans_settings_spi *spi; memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { if (bootverbose) { xpt_print(periph->path, "failed to get current device settings\n"); } return (0); } if (cts.transport != XPORT_SPI) { if (bootverbose) { xpt_print(periph->path, "not SPI transport\n"); } return (0); } spi = &cts.xport_specific.spi; /* * We cannot renegotiate sync rate if we don't have one. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate known\n"); } return (0); } /* * We'll assert that we don't have to touch PPR options- the * SIM will see what we do with period and offset and adjust * the PPR options as appropriate. */ /* * A sync rate with unknown or zero offset is nonsensical. * A sync period of zero means Async. */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0 || spi->sync_offset == 0 || spi->sync_period == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate available\n"); } return (0); } if (device->flags & CAM_DEV_DV_HIT_BOTTOM) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("hit async: giving up on DV\n")); return (0); } /* * Jump sync_period up by one, but stop at 5MHz and fall back to Async. * We don't try to remember 'last' settings to see if the SIM actually * gets into the speed we want to set. We check on the SIM telling * us that a requested speed is bad, but otherwise don't try and * check the speed due to the asynchronous and handshake nature * of speed setting. */ spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; for (;;) { spi->sync_period++; if (spi->sync_period >= 0xf) { spi->sync_period = 0; spi->sync_offset = 0; CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("setting to async for DV\n")); /* * Once we hit async, we don't want to try * any more settings. 
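 * Each pass of the loop below proposes the next larger (slower)
 * sync_period via XPT_SET_TRAN_SETTINGS until the SIM accepts one;
 * at 0xf the period and offset are zeroed to request async, and this
 * flag keeps a later pass from starting the backoff over.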
*/ device->flags |= CAM_DEV_DV_HIT_BOTTOM; } else if (bootverbose) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: period 0x%x\n", spi->sync_period)); printf("setting period to 0x%x\n", spi->sync_period); } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) == CAM_REQ_CMP) { break; } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: failed to set period 0x%x\n", spi->sync_period)); if (spi->sync_period == 0) { return (0); } } return (1); } #define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP) static void probedone(struct cam_periph *periph, union ccb *done_ccb) { probe_softc *softc; struct cam_path *path; struct scsi_inquiry_data *inq_buf; u_int32_t priority; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); softc = (probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; cam_periph_assert(periph, MA_OWNED); switch (softc->action) { case PROBE_TUR: { if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, SF_NO_PRINT, NULL) == ERESTART) { outr: /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } PROBE_SET_ACTION(softc, PROBE_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); out: /* Drop freeze taken due to CAM_DEV_QFREEZE and release. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); return; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { u_int8_t periph_qual; path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; scsi_find_quirk(path->device); inq_buf = &path->device->inq_data; periph_qual = SID_QUAL(inq_buf); if (periph_qual == SID_QUAL_LU_CONNECTED || periph_qual == SID_QUAL_LU_OFFLINE) { u_int8_t len; /* * We conservatively request only * SHORT_INQUIRY_LEN bytes of inquiry * information during our first try * at sending an INQUIRY. If the device * has more information to give, * perform a second request specifying * the amount of information the device * is willing to give. */ len = inq_buf->additional_length + offsetof(struct scsi_inquiry_data, additional_length) + 1; if (softc->action == PROBE_INQUIRY && len > SHORT_INQUIRY_LENGTH) { PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } scsi_devise_transport(path); if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); /* * Start with room for *one* lun.
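 * (16 bytes: the 8-byte REPORT LUNS header plus one 8-byte LUN
 * entry. If the device reports more, the PROBE_REPORT_LUNS
 * completion below reallocates with rpl_size = (nlun << 3) + 8 and
 * retries.)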
*/ periph->path->target->rpl_size = 16; } else if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); periph->path->target->rpl_size = 16; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } } else if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID; } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) /* Send the async notification. */ xpt_async(AC_LOST_DEVICE, path, NULL); PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_REPORT_LUNS: { struct ccb_scsiio *csio; struct scsi_report_luns_data *lp; u_int nlun, maxlun; csio = &done_ccb->csio; lp = (struct scsi_report_luns_data *)csio->data_ptr; nlun = scsi_4btoul(lp->length) / 8; maxlun = (csio->dxfer_len / 8) - 1; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { xpt_release_devq(done_ccb->ccb_h.path, 1, TRUE); } free(lp, M_CAMXPT); lp = NULL; } else if (nlun > maxlun) { /* * Reallocate and retry to cover all luns */ CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: reallocating REPORT_LUNS for %u luns\n", nlun)); free(lp, M_CAMXPT); path->target->rpl_size = (nlun << 3) + 8; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (nlun == 0) { /* * If there don't appear to be any luns, bail. */ free(lp, M_CAMXPT); lp = NULL; } else { lun_id_t lun; int idx; CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: %u lun(s) reported\n", nlun)); CAM_GET_LUN(lp, 0, lun); /* * If the first lun is not lun 0, then either there * is no lun 0 in the list, or the list is unsorted. */ if (lun != 0) { for (idx = 0; idx < nlun; idx++) { CAM_GET_LUN(lp, idx, lun); if (lun == 0) { break; } } if (idx != nlun) { uint8_t tlun[8]; memcpy(tlun, lp->luns[0].lundata, 8); memcpy(lp->luns[0].lundata, lp->luns[idx].lundata, 8); memcpy(lp->luns[idx].lundata, tlun, 8); CAM_DEBUG(path, CAM_DEBUG_PROBE, ("lun 0 in position %u\n", idx)); } } /* * If we have an old lun list, We can either * retest luns that appear to have been dropped, * or just nuke them. We'll opt for the latter. * This function will also install the new list * in the target structure. 
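 * (probe_purge_old() swaps the new list in under luns_mtx and drops
 * any previously seen LUN that is absent from the new one, sparing
 * LUN 0, which this state machine is still working on.)
 */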
*/ probe_purge_old(path, lp, softc->flags); lp = NULL; } inq_buf = &path->device->inq_data; if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID && (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED || SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) { if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (lp) { free(lp, M_CAMXPT); } PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_MODE_SENSE: { struct ccb_scsiio *csio; struct scsi_mode_header_6 *mode_hdr; csio = &done_ccb->csio; mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { struct scsi_control_page *page; u_int8_t *offset; offset = ((u_int8_t *)&mode_hdr[1]) + mode_hdr->blk_desc_len; page = (struct scsi_control_page *)offset; path->device->queue_flags = page->queue_flags; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } xpt_release_ccb(done_ccb); free(mode_hdr, M_CAMXPT); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_schedule(periph, priority); goto out; } case PROBE_SUPPORTED_VPD_LIST: { struct ccb_scsiio *csio; struct scsi_vpd_supported_page_list *page_list; csio = &done_ccb->csio; page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr; if (path->device->supported_vpds != NULL) { free(path->device->supported_vpds, M_CAMXPT); path->device->supported_vpds = NULL; path->device->supported_vpds_len = 0; } if (page_list == NULL) { /* * Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { /* Got vpd list */ path->device->supported_vpds_len = page_list->length + SVPD_SUPPORTED_PAGES_HDR_LEN; path->device->supported_vpds = (uint8_t *)page_list; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_DEVICE_ID); xpt_schedule(periph, priority); goto out; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } if (page_list) free(page_list, M_CAMXPT); /* No VPDs available, skip to device check. */ csio->data_ptr = NULL; goto probe_device_check; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; struct ccb_scsiio *csio; uint32_t length = 0; csio = &done_ccb->csio; devid = (struct scsi_vpd_device_id *)csio->data_ptr; /* Clean up from previous instance of this device */ if (path->device->device_id != NULL) { path->device->device_id_len = 0; free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; } if (devid == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(devid->length); if (length != 0) { /* * NB: device_id_len is actual response * size, not buffer size. 
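 * (A device returning a 24-byte descriptor list gives device_id_len
 * == 24 + SVPD_DEVICE_ID_HDR_LEN even though the buffer allocated
 * above was SVPD_DEVICE_ID_MAX_SIZE bytes.)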
*/ path->device->device_id_len = length + SVPD_DEVICE_ID_HDR_LEN; path->device->device_id = (uint8_t *)devid; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (devid && length == 0) free(devid, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY); xpt_schedule(periph, priority); goto out; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; struct ccb_scsiio *csio; int32_t length = 0; csio = &done_ccb->csio; ext_inq = (struct scsi_vpd_extended_inquiry_data *) csio->data_ptr; if (path->device->ext_inq != NULL) { path->device->ext_inq_len = 0; free(path->device->ext_inq, M_CAMXPT); path->device->ext_inq = NULL; } if (ext_inq == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(ext_inq->page_length) + __offsetof(struct scsi_vpd_extended_inquiry_data, flags1); length = min(length, sizeof(*ext_inq)); length -= csio->resid; if (length > 0) { path->device->ext_inq_len = length; path->device->ext_inq = (uint8_t *)ext_inq; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (ext_inq && length <= 0) free(ext_inq, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM); xpt_schedule(periph, priority); goto out; } probe_device_check: case PROBE_SERIAL_NUM: { struct ccb_scsiio *csio; struct scsi_vpd_unit_serial_number *serial_buf; u_int32_t priority; int changed; int have_serialnum; changed = 1; have_serialnum = 0; csio = &done_ccb->csio; priority = done_ccb->ccb_h.pinfo.priority; serial_buf = (struct scsi_vpd_unit_serial_number *)csio->data_ptr; if (serial_buf == NULL) { /* * Don't process the command as it was never sent */ } else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP && (serial_buf->length > 0)) { have_serialnum = 1; path->device->serial_num = (u_int8_t *)malloc((serial_buf->length + 1), M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { int start, slen; start = strspn(serial_buf->serial_num, " "); slen = serial_buf->length - start; if (slen <= 0) { /* * SPC5r05 says that an all-space serial * number means no product serial number * is available */ slen = 0; } memcpy(path->device->serial_num, &serial_buf->serial_num[start], slen); path->device->serial_num_len = slen; path->device->serial_num[slen] = '\0'; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT, &softc->saved_ccb) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Let's see if we have seen this device before. 
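 * The MD5 digest computed over the inquiry data (plus serial number,
 * when one was found) before probing began is recomputed here; a
 * mismatch means a different device now answers at this address, and
 * AC_LOST_DEVICE is posted unless announcements were suppressed.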
*/ if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { MD5_CTX context; u_int8_t digest[16]; MD5Init(&context); MD5Update(&context, (unsigned char *)&path->device->inq_data, sizeof(struct scsi_inquiry_data)); if (have_serialnum) MD5Update(&context, path->device->serial_num, path->device->serial_num_len); MD5Final(digest, &context); if (bcmp(softc->digest, digest, 16) == 0) changed = 0; /* * XXX Do we need to do a TUR in order to ensure * that the device really hasn't changed??? */ if ((changed != 0) && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) xpt_async(AC_LOST_DEVICE, path, NULL); } if (serial_buf != NULL) free(serial_buf, M_CAMXPT); if (changed != 0) { /* * Now that we have all the necessary * information to safely perform transfer * negotiations... Controllers don't perform * any negotiation or tagged queuing until * after the first XPT_SET_TRAN_SETTINGS ccb is * received. So, on a new device, just retrieve * the user settings, and set them as the current * settings to set the device up. */ proberequestdefaultnegotiation(periph); xpt_release_ccb(done_ccb); /* * Perform a TUR to allow the controller to * perform any necessary transfer negotiation. */ PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); xpt_schedule(periph, priority); goto out; } xpt_release_ccb(done_ccb); break; } case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY, NULL); } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Do Domain Validation for lun 0 on devices that claim * to support Synchronous Transfer modes. */ if (softc->action == PROBE_TUR_FOR_NEGOTIATION && done_ccb->ccb_h.target_lun == 0 && (path->device->inq_data.flags & SID_Sync) != 0 && (path->device->flags & CAM_DEV_IN_DV) == 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Begin Domain Validation\n")); path->device->flags |= CAM_DEV_IN_DV; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_DV_EXIT) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { struct scsi_inquiry_data *nbuf; struct ccb_scsiio *csio; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY, NULL); } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } csio = &done_ccb->csio; nbuf = (struct scsi_inquiry_data *)csio->data_ptr; if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) { xpt_print(path, "inquiry data fails comparison at DV%d step\n", softc->action == PROBE_INQUIRY_BASIC_DV1 ? 
1 : 2); if (proberequestbackoff(periph, path->device)) { path->device->flags &= ~CAM_DEV_IN_DV; PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); } else { /* give up */ PROBE_SET_ACTION(softc, PROBE_DV_EXIT); } free(nbuf, M_CAMXPT); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } free(nbuf, M_CAMXPT); if (softc->action == PROBE_INQUIRY_BASIC_DV1) { PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_INQUIRY_BASIC_DV2) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation Successfully\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; } default: panic("probedone: invalid action state 0x%x\n", softc->action); } done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(done_ccb); if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } else { probeschedule(periph); goto out; } } static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags) { struct cam_path *tp; struct scsi_report_luns_data *old; u_int idx1, idx2, nlun_old, nlun_new; lun_id_t this_lun; u_int8_t *ol, *nl; if (path->target == NULL) { return; } mtx_lock(&path->target->luns_mtx); old = path->target->luns; path->target->luns = new; mtx_unlock(&path->target->luns_mtx); if (old == NULL) return; nlun_old = scsi_4btoul(old->length) / 8; nlun_new = scsi_4btoul(new->length) / 8; /* * We are not going to assume sorted lists. Deal. */ for (idx1 = 0; idx1 < nlun_old; idx1++) { ol = old->luns[idx1].lundata; for (idx2 = 0; idx2 < nlun_new; idx2++) { nl = new->luns[idx2].lundata; if (memcmp(nl, ol, 8) == 0) { break; } } if (idx2 < nlun_new) { continue; } /* * An 'old' item not in the 'new' list. * Nuke it. Except that if it is lun 0, * that would be what the probe state * machine is currently working on, * so we won't do that. */ CAM_GET_LUN(old, idx1, this_lun); if (this_lun == 0) { continue; } /* * We also cannot nuke it if it is * not in a lun format we understand * and replace the LUN with a "simple" LUN * if that is all the HBA supports. 
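*/

/*
 * What this loop implements is a set difference over raw 8-byte LUN
 * descriptors: every LUN in the old REPORT LUNS data that no longer appears
 * in the new data (lun 0 excepted) gets an AC_LOST_DEVICE async.  A
 * standalone sketch of the membership test, with an illustrative function
 * name:
 */
#if 0	/* illustrative sketch only, not compiled */
static int
lun_still_present(const u_int8_t *ol, struct scsi_report_luns_data *newdata)
{
	u_int i, nlun_new;

	nlun_new = scsi_4btoul(newdata->length) / 8;
	for (i = 0; i < nlun_new; i++) {
		if (memcmp(newdata->luns[i].lundata, ol, 8) == 0)
			return (1);	/* still reported */
	}
	return (0);	/* vanished; candidate for AC_LOST_DEVICE */
}
#endif

/*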
*/ if (!(flags & PROBE_EXTLUN)) { if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1)) continue; CAM_GET_SIMPLE_LUN(old, idx1, this_lun); } if (xpt_create_path(&tp, NULL, xpt_path_path_id(path), xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } free(old, M_CAMXPT); } static void probecleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void scsi_find_quirk(struct cam_ed *device) { struct scsi_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)scsi_quirk_table, nitems(scsi_quirk_table), sizeof(*scsi_quirk_table), scsi_inquiry_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct scsi_quirk_entry *)match; device->quirk = quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS) { int error, val; val = cam_srch_hi; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val == 0 || val == 1) { cam_srch_hi = val; return (0); } else { return (EINVAL); } } typedef struct { union ccb *request_ccb; struct ccb_pathinq *cpi; int counter; int lunindex[0]; } scsi_scan_bus_info; /* * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. * As the scan progresses, scsi_scan_bus is used as the * callback on completion function. */ static void scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { struct mtx *mtx; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("scsi_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: { scsi_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; struct cam_path *path; u_int i; u_int low_target, max_target; u_int initiator_id; /* Find out the characteristics of the bus */ work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); return; } xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_PATH_INQ; xpt_action(work_ccb); if (work_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = work_ccb->ccb_h.status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. */ request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } /* We may need to reset bus first, if we haven't done it yet. 
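*/

/*
 * Aside: scsi_scan_bus_info above ends in "int lunindex[0]", the pre-C99
 * flexible-array idiom, so the fixed header and the per-target LUN index
 * array come from a single allocation, as the malloc() a little further
 * down shows.  The sizing arithmetic in miniature (n is the number of
 * targets to track; names illustrative):
 */
#if 0	/* illustrative sketch only, not compiled */
scsi_scan_bus_info *si;

si = malloc(sizeof(*si) + n * sizeof(si->lunindex[0]),
    M_CAMXPT, M_ZERO | M_NOWAIT);
if (si != NULL)
	si->lunindex[0] = 0;	/* trailing array is directly addressable */
#endif

/*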
*/ if ((work_ccb->cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) && (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) { xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, CAM_PRIORITY_NONE); reset_ccb->ccb_h.func_code = XPT_RESET_BUS; xpt_action(reset_ccb); if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = reset_ccb->ccb_h.status; xpt_free_ccb(reset_ccb); xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_free_ccb(reset_ccb); } /* Save some state for use while we probe for devices */ scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) + (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN start for %p\n", scan_info)); scan_info->request_ccb = request_ccb; scan_info->cpi = &work_ccb->cpi; /* Cache on our stack so we can work asynchronously */ max_target = scan_info->cpi->max_target; low_target = 0; initiator_id = scan_info->cpi->initiator_id; /* * We can scan all targets in parallel, or do it sequentially. */ if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { max_target = low_target = request_ccb->ccb_h.target_id; scan_info->counter = 0; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { max_target = 0; scan_info->counter = 0; } else { scan_info->counter = scan_info->cpi->max_target + 1; if (scan_info->cpi->initiator_id < scan_info->counter) { scan_info->counter--; } } mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_unlock(mtx); for (i = low_target; i <= max_target; i++) { cam_status status; if (i == initiator_id) continue; status = xpt_create_path(&path, NULL, request_ccb->ccb_h.path_id, i, 0); if (status != CAM_REQ_CMP) { printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); break; } work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { xpt_free_ccb((union ccb *)scan_info->cpi); free(scan_info, M_CAMXPT); xpt_free_path(path); request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); break; } xpt_setup_ccb(&work_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = scsi_scan_bus; work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = request_ccb->crcn.flags; xpt_action(work_ccb); } mtx_lock(mtx); break; } case XPT_SCAN_LUN: { cam_status status; struct cam_path *path, *oldpath; scsi_scan_bus_info *scan_info; struct cam_et *target; struct cam_ed *device, *nextdev; int next_target; path_id_t path_id; target_id_t target_id; lun_id_t lun_id; oldpath = request_ccb->ccb_h.path; status = cam_ccb_status(request_ccb); scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; path_id = request_ccb->ccb_h.path_id; target_id = request_ccb->ccb_h.target_id; lun_id = request_ccb->ccb_h.target_lun; target = request_ccb->ccb_h.path->target; next_target = 1; mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_lock(mtx); mtx_lock(&target->luns_mtx); if (target->luns) { lun_id_t first; u_int nluns = scsi_4btoul(target->luns->length) / 8; /* * Make sure we skip over lun 0 if it's the first member * of the list as we've actually just 
finished probing * it. */ CAM_GET_LUN(target->luns, 0, first); if (first == 0 && scan_info->lunindex[target_id] == 0) { scan_info->lunindex[target_id]++; } /* * Skip any LUNs that the HBA can't deal with. */ while (scan_info->lunindex[target_id] < nluns) { if (scan_info->cpi->hba_misc & PIM_EXTLUNS) { CAM_GET_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } if (CAM_CAN_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id])) { CAM_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } scan_info->lunindex[target_id]++; } if (scan_info->lunindex[target_id] < nluns) { mtx_unlock(&target->luns_mtx); next_target = 0; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("next lun to try at index %u is %jx\n", scan_info->lunindex[target_id], (uintmax_t)lun_id)); scan_info->lunindex[target_id]++; } else { mtx_unlock(&target->luns_mtx); /* We're done with scanning all luns. */ } } else { mtx_unlock(&target->luns_mtx); device = request_ccb->ccb_h.path->device; /* Continue sequential LUN scan if: */ /* -- we have more LUNs that need recheck */ mtx_lock(&target->bus->eb_mtx); nextdev = device; while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL) if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0) break; mtx_unlock(&target->bus->eb_mtx); if (nextdev != NULL) { next_target = 0; /* -- stop if CAM_QUIRK_NOLUNS is set. */ } else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) { next_target = 1; /* -- this LUN is connected and its SCSI version * allows more LUNs. */ } else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_DENSE(device)) next_target = 0; /* -- this LUN is disconnected, its SCSI version * allows more LUNs and we guess they may be. */ } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_SPARSE(device)) next_target = 0; } if (next_target == 0) { lun_id++; if (lun_id > scan_info->cpi->max_lun) next_target = 1; } } /* * Check to see if we scan any further luns. */ if (next_target) { int done; /* * Free the current request path- we're done with it. 
*/ xpt_free_path(oldpath); hop_again: done = 0; if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { done = 1; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { scan_info->counter++; if (scan_info->counter == scan_info->cpi->initiator_id) { scan_info->counter++; } if (scan_info->counter >= scan_info->cpi->max_target+1) { done = 1; } } else { scan_info->counter--; if (scan_info->counter == 0) { done = 1; } } if (done) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN done for %p\n", scan_info)); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); break; } if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); break; } status = xpt_create_path(&path, NULL, scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { mtx_unlock(mtx); printf("scsi_scan_bus: xpt_create_path failed" " with status %#x, bus scan halted\n", status); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_done(request_ccb); break; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } else { status = xpt_create_path(&path, NULL, path_id, target_id, lun_id); /* * Free the old request path- we're done with it. We * do this *after* creating the new path so that * we don't remove a target that has our lun list * in the case that lun 0 is not present. */ xpt_free_path(oldpath); if (status != CAM_REQ_CMP) { printf("scsi_scan_bus: xpt_create_path failed " "with status %#x, halting LUN scan\n", status); goto hop_again; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } mtx_unlock(mtx); xpt_action(request_ccb); break; } default: break; } } static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n")); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. 
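*/

/*
 * The XPT_PATH_INQ exchange just above is the stock "ask the SIM what it
 * can do" idiom used throughout this file: fill a ccb_pathinq on the
 * stack, dispatch it synchronously, then test capability bits.  Condensed:
 */
#if 0	/* illustrative sketch only, not compiled */
struct ccb_pathinq cpi;

xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
cpi.ccb_h.func_code = XPT_PATH_INQ;
xpt_action((union ccb *)&cpi);		/* completes inline */
if (cpi.ccb_h.status == CAM_REQ_CMP &&
    (cpi.hba_misc & PIM_NOINITIATOR) == 0) {
	/* The SIM can act as an initiator, so scanning is possible. */
}
#endif

/*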
*/ if (request_ccb != NULL) { request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); } return; } if (request_ccb == NULL) { request_ccb = xpt_alloc_ccb_nowait(); if (request_ccb == NULL) { xpt_print(path, "scsi_scan_lun: can't allocate CCB, " "can't continue\n"); return; } status = xpt_create_path(&new_path, NULL, path->bus->path_id, path->target->target_id, path->device->lun_id); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: can't create path, " "can't continue\n"); xpt_free_ccb(request_ccb); return; } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->crcn.flags = flags; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; softc = (probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { status = cam_periph_alloc(proberegister, NULL, probecleanup, probestart, "probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static void xptscandone(struct cam_periph *periph, union ccb *done_ccb) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct scsi_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data and can determine a better quirk to use. */ quirk = &scsi_quirk_table[nitems(scsi_quirk_table) - 1]; device->quirk = (void *)quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; device->device_id = NULL; device->device_id_len = 0; device->supported_vpds = NULL; device->supported_vpds_len = 0; return (device); } static void scsi_devise_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct scsi_inquiry_data *inq_buf; /* Get transport information from the SIM */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); inq_buf = NULL; if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) inq_buf = &path->device->inq_data; path->device->protocol = PROTO_SCSI; path->device->protocol_version = inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; /* * Any device not using SPI3 features should * be considered SPI2 or lower. 
*/ if (inq_buf != NULL) { if (path->device->transport == XPORT_SPI && (inq_buf->spi3data & SID_SPI_MASK) == 0 && path->device->transport_version > 2) path->device->transport_version = 2; } else { struct cam_ed* otherdev; for (otherdev = TAILQ_FIRST(&path->target->ed_entries); otherdev != NULL; otherdev = TAILQ_NEXT(otherdev, links)) { if (otherdev != path->device) break; } if (otherdev != NULL) { /* * Initially assume the same versioning as * prior luns for this target. */ path->device->protocol_version = otherdev->protocol_version; path->device->transport_version = otherdev->transport_version; } else { /* Until we know better, opt for safety */ path->device->protocol_version = 2; if (path->device->transport == XPORT_SPI) path->device->transport_version = 2; else path->device->transport_version = 0; } } /* * XXX * For a device compliant with SPC-2 we should be able * to determine the transport version supported by * scrutinizing the version descriptors in the * inquiry buffer. */ /* Tell the controller what we think */ xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void scsi_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) { free(device->physpath, M_CAMXPT); device->physpath = NULL; device->physpath_len = 0; } /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } device->physpath_len = cdai->bufsiz; memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_RCAPLONG: if (cdai->flags & CDAI_FLAG_STORE) { if (device->rcap_buf != NULL) { free(device->rcap_buf, M_CAMXPT); device->rcap_buf = NULL; } device->rcap_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->rcap_buf == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->rcap_len; if (device->rcap_len == 0) break; amt = device->rcap_len; if (cdai->provsiz 
> cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->rcap_buf, amt); } break; case CDAI_TYPE_EXT_INQ: /* * We fetch extended inquiry data during probe, if * available. We don't allow changing it. */ if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->ext_inq_len; if (device->ext_inq_len == 0) break; amt = device->ext_inq_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->ext_inq, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void scsi_action(union ccb *start_ccb) { switch (start_ccb->ccb_h.func_code) { case XPT_SET_TRAN_SETTINGS: { scsi_set_transfer_settings(&start_ccb->cts, start_ccb->ccb_h.path, /*async_update*/FALSE); break; } case XPT_SCAN_BUS: case XPT_SCAN_TGT: scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); break; case XPT_SCAN_LUN: scsi_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: { scsi_dev_advinfo(start_ccb); break; } default: xpt_action_default(start_ccb); break; } } static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings cur_cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_scsi *cur_scsi; struct scsi_inquiry_data *inq_data; struct cam_ed *device; if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; } if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol_version == PROTO_VERSION_UNKNOWN || cts->protocol_version == PROTO_VERSION_UNSPECIFIED) cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { xpt_print(path, "Down reving Protocol " "Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } cts->protocol_version = device->protocol_version; } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } if (cts->transport_version == XPORT_VERSION_UNKNOWN || cts->transport_version == XPORT_VERSION_UNSPECIFIED) cts->transport_version = device->transport_version; if (cts->transport != device->transport) { xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { xpt_print(path, "Down reving Transport " "Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } /* * Nothing more of interest to do unless * this is a device connected via the * SCSI protocol. 
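*/

/*
 * The remainder of this function: bail out for non-SCSI protocols; strip
 * tagged queuing when the HBA lacks PI_TAG_ABLE, the device does not have
 * TQ enabled (or has DQue set), or the quirk table allows no tags; for
 * !async_update, fetch the current settings via XPT_GET_TRAN_SETTINGS to
 * fill in any fields the caller left unspecified; and for SPI, clamp sync
 * rate/offset, PPR options, and bus width to what both the inquiry data
 * and the HBA advertise.
 */

/*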
*/ if (cts->protocol != PROTO_SCSI) { if (async_update == FALSE) xpt_action_default((union ccb *)cts); return; } inq_data = &device->inq_data; scsi = &cts->proto_specific.scsi; xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* SCSI specific sanity checking */ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 || (device->mintags == 0)) { /* * Can't tag on hardware that doesn't support tags, * doesn't have it enabled, or has broken tag support. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } if (async_update == FALSE) { /* * Perform sanity checking against what the * controller and device can do. */ xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE); cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cur_cts.type = cts->type; xpt_action((union ccb *)&cur_cts); if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) { return; } cur_scsi = &cur_cts.proto_specific.scsi; if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) { scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB; } if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } /* SPI specific sanity checking */ if (cts->transport == XPORT_SPI && async_update == FALSE) { u_int spi3caps; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_spi *cur_spi; spi = &cts->xport_specific.spi; cur_spi = &cur_cts.xport_specific.spi; /* Fill in any gaps in what the user gave us */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = cur_spi->sync_period; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = 0; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = cur_spi->sync_offset; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = 0; if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = cur_spi->ppr_options; if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = cur_spi->bus_width; if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = 0; if ((spi->valid & CTS_SPI_VALID_DISC) == 0) { spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB; } if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && (inq_data->flags & SID_Sync) == 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) { /* Force async */ spi->sync_period = 0; spi->sync_offset = 0; } switch (spi->bus_width) { case MSG_EXT_WDTR_BUS_32_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus32) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_32) != 0) break; /* Fall Through to 16-bit */ case MSG_EXT_WDTR_BUS_16_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus16) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_16) != 0) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* Fall Through to 8-bit */ default: /* New bus width?? 
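*/

/*
 * Deliberate fall-through above: a 32-bit request that the inquiry data or
 * HBA cannot back degrades to 16 bits, and then to the 8-bit case below,
 * which every target supports.  Unrecognized width values land at
 * default: and are clamped the same way.
 */

/*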
*/ case MSG_EXT_WDTR_BUS_8_BIT: /* All targets can do this */ spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } spi3caps = cpi.xport_specific.spi.ppr_options; if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) spi3caps &= inq_data->spi3data; if ((spi3caps & SID_SPI_CLOCK_DT) == 0) spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; if ((spi3caps & SID_SPI_IUS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ; if ((spi3caps & SID_SPI_QAS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ; /* No SPI Transfer settings are allowed unless we are wide */ if (spi->bus_width == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_DISC) && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) { /* * Can't tag queue without disconnection. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; } /* * If we are currently performing tagged transactions to * this device and want to change its negotiation parameters, * go non-tagged for a bit to give the controller a chance to * negotiate unhampered by tag messages. */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (device->inq_flags & SID_CmdQue) != 0 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE| CTS_SPI_VALID_SYNC_OFFSET| CTS_SPI_VALID_BUS_WIDTH)) != 0) scsi_toggle_tags(path); } if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) { int device_tagenb; /* * If we are transitioning from tags to no-tags or * vice-versa, we need to carefully freeze and restart * the queue so that we don't overlap tagged and non-tagged * commands. We also temporarily stop tags if there is * a change in transfer negotiation settings to allow * "tag-less" negotiation. */ if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) device_tagenb = TRUE; else device_tagenb = FALSE; if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && device_tagenb == FALSE) || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0 && device_tagenb == TRUE)) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) { /* * Delay change to use tags until after a * few commands have gone to this device so * the controller has time to perform transfer * negotiations without tagged messages getting * in the way. */ device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else { xpt_stop_tags(path); } } } if (async_update == FALSE) xpt_action_default((union ccb *)cts); } static void scsi_toggle_tags(struct cam_path *path) { struct cam_ed *dev; /* * Give controllers a chance to renegotiate * before starting tag operations. We * "toggle" tagged queuing off then on * which causes the tag enable command delay * counter to come into effect. */ dev = path->device; if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || ((dev->inq_flags & SID_CmdQue) != 0 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { struct ccb_trans_settings cts; xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.protocol = PROTO_SCSI; cts.protocol_version = PROTO_VERSION_UNSPECIFIED; cts.transport = XPORT_UNSPECIFIED; cts.transport_version = XPORT_VERSION_UNSPECIFIED; cts.proto_specific.scsi.flags = 0; cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); } } /* * Handle any per-device event notifications that require action by the XPT. 
*/ static void scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { cam_status status; struct cam_path newpath; /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; /* * We need our own path with wildcards expanded to * handle certain types of events. */ if ((async_code == AC_SENT_BDR) || (async_code == AC_BUS_RESET) || (async_code == AC_INQ_CHANGED)) status = xpt_compile_path(&newpath, NULL, bus->path_id, target->target_id, device->lun_id); else status = CAM_REQ_CMP_ERR; if (status == CAM_REQ_CMP) { /* * Allow transfer negotiation to occur in a * tag free environment and after settle delay. */ if (async_code == AC_SENT_BDR || async_code == AC_BUS_RESET) { cam_freeze_devq(&newpath); cam_release_devq(&newpath, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/scsi_delay, /*getcount_only*/0); scsi_toggle_tags(&newpath); } if (async_code == AC_INQ_CHANGED) { /* * We've sent a start unit command, or * something similar to a device that * may have caused its inquiry data to * change. So we re-scan the device to * refresh the inquiry data for it. */ scsi_scan_lun(newpath.periph, &newpath, CAM_EXPECT_INQ_CHANGE, NULL); } xpt_release_path(&newpath); } else if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; xpt_compile_path(&path, NULL, bus->path_id, target->target_id, device->lun_id); scsi_set_transfer_settings(settings, &path, /*async_update*/TRUE); xpt_release_path(&path); } } static void _scsi_announce_periph(struct cam_periph *periph, u_int *speed, u_int *freq, struct ccb_trans_settings *cts) { struct ccb_pathinq cpi; struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL); cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts->type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)cts); if (cam_ccb_status((union ccb *)cts) != CAM_REQ_CMP) return; /* Ask the SIM for its base transfer speed */ xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* Report connection speed */ *speed = cpi.base_transfer_speed; *freq = 0; if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0 && spi->sync_offset != 0) { *freq = scsi_calc_syncsrate(spi->sync_period); *speed = *freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) *speed *= (0x01 << spi->bus_width); } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) *speed = fc->bitrate; } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) *speed = sas->bitrate; } } static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct ccb_trans_settings cts; u_int speed, freq, mb; _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) 
sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { sbuf_printf(sb, " (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { sbuf_printf(sb, ", "); } else { sbuf_printf(sb, " ("); } sbuf_printf(sb, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { sbuf_printf(sb, ")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) sbuf_printf(sb, " WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) sbuf_printf(sb, " WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) sbuf_printf(sb, " PortID 0x%x", fc->port); } sbuf_printf(sb, "\n"); } static void scsi_announce_periph(struct cam_periph *periph) { struct ccb_trans_settings cts; u_int speed, freq, mb; _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) printf("%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else printf("%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { printf(" (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { printf(", "); } else { printf(" ("); } printf("%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { printf(")"); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) printf(" WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) printf(" WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) printf(" PortID 0x%x", fc->port); } printf("\n"); } static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_sbuf(sb, &device->inq_data); } static void scsi_proto_announce(struct cam_ed *device) { scsi_print_inquiry(&device->inq_data); } static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_short_sbuf(sb, &device->inq_data); } static void scsi_proto_denounce(struct cam_ed *device) { scsi_print_inquiry_short(&device->inq_data); } static void scsi_proto_debug_out(union ccb *ccb) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; struct cam_ed *device; if (ccb->ccb_h.func_code != XPT_SCSI_IO) return; device = ccb->ccb_h.path->device; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. 
CDB: %s\n", scsi_op_desc(scsiio_cdb_ptr(&ccb->csio)[0], &device->inq_data), scsi_cdb_string(scsiio_cdb_ptr(&ccb->csio), cdb_str, sizeof(cdb_str)))); } Index: head/sys/cam/scsi/smp_all.c =================================================================== --- head/sys/cam/scsi/smp_all.c (revision 326264) +++ head/sys/cam/scsi/smp_all.c (revision 326265) @@ -1,619 +1,621 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2010 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test/sys/cam/scsi/smp_all.c#4 $ */ /* * Serial Management Protocol helper functions. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #ifdef _KERNEL #include #include #include #else /* _KERNEL */ #include #include #include #include #include #endif /* _KERNEL */ #include #include #include #include #include #ifndef _KERNEL #include #endif static char *smp_yesno(int val); static char * smp_yesno(int val) { char *str; if (val) str = "Yes"; else str = "No"; return (str); } struct smp_error_table_entry { uint8_t function_result; const char *desc; }; /* List current as of SPL Revision 7 */ static struct smp_error_table_entry smp_error_table[] = { {SMP_FR_ACCEPTED, "SMP Function Accepted"}, {SMP_FR_UNKNOWN_FUNC, "Unknown SMP Function"}, {SMP_FR_FUNCTION_FAILED, "SMP Function Failed"}, {SMP_FR_INVALID_REQ_FRAME_LEN, "Invalid Request Frame Length"}, {SMP_FR_INVALID_EXP_CHG_CNT, "Invalid Expander Change Count"}, {SMP_FR_BUSY, "Busy"}, {SMP_FR_INCOMPLETE_DESC_LIST, "Incomplete Descriptor List"}, {SMP_FR_PHY_DOES_NOT_EXIST, "Phy Does Not Exist"}, {SMP_FR_INDEX_DOES_NOT_EXIST, "Index Does Not Exist"}, {SMP_FR_PHY_DOES_NOT_SUP_SATA, "Phy Does Not Support SATA"}, {SMP_FR_UNKNOWN_PHY_OP, "Unknown Phy Operation"}, {SMP_FR_UNKNOWN_PHY_TEST_FUNC, "Unknown Phy Test Function"}, {SMP_FR_PHY_TEST_FUNC_INPROG, "Phy Test Function In Progress"}, {SMP_FR_PHY_VACANT, "Phy Vacant"}, {SMP_FR_UNKNOWN_PHY_EVENT_SRC, "Unknown Phy Event Source"}, {SMP_FR_UNKNOWN_DESC_TYPE, "Unknown Descriptor Type"}, {SMP_FR_UNKNOWN_PHY_FILTER, "Unknown Phy Filter"}, {SMP_FR_AFFILIATION_VIOLATION, "Affiliation Violation"}, {SMP_FR_SMP_ZONE_VIOLATION, "SMP Zone Violation"}, {SMP_FR_NO_MGMT_ACCESS_RIGHTS, "No Management Access Rights"}, {SMP_FR_UNKNOWN_ED_ZONING_VAL, "Unknown Enable Disable Zoning Value"}, {SMP_FR_ZONE_LOCK_VIOLATION, "Zone Lock Violation"}, {SMP_FR_NOT_ACTIVATED, "Not Activated"}, {SMP_FR_ZG_OUT_OF_RANGE, "Zone Group Out of Range"}, {SMP_FR_NO_PHYS_PRESENCE, "No Physical Presence"}, {SMP_FR_SAVING_NOT_SUP, "Saving Not Supported"}, {SMP_FR_SRC_ZONE_DNE, "Source Zone Group Does Not Exist"}, {SMP_FR_DISABLED_PWD_NOT_SUP, "Disabled Password Not Supported"} }; const char * smp_error_desc(int function_result) { int i; for (i = 0; i < nitems(smp_error_table); i++){ if (function_result == smp_error_table[i].function_result) return (smp_error_table[i].desc); } return ("Reserved Function Result"); } /* List current as of SPL Revision 7 */ struct smp_cmd_table_entry { uint8_t cmd_num; const char *desc; } smp_cmd_table[] = { {SMP_FUNC_REPORT_GENERAL, "REPORT GENERAL"}, {SMP_FUNC_REPORT_MANUF_INFO, "REPORT MANUFACTURER INFORMATION"}, {SMP_FUNC_REPORT_SC_STATUS, "REPORT SELF-CONFIGURATION STATUS"}, {SMP_FUNC_REPORT_ZONE_PERM_TBL, "REPORT ZONE PERMISSION TABLE"}, {SMP_FUNC_REPORT_BROADCAST, "REPORT BROADCAST"}, {SMP_FUNC_DISCOVER, "DISCOVER"}, {SMP_FUNC_REPORT_PHY_ERR_LOG, "REPORT PHY ERROR LOG"}, {SMP_FUNC_REPORT_PHY_SATA, "REPORT PHY SATA"}, {SMP_FUNC_REPORT_ROUTE_INFO, "REPORT ROUTE INFORMATION"}, {SMP_FUNC_REPORT_PHY_EVENT, "REPORT PHY EVENT"}, {SMP_FUNC_DISCOVER_LIST, "DISCOVER LIST"}, {SMP_FUNC_REPORT_PHY_EVENT_LIST, "REPORT PHY EVENT LIST"}, {SMP_FUNC_REPORT_EXP_RTL, "REPORT EXPANDER ROUTE TABLE LIST"}, {SMP_FUNC_CONFIG_GENERAL, "CONFIGURE GENERAL"}, {SMP_FUNC_ENABLE_DISABLE_ZONING, "ENABLE DISABLE ZONING"}, {SMP_FUNC_ZONED_BROADCAST, "ZONED BROADCAST"}, {SMP_FUNC_ZONE_LOCK, "ZONE LOCK"}, {SMP_FUNC_ZONE_ACTIVATE, "ZONE ACTIVATE"}, {SMP_FUNC_ZONE_UNLOCK, "ZONE UNLOCK"}, {SMP_FUNC_CONFIG_ZM_PWD, "CONFIGURE ZONE MANAGER PASSWORD"}, {SMP_FUNC_CONFIG_ZONE_PHY_INFO, "CONFIGURE ZONE PHY INFORMATION"}, 
{SMP_FUNC_CONFIG_ZONE_PERM_TBL, "CONFIGURE ZONE PERMISSION TABLE"}, {SMP_FUNC_CONFIG_ROUTE_INFO, "CONFIGURE ROUTE INFORMATION"}, {SMP_FUNC_PHY_CONTROL, "PHY CONTROL"}, {SMP_FUNC_PHY_TEST_FUNC, "PHY TEST FUNCTION"}, {SMP_FUNC_CONFIG_PHY_EVENT, "CONFIGURE PHY EVENT"} }; const char * smp_command_desc(uint8_t cmd_num) { int i; for (i = 0; i < nitems(smp_cmd_table) && smp_cmd_table[i].cmd_num <= cmd_num; i++) { if (cmd_num == smp_cmd_table[i].cmd_num) return (smp_cmd_table[i].desc); } /* * 0x40 to 0x7f and 0xc0 to 0xff are the vendor specific SMP * command ranges. */ if (((cmd_num >= 0x40) && (cmd_num <= 0x7f)) || (cmd_num >= 0xc0)) { return ("Vendor Specific SMP Command"); } else { return ("Unknown SMP Command"); } } /* * Decode a SMP request buffer into a string of hexadecimal numbers. * * smp_request: SMP request * request_len: length of the SMP request buffer, may be reduced if the * caller only wants part of the buffer printed * sb: sbuf(9) buffer * line_prefix: prefix for new lines, or an empty string ("") * first_line_len: length left on first line * line_len: total length of subsequent lines, 0 for no additional lines * if there are no additional lines, first line will get ... * at the end if there is additional data */ void smp_command_decode(uint8_t *smp_request, int request_len, struct sbuf *sb, char *line_prefix, int first_line_len, int line_len) { int i, cur_len; for (i = 0, cur_len = first_line_len; i < request_len; i++) { /* * Each byte takes 3 characters. As soon as we go less * than 6 (meaning we have at least 3 and at most 5 * characters left), check to see whether the subsequent * line length (line_len) is long enough to bother with. * If the user set it to 0, or some other length that isn't * enough to hold at least the prefix and one byte, put ... * on the first line to indicate that there is more data * and bail out. */ if ((cur_len < 6) && (line_len < (strlen(line_prefix) + 3))) { sbuf_printf(sb, "..."); return; } if (cur_len < 3) { sbuf_printf(sb, "\n%s", line_prefix); cur_len = line_len - strlen(line_prefix); } sbuf_printf(sb, "%02x ", smp_request[i]); cur_len = cur_len - 3; } } void smp_command_sbuf(struct ccb_smpio *smpio, struct sbuf *sb, char *line_prefix, int first_line_len, int line_len) { sbuf_printf(sb, "%s. ", smp_command_desc(smpio->smp_request[1])); /* * Account for the command description and the period and space * after the command description. */ first_line_len -= strlen(smp_command_desc(smpio->smp_request[1])) + 2; smp_command_decode(smpio->smp_request, smpio->smp_request_len, sb, line_prefix, first_line_len, line_len); } /* * Print SMP error output. For userland commands, we need the cam_device * structure so we can get the path information from the CCB. */ #ifdef _KERNEL void smp_error_sbuf(struct ccb_smpio *smpio, struct sbuf *sb) #else /* !_KERNEL*/ void smp_error_sbuf(struct cam_device *device, struct ccb_smpio *smpio, struct sbuf *sb) #endif /* _KERNEL/!_KERNEL */ { char path_str[64]; #ifdef _KERNEL xpt_path_string(smpio->ccb_h.path, path_str, sizeof(path_str)); #else cam_path_string(device, path_str, sizeof(path_str)); #endif smp_command_sbuf(smpio, sb, path_str, 80 - strlen(path_str), 80); sbuf_printf(sb, "\n"); sbuf_cat(sb, path_str); sbuf_printf(sb, "SMP Error: %s (0x%x)\n", smp_error_desc(smpio->smp_response[2]), smpio->smp_response[2]); } /* * Decode the SMP REPORT GENERAL response. The format is current as of SPL * Revision 7, but the parsing should be backward compatible for older * versions of the spec.
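*/

/*
 * smp_command_decode() above budgets three output characters per byte
 * ("xx "), prints "..." and stops when fewer than six columns remain and
 * line_len cannot hold even the prefix plus one byte, and otherwise wraps
 * to "\n<prefix>" once fewer than three columns remain.  The same
 * algorithm as a freestanding printf sketch (hex_decode is an illustrative
 * name):
 */
#if 0	/* illustrative sketch only, not compiled */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void
hex_decode(const uint8_t *buf, int len, const char *prefix,
    int first_line_len, int line_len)
{
	int i, cur_len;

	for (i = 0, cur_len = first_line_len; i < len; i++) {
		if (cur_len < 6 && line_len < (int)strlen(prefix) + 3) {
			printf("...");	/* more data, nowhere to put it */
			return;
		}
		if (cur_len < 3) {	/* current line is full; wrap */
			printf("\n%s", prefix);
			cur_len = line_len - (int)strlen(prefix);
		}
		printf("%02x ", buf[i]);
		cur_len -= 3;
	}
}
#endif

/*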
*/ void smp_report_general_sbuf(struct smp_report_general_response *response, int response_len, struct sbuf *sb) { sbuf_printf(sb, "Report General\n"); sbuf_printf(sb, "Response Length: %d words (%d bytes)\n", response->response_len, response->response_len * SMP_WORD_LEN); sbuf_printf(sb, "Expander Change Count: %d\n", scsi_2btoul(response->expander_change_count)); sbuf_printf(sb, "Expander Route Indexes: %d\n", scsi_2btoul(response->expander_route_indexes)); sbuf_printf(sb, "Long Response: %s\n", smp_yesno(response->long_response & SMP_RG_LONG_RESPONSE)); sbuf_printf(sb, "Number of Phys: %d\n", response->num_phys); sbuf_printf(sb, "Table to Table Supported: %s\n", smp_yesno(response->config_bits0 & SMP_RG_TABLE_TO_TABLE_SUP)); sbuf_printf(sb, "Zone Configuring: %s\n", smp_yesno(response->config_bits0 & SMP_RG_ZONE_CONFIGURING)); sbuf_printf(sb, "Self Configuring: %s\n", smp_yesno(response->config_bits0 & SMP_RG_SELF_CONFIGURING)); sbuf_printf(sb, "STP Continue AWT: %s\n", smp_yesno(response->config_bits0 & SMP_RG_STP_CONTINUE_AWT)); sbuf_printf(sb, "Open Reject Retry Supported: %s\n", smp_yesno(response->config_bits0 & SMP_RG_OPEN_REJECT_RETRY_SUP)); sbuf_printf(sb, "Configures Others: %s\n", smp_yesno(response->config_bits0 & SMP_RG_CONFIGURES_OTHERS)); sbuf_printf(sb, "Configuring: %s\n", smp_yesno(response->config_bits0 & SMP_RG_CONFIGURING)); sbuf_printf(sb, "Externally Configurable Route Table: %s\n", smp_yesno(response->config_bits0 & SMP_RG_CONFIGURING)); sbuf_printf(sb, "Enclosure Logical Identifier: 0x%016jx\n", (uintmax_t)scsi_8btou64(response->encl_logical_id)); /* * If the response->response_len is 0, then we don't have the * extended information. Also, if the user didn't allocate enough * space for the full request, don't try to parse it. 
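*/

/*
 * The scsi_2btoul()/scsi_8btou64() calls in this parser assemble
 * big-endian wire fields into host integers (SMP, like SCSI, is
 * big-endian on the wire).  Minimal equivalents of the CAM helpers, with
 * illustrative names:
 */
#if 0	/* illustrative sketch only, not compiled */
static __inline uint32_t
be2_get(const uint8_t *b)	/* cf. scsi_2btoul() */
{
	return (((uint32_t)b[0] << 8) | b[1]);
}

static __inline uint64_t
be8_get(const uint8_t *b)	/* cf. scsi_8btou64() */
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return (v);
}
#endif

/*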
*/ if ((response->response_len == 0) || (response_len < (sizeof(struct smp_report_general_response) - sizeof(response->crc)))) return; sbuf_printf(sb, "STP Bus Inactivity Time Limit: %d\n", scsi_2btoul(response->stp_bus_inact_time_limit)); sbuf_printf(sb, "STP Maximum Connect Time Limit: %d\n", scsi_2btoul(response->stp_max_conn_time_limit)); sbuf_printf(sb, "STP SMP I_T Nexus Loss Time: %d\n", scsi_2btoul(response->stp_smp_it_nexus_loss_time)); sbuf_printf(sb, "Number of Zone Groups: %d\n", (response->config_bits1 & SMP_RG_NUM_ZONE_GROUPS_MASK) >> SMP_RG_NUM_ZONE_GROUPS_SHIFT); sbuf_printf(sb, "Zone Locked: %s\n", smp_yesno(response->config_bits1 & SMP_RG_ZONE_LOCKED)); sbuf_printf(sb, "Physical Presence Supported: %s\n", smp_yesno(response->config_bits1 & SMP_RG_PP_SUPPORTED)); sbuf_printf(sb, "Physical Presence Asserted: %s\n", smp_yesno(response->config_bits1 & SMP_RG_PP_ASSERTED)); sbuf_printf(sb, "Zoning Supported: %s\n", smp_yesno(response->config_bits1 & SMP_RG_ZONING_SUPPORTED)); sbuf_printf(sb, "Zoning Enabled: %s\n", smp_yesno(response->config_bits1 & SMP_RG_ZONING_ENABLED)); sbuf_printf(sb, "Saving: %s\n", smp_yesno(response->config_bits2 & SMP_RG_SAVING)); sbuf_printf(sb, "Saving Zone Manager Password Supported: %s\n", smp_yesno(response->config_bits2 & SMP_RG_SAVING_ZM_PWD_SUP)); sbuf_printf(sb, "Saving Zone Phy Information Supported: %s\n", smp_yesno(response->config_bits2 & SMP_RG_SAVING_PHY_INFO_SUP)); sbuf_printf(sb, "Saving Zone Permission Table Supported: %s\n", smp_yesno(response->config_bits2 & SMP_RG_SAVING_ZPERM_TAB_SUP)); sbuf_printf(sb, "Saving Zoning Enabled Supported: %s\n", smp_yesno(response->config_bits2 & SMP_RG_SAVING_ZENABLED_SUP)); sbuf_printf(sb, "Maximum Number of Routed SAS Addresses: %d\n", scsi_2btoul(response->max_num_routed_addrs)); sbuf_printf(sb, "Active Zone Manager SAS Address: 0x%016jx\n", scsi_8btou64(response->active_zm_address)); sbuf_printf(sb, "Zone Inactivity Time Limit: %d\n", scsi_2btoul(response->zone_lock_inact_time_limit)); sbuf_printf(sb, "First Enclosure Connector Element Index: %d\n", response->first_encl_conn_el_index); sbuf_printf(sb, "Number of Enclosure Connector Element Indexes: %d\n", response->num_encl_conn_el_indexes); sbuf_printf(sb, "Reduced Functionality: %s\n", smp_yesno(response->reduced_functionality & SMP_RG_REDUCED_FUNCTIONALITY)); sbuf_printf(sb, "Time to Reduced Functionality: %d\n", response->time_to_reduced_func); sbuf_printf(sb, "Initial Time to Reduced Functionality: %d\n", response->initial_time_to_reduced_func); sbuf_printf(sb, "Maximum Reduced Functionality Time: %d\n", response->max_reduced_func_time); sbuf_printf(sb, "Last Self-Configuration Status Descriptor Index: %d\n", scsi_2btoul(response->last_sc_stat_desc_index)); sbuf_printf(sb, "Maximum Number of Stored Self-Configuration " "Status Descriptors: %d\n", scsi_2btoul(response->max_sc_stat_descs)); sbuf_printf(sb, "Last Phy Event List Descriptor Index: %d\n", scsi_2btoul(response->last_phy_evl_desc_index)); sbuf_printf(sb, "Maximum Number of Stored Phy Event List " "Descriptors: %d\n", scsi_2btoul(response->max_stored_pel_descs)); sbuf_printf(sb, "STP Reject to Open Limit: %d\n", scsi_2btoul(response->stp_reject_to_open_limit)); } /* * Decode the SMP REPORT MANUFACTURER INFORMATION response. The format is * current as of SPL Revision 7, but the parsing should be backward * compatible for older versions of the spec.
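*/

/*
 * The non-SAS-1.1-format branch of the function below hand-rolls an
 * hd(1)-style dump (hex offset column, 16 bytes per line, an extra gap
 * after the eighth byte), and its inline comment wishes for a library
 * routine.  A freestanding printf version of that layout (the function
 * name is illustrative):
 */
#if 0	/* illustrative sketch only, not compiled */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void
hexdump_hd_style(const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if ((i % 16) == 0)
			printf("%08zx  ", i);	/* offset column */
		printf("%02x%s", buf[i], ((i % 16) == 7) ? "  " : " ");
		if ((i % 16) == 15)
			printf("\n");
	}
	if ((len % 16) != 0)
		printf("\n");	/* close a partial last line */
}
#endif

/*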
*/ void smp_report_manuf_info_sbuf(struct smp_report_manuf_info_response *response, int response_len, struct sbuf *sb) { char vendor[16], product[48], revision[16]; char comp_vendor[16]; sbuf_printf(sb, "Report Manufacturer Information\n"); sbuf_printf(sb, "Expander Change count: %d\n", scsi_2btoul(response->expander_change_count)); sbuf_printf(sb, "SAS 1.1 Format: %s\n", smp_yesno(response->sas_11_format & SMP_RMI_SAS11_FORMAT)); cam_strvis(vendor, response->vendor, sizeof(response->vendor), sizeof(vendor)); cam_strvis(product, response->product, sizeof(response->product), sizeof(product)); cam_strvis(revision, response->revision, sizeof(response->revision), sizeof(revision)); sbuf_printf(sb, "<%s %s %s>\n", vendor, product, revision); if ((response->sas_11_format & SMP_RMI_SAS11_FORMAT) == 0) { uint8_t *curbyte; int line_start, line_cursor; sbuf_printf(sb, "Vendor Specific Data:\n"); /* * Print out the bytes roughly in the style of hd(1), but * without the extra ASCII decoding. Hexadecimal line * numbers on the left, and 16 bytes per line, with an * extra space after the first 8 bytes. * * It would be nice if this sort of thing were available * in a library routine. */ for (curbyte = (uint8_t *)&response->comp_vendor, line_start= 1, line_cursor = 0; curbyte < (uint8_t *)&response->crc; curbyte++, line_cursor++) { if (line_start != 0) { sbuf_printf(sb, "%08lx ", (unsigned long)(curbyte - (uint8_t *)response)); line_start = 0; line_cursor = 0; } sbuf_printf(sb, "%02x", *curbyte); if (line_cursor == 15) { sbuf_printf(sb, "\n"); line_start = 1; } else sbuf_printf(sb, " %s", (line_cursor == 7) ? " " : ""); } if (line_cursor != 16) sbuf_printf(sb, "\n"); return; } cam_strvis(comp_vendor, response->comp_vendor, sizeof(response->comp_vendor), sizeof(comp_vendor)); sbuf_printf(sb, "Component Vendor: %s\n", comp_vendor); sbuf_printf(sb, "Component ID: %#x\n", scsi_2btoul(response->comp_id)); sbuf_printf(sb, "Component Revision: %#x\n", response->comp_revision); sbuf_printf(sb, "Vendor Specific: 0x%016jx\n", (uintmax_t)scsi_8btou64(response->vendor_specific)); } /* * Compose a SMP REPORT GENERAL request and put it into a CCB. This is * current as of SPL Revision 7. */ void smp_report_general(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_report_general_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t timeout) { cam_fill_smpio(smpio, retries, cbfcnp, /*flags*/CAM_DIR_BOTH, (uint8_t *)request, request_len - SMP_CRC_LEN, response, response_len, timeout); bzero(request, sizeof(*request)); request->frame_type = SMP_FRAME_TYPE_REQUEST; request->function = SMP_FUNC_REPORT_GENERAL; request->response_len = long_response ? SMP_RG_RESPONSE_LEN : 0; request->request_len = 0; } /* * Compose a SMP DISCOVER request and put it into a CCB. This is current * as of SPL Revision 7. */ void smp_discover(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_discover_request *request, int request_len, uint8_t *response, int response_len, int long_response, int ignore_zone_group, int phy, uint32_t timeout) { cam_fill_smpio(smpio, retries, cbfcnp, /*flags*/CAM_DIR_BOTH, (uint8_t *)request, request_len - SMP_CRC_LEN, response, response_len, timeout); bzero(request, sizeof(*request)); request->frame_type = SMP_FRAME_TYPE_REQUEST; request->function = SMP_FUNC_DISCOVER; request->response_len = long_response ? 
SMP_DIS_RESPONSE_LEN : 0; request->request_len = long_response ? SMP_DIS_REQUEST_LEN : 0; if (ignore_zone_group != 0) request->ignore_zone_group |= SMP_DIS_IGNORE_ZONE_GROUP; request->phy = phy; } /* * Compose a SMP REPORT MANUFACTURER INFORMATION request and put it into a * CCB. This is current as of SPL Revision 7. */ void smp_report_manuf_info(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_report_manuf_info_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t timeout) { cam_fill_smpio(smpio, retries, cbfcnp, /*flags*/CAM_DIR_BOTH, (uint8_t *)request, request_len - SMP_CRC_LEN, response, response_len, timeout); bzero(request, sizeof(*request)); request->frame_type = SMP_FRAME_TYPE_REQUEST; request->function = SMP_FUNC_REPORT_MANUF_INFO; request->response_len = long_response ? SMP_RMI_RESPONSE_LEN : 0; request->request_len = long_response ? SMP_RMI_REQUEST_LEN : 0; } /* * Compose a SMP PHY CONTROL request and put it into a CCB. This is * current as of SPL Revision 7. */ void smp_phy_control(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_phy_control_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t expected_exp_change_count, int phy, int phy_op, int update_pp_timeout_val, uint64_t attached_device_name, int prog_min_prl, int prog_max_prl, int slumber_partial, int pp_timeout_value, uint32_t timeout) { cam_fill_smpio(smpio, retries, cbfcnp, /*flags*/CAM_DIR_BOTH, (uint8_t *)request, request_len - SMP_CRC_LEN, response, response_len, timeout); bzero(request, sizeof(*request)); request->frame_type = SMP_FRAME_TYPE_REQUEST; request->function = SMP_FUNC_PHY_CONTROL; request->response_len = long_response ? SMP_PC_RESPONSE_LEN : 0; request->request_len = long_response ? SMP_PC_REQUEST_LEN : 0; scsi_ulto2b(expected_exp_change_count, request->expected_exp_chg_cnt); request->phy = phy; request->phy_operation = phy_op; if (update_pp_timeout_val != 0) request->update_pp_timeout |= SMP_PC_UPDATE_PP_TIMEOUT; scsi_u64to8b(attached_device_name, request->attached_device_name); request->prog_min_phys_link_rate = (prog_min_prl << SMP_PC_PROG_MIN_PL_RATE_SHIFT) & SMP_PC_PROG_MIN_PL_RATE_MASK; request->prog_max_phys_link_rate = (prog_max_prl << SMP_PC_PROG_MAX_PL_RATE_SHIFT) & SMP_PC_PROG_MAX_PL_RATE_MASK; request->config_bits0 = slumber_partial; request->pp_timeout_value = pp_timeout_value; } Index: head/sys/cam/scsi/smp_all.h =================================================================== --- head/sys/cam/scsi/smp_all.h (revision 326264) +++ head/sys/cam/scsi/smp_all.h (revision 326265) @@ -1,520 +1,522 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2010 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test/sys/cam/scsi/smp_all.h#4 $ * $FreeBSD$ */ /* * Serial Management Protocol definitions. */ #ifndef _SCSI_SMP_ALL_H #define _SCSI_SMP_ALL_H 1 #define SMP_FRAME_TYPE_REQUEST 0x40 #define SMP_FRAME_TYPE_RESPONSE 0x41 #define SMP_WORD_LEN 4 #define SMP_CRC_LEN 4 /* * SMP Functions (current as of SPL Revision 7) */ /* 0x00 to 0x7f: SMP input functions */ /* 0x00 to 0x0f: General SMP input functions */ #define SMP_FUNC_REPORT_GENERAL 0x00 #define SMP_FUNC_REPORT_MANUF_INFO 0x01 #define SMP_FUNC_REPORT_SC_STATUS 0x03 #define SMP_FUNC_REPORT_ZONE_PERM_TBL 0x04 #define SMP_FUNC_REPORT_ZONE_MAN_PWD 0x05 #define SMP_FUNC_REPORT_BROADCAST 0x06 /* 0x10 to 0x1f: Phy-based SMP input functions */ #define SMP_FUNC_DISCOVER 0x10 #define SMP_FUNC_REPORT_PHY_ERR_LOG 0x11 #define SMP_FUNC_REPORT_PHY_SATA 0x12 #define SMP_FUNC_REPORT_ROUTE_INFO 0x13 #define SMP_FUNC_REPORT_PHY_EVENT 0x14 /* 0x20 to 0x2f: Descriptor list-based SMP input functions */ #define SMP_FUNC_DISCOVER_LIST 0x20 #define SMP_FUNC_REPORT_PHY_EVENT_LIST 0x21 #define SMP_FUNC_REPORT_EXP_RTL 0x22 /* 0x30 to 0x3f: Reserved for SMP input functions */ /* 0x40 to 0x7f: Vendor specific */ /* 0x80 to 0xff: SMP output functions */ /* 0x80 to 0x8f: General SMP output functions */ #define SMP_FUNC_CONFIG_GENERAL 0x80 #define SMP_FUNC_ENABLE_DISABLE_ZONING 0x81 #define SMP_FUNC_ZONED_BROADCAST 0x85 #define SMP_FUNC_ZONE_LOCK 0x86 #define SMP_FUNC_ZONE_ACTIVATE 0x87 #define SMP_FUNC_ZONE_UNLOCK 0x88 #define SMP_FUNC_CONFIG_ZM_PWD 0x89 #define SMP_FUNC_CONFIG_ZONE_PHY_INFO 0x8a #define SMP_FUNC_CONFIG_ZONE_PERM_TBL 0x8b /* 0x90 to 0x9f: Phy-based SMP output functions */ #define SMP_FUNC_CONFIG_ROUTE_INFO 0x90 #define SMP_FUNC_PHY_CONTROL 0x91 #define SMP_FUNC_PHY_TEST_FUNC 0x92 #define SMP_FUNC_CONFIG_PHY_EVENT 0x93 /* 0xa0 to 0xbf: Reserved for SMP output functions */ /* 0xc0 to 0xff: Vendor specific */ /* * Function Results (current as of SPL Revision 7) */ #define SMP_FR_ACCEPTED 0x00 #define SMP_FR_UNKNOWN_FUNC 0x01 #define SMP_FR_FUNCTION_FAILED 0x02 #define SMP_FR_INVALID_REQ_FRAME_LEN 0x03 #define SMP_FR_INVALID_EXP_CHG_CNT 0x04 #define SMP_FR_BUSY 0x05 #define SMP_FR_INCOMPLETE_DESC_LIST 0x06 #define SMP_FR_PHY_DOES_NOT_EXIST 0x10 #define SMP_FR_INDEX_DOES_NOT_EXIST 0x11 #define SMP_FR_PHY_DOES_NOT_SUP_SATA 0x12 #define SMP_FR_UNKNOWN_PHY_OP 0x13 #define SMP_FR_UNKNOWN_PHY_TEST_FUNC 0x14 #define SMP_FR_PHY_TEST_FUNC_INPROG 0x15 #define SMP_FR_PHY_VACANT 0x16 #define SMP_FR_UNKNOWN_PHY_EVENT_SRC 0x17 #define SMP_FR_UNKNOWN_DESC_TYPE 0x18 #define SMP_FR_UNKNOWN_PHY_FILTER 0x19 #define SMP_FR_AFFILIATION_VIOLATION 0x1a #define SMP_FR_SMP_ZONE_VIOLATION 0x20 #define SMP_FR_NO_MGMT_ACCESS_RIGHTS 0x21 #define SMP_FR_UNKNOWN_ED_ZONING_VAL 0x22 #define 
SMP_FR_ZONE_LOCK_VIOLATION 0x23 #define SMP_FR_NOT_ACTIVATED 0x24 #define SMP_FR_ZG_OUT_OF_RANGE 0x25 #define SMP_FR_NO_PHYS_PRESENCE 0x26 #define SMP_FR_SAVING_NOT_SUP 0x27 #define SMP_FR_SRC_ZONE_DNE 0x28 #define SMP_FR_DISABLED_PWD_NOT_SUP 0x29 /* * REPORT GENERAL request and response, current as of SPL Revision 7. */ struct smp_report_general_request { uint8_t frame_type; uint8_t function; uint8_t response_len; uint8_t request_len; uint8_t crc[4]; }; struct smp_report_general_response { uint8_t frame_type; uint8_t function; uint8_t function_result; uint8_t response_len; #define SMP_RG_RESPONSE_LEN 0x11 uint8_t expander_change_count[2]; uint8_t expander_route_indexes[2]; uint8_t long_response; #define SMP_RG_LONG_RESPONSE 0x80 uint8_t num_phys; uint8_t config_bits0; #define SMP_RG_TABLE_TO_TABLE_SUP 0x80 #define SMP_RG_ZONE_CONFIGURING 0x40 #define SMP_RG_SELF_CONFIGURING 0x20 #define SMP_RG_STP_CONTINUE_AWT 0x10 #define SMP_RG_OPEN_REJECT_RETRY_SUP 0x08 #define SMP_RG_CONFIGURES_OTHERS 0x04 #define SMP_RG_CONFIGURING 0x02 #define SMP_RG_EXT_CONFIG_ROUTE_TABLE 0x01 uint8_t reserved0; uint8_t encl_logical_id[8]; uint8_t reserved1[8]; uint8_t reserved2[2]; uint8_t stp_bus_inact_time_limit[2]; uint8_t stp_max_conn_time_limit[2]; uint8_t stp_smp_it_nexus_loss_time[2]; uint8_t config_bits1; #define SMP_RG_NUM_ZONE_GROUPS_MASK 0xc0 #define SMP_RG_NUM_ZONE_GROUPS_SHIFT 6 #define SMP_RG_ZONE_LOCKED 0x10 #define SMP_RG_PP_SUPPORTED 0x08 #define SMP_RG_PP_ASSERTED 0x04 #define SMP_RG_ZONING_SUPPORTED 0x02 #define SMP_RG_ZONING_ENABLED 0x01 uint8_t config_bits2; #define SMP_RG_SAVING 0x10 #define SMP_RG_SAVING_ZM_PWD_SUP 0x08 #define SMP_RG_SAVING_PHY_INFO_SUP 0x04 #define SMP_RG_SAVING_ZPERM_TAB_SUP 0x02 #define SMP_RG_SAVING_ZENABLED_SUP 0x01 uint8_t max_num_routed_addrs[2]; uint8_t active_zm_address[8]; uint8_t zone_lock_inact_time_limit[2]; uint8_t reserved3[2]; uint8_t reserved4; uint8_t first_encl_conn_el_index; uint8_t num_encl_conn_el_indexes; uint8_t reserved5; uint8_t reduced_functionality; #define SMP_RG_REDUCED_FUNCTIONALITY 0x80 uint8_t time_to_reduced_func; uint8_t initial_time_to_reduced_func; uint8_t max_reduced_func_time; uint8_t last_sc_stat_desc_index[2]; uint8_t max_sc_stat_descs[2]; uint8_t last_phy_evl_desc_index[2]; uint8_t max_stored_pel_descs[2]; uint8_t stp_reject_to_open_limit[2]; uint8_t reserved6[2]; uint8_t crc[4]; }; /* * REPORT MANUFACTURER INFORMATION request and response, current as of SPL * Revision 7. */ struct smp_report_manuf_info_request { uint8_t frame_type; uint8_t function; uint8_t response_len; uint8_t request_len; #define SMP_RMI_REQUEST_LEN 0x00 uint8_t crc[4]; }; struct smp_report_manuf_info_response { uint8_t frame_type; uint8_t function; uint8_t function_result; uint8_t response_len; #define SMP_RMI_RESPONSE_LEN 0x0e uint8_t expander_change_count[2]; uint8_t reserved0[2]; uint8_t sas_11_format; #define SMP_RMI_SAS11_FORMAT 0x01 uint8_t reserved1[3]; uint8_t vendor[8]; uint8_t product[16]; uint8_t revision[4]; uint8_t comp_vendor[8]; uint8_t comp_id[2]; uint8_t comp_revision; uint8_t reserved2; uint8_t vendor_specific[8]; uint8_t crc[4]; }; /* * DISCOVER request and response, current as of SPL Revision 7. 
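 */

/*
 * A hypothetical sketch (not part of the original change) of how a consumer
 * might sanity-check one of the responses defined above before decoding it:
 * every SMP response frame begins with the same frame_type/function/
 * function_result header bytes, and multi-byte fields are big-endian byte
 * arrays meant to be read through scsi_2btoul()/scsi_8btou64() rather than
 * dereferenced in place.  The helper name "rg_response_usable" is an
 * illustrative assumption.
 */
#if 0
static int
rg_response_usable(struct smp_report_general_response *resp)
{

	if (resp->frame_type != SMP_FRAME_TYPE_RESPONSE ||
	    resp->function != SMP_FUNC_REPORT_GENERAL)
		return (0);
	if (resp->function_result != SMP_FR_ACCEPTED)
		return (0);
	/* Safe to read, e.g., scsi_2btoul(resp->expander_change_count). */
	return (1);
}
#endif

/*
 * The DISCOVER request and response structures described above follow.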
*/ struct smp_discover_request { uint8_t frame_type; uint8_t function; uint8_t response_len; uint8_t request_len; #define SMP_DIS_REQUEST_LEN 0x02 uint8_t reserved0[4]; uint8_t ignore_zone_group; #define SMP_DIS_IGNORE_ZONE_GROUP 0x01 uint8_t phy; uint8_t reserved1[2]; uint8_t crc[4]; }; struct smp_discover_response { uint8_t frame_type; uint8_t function; uint8_t function_result; uint8_t response_len; #define SMP_DIS_RESPONSE_LEN 0x20 uint8_t expander_change_count[2]; uint8_t reserved0[3]; uint8_t phy; uint8_t reserved1[2]; uint8_t attached_device; #define SMP_DIS_AD_TYPE_MASK 0x70 #define SMP_DIS_AD_TYPE_NONE 0x00 #define SMP_DIS_AD_TYPE_SAS_SATA 0x10 #define SMP_DIS_AD_TYPE_EXP 0x20 #define SMP_DIS_AD_TYPE_EXP_OLD 0x30 #define SMP_DIS_ATTACH_REASON_MASK 0x0f uint8_t neg_logical_link_rate; #define SMP_DIS_LR_MASK 0x0f #define SMP_DIS_LR_DISABLED 0x01 #define SMP_DIS_LR_PHY_RES_PROB 0x02 #define SMP_DIS_LR_SPINUP_HOLD 0x03 #define SMP_DIS_LR_PORT_SEL 0x04 #define SMP_DIS_LR_RESET_IN_PROG 0x05 #define SMP_DIS_LR_UNSUP_PHY_ATTACHED 0x06 #define SMP_DIS_LR_G1_15GBPS 0x08 #define SMP_DIS_LR_G2_30GBPS 0x09 #define SMP_DIS_LR_G3_60GBPS 0x0a uint8_t config_bits0; #define SMP_DIS_ATTACHED_SSP_INIT 0x08 #define SMP_DIS_ATTACHED_STP_INIT 0x04 #define SMP_DIS_ATTACHED_SMP_INIT 0x02 #define SMP_DIS_ATTACHED_SATA_HOST 0x01 uint8_t config_bits1; #define SMP_DIS_ATTACHED_SATA_PORTSEL 0x80 #define SMP_DIS_STP_BUFFER_TOO_SMALL 0x10 #define SMP_DIS_ATTACHED_SSP_TARG 0x08 #define SMP_DIS_ATTACHED_STP_TARG 0x04 #define SMP_DIS_ATTACHED_SMP_TARG 0x02 #define SMP_DIS_ATTACHED_SATA_DEV 0x01 uint8_t sas_address[8]; uint8_t attached_sas_address[8]; uint8_t attached_phy_id; uint8_t config_bits2; #define SMP_DIS_ATT_SLUMB_CAP 0x10 #define SMP_DIS_ATT_PAR_CAP 0x08 #define SMP_DIS_ATT_IN_ZPSDS_PER 0x04 #define SMP_DIS_ATT_REQ_IN_ZPSDS 0x02 #define SMP_DIS_ATT_BREAK_RPL_CAP 0x01 uint8_t reserved2[6]; uint8_t link_rate0; #define SMP_DIS_PROG_MIN_LR_MASK 0xf0 #define SMP_DIS_PROG_MIN_LR_SHIFT 4 #define SMP_DIS_HARD_MIN_LR_MASK 0x0f uint8_t link_rate1; #define SMP_DIS_PROG_MAX_LR_MAX 0xf0 #define SMP_DIS_PROG_MAX_LR_SHIFT 4 #define SMP_DIS_HARD_MAX_LR_MASK 0x0f uint8_t phy_change_count; uint8_t pp_timeout; #define SMP_DIS_VIRTUAL_PHY 0x80 #define SMP_DIS_PP_TIMEOUT_MASK 0x0f uint8_t routing_attr; uint8_t conn_type; uint8_t conn_el_index; uint8_t conn_phys_link; uint8_t config_bits3; #define SMP_DIS_PHY_POW_COND_MASK 0xc0 #define SMP_DIS_PHY_POW_COND_SHIFT 6 #define SMP_DIS_SAS_SLUMB_CAP 0x08 #define SMP_DIS_SAS_PART_CAP 0x04 #define SMP_DIS_SATA_SLUMB_CAP 0x02 #define SMP_DIS_SATA_PART_CAP 0x01 uint8_t config_bits4; #define SMP_DIS_SAS_SLUMB_ENB 0x08 #define SMP_DIS_SAS_PART_ENB 0x04 #define SMP_DIS_SATA_SLUMB_ENB 0x02 #define SMP_DIS_SATA_PART_ENB 0x01 uint8_t vendor_spec[2]; uint8_t attached_dev_name[8]; uint8_t config_bits5; #define SMP_DIS_REQ_IN_ZPSDS_CHG 0x40 #define SMP_DIS_IN_ZPSDS_PER 0x20 #define SMP_DIS_REQ_IN_ZPSDS 0x10 #define SMP_DIS_ZG_PER 0x04 #define SMP_DIS_IN_ZPSDS 0x02 #define SMP_DIS_ZONING_ENB 0x01 uint8_t reserved3[2]; uint8_t zone_group; uint8_t self_config_status; uint8_t self_config_levels_comp; uint8_t reserved4[2]; uint8_t self_config_sas_addr[8]; uint8_t prog_phy_cap[4]; uint8_t current_phy_cap[4]; uint8_t attached_phy_cap[4]; uint8_t reserved5[6]; uint8_t neg_phys_link_rate; #define SMP_DIS_REASON_MASK 0xf0 #define SMP_DIS_REASON_SHIFT 4 #define SMP_DIS_PHYS_LR_MASK 0x0f uint8_t config_bits6; #define SMP_DIS_OPTICAL_MODE_ENB 0x04 #define SMP_DIS_NEG_SSC 0x02 #define SMP_DIS_HW_MUX_SUP 0x01 
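/*
 * The three byte pairs below (config_bits7/default_zone_group through
 * config_bits9/shadow_zone_group) carry the default, saved, and shadow
 * copies of the current zoning state held in config_bits5 and zone_group
 * above.
 */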
uint8_t config_bits7; #define SMP_DIS_DEF_IN_ZPSDS_PER 0x20 #define SMP_DIS_DEF_REQ_IN_ZPSDS 0x10 #define SMP_DIS_DEF_ZG_PER 0x04 #define SMP_DIS_DEF_ZONING_ENB 0x01 uint8_t reserved6; uint8_t reserved7; uint8_t default_zone_group; uint8_t config_bits8; #define SMP_DIS_SAVED_IN_ZPSDS_PER 0x20 #define SMP_DIS_SAVED_REQ_IN_SPSDS 0x10 #define SMP_DIS_SAVED_ZG_PER 0x04 #define SMP_DIS_SAVED_ZONING_ENB 0x01 uint8_t reserved8; uint8_t reserved9; uint8_t saved_zone_group; uint8_t config_bits9; #define SMP_DIS_SHADOW_IN_ZPSDS_PER 0x20 #define SMP_DIS_SHADOW_IN_REQ_IN_ZPSDS 0x10 #define SMP_DIS_SHADOW_ZG_PER 0x04 uint8_t reserved10; uint8_t reserved11; uint8_t shadow_zone_group; uint8_t device_slot_num; uint8_t device_slot_group_num; uint8_t device_slot_group_out_conn[6]; uint8_t stp_buffer_size[2]; uint8_t reserved12; uint8_t reserved13; uint8_t crc[4]; }; /* * PHY CONTROL request and response. Current as of SPL Revision 7. */ struct smp_phy_control_request { uint8_t frame_type; uint8_t function; uint8_t response_len; #define SMP_PC_RESPONSE_LEN 0x00 uint8_t request_len; #define SMP_PC_REQUEST_LEN 0x09 uint8_t expected_exp_chg_cnt[2]; uint8_t reserved0[3]; uint8_t phy; uint8_t phy_operation; #define SMP_PC_PHY_OP_NOP 0x00 #define SMP_PC_PHY_OP_LINK_RESET 0x01 #define SMP_PC_PHY_OP_HARD_RESET 0x02 #define SMP_PC_PHY_OP_DISABLE 0x03 #define SMP_PC_PHY_OP_CLEAR_ERR_LOG 0x05 #define SMP_PC_PHY_OP_CLEAR_AFFILIATON 0x06 #define SMP_PC_PHY_OP_TRANS_SATA_PSS 0x07 #define SMP_PC_PHY_OP_CLEAR_STP_ITN_LS 0x08 #define SMP_PC_PHY_OP_SET_ATT_DEV_NAME 0x09 uint8_t update_pp_timeout; #define SMP_PC_UPDATE_PP_TIMEOUT 0x01 uint8_t reserved1[12]; uint8_t attached_device_name[8]; uint8_t prog_min_phys_link_rate; #define SMP_PC_PROG_MIN_PL_RATE_MASK 0xf0 #define SMP_PC_PROG_MIN_PL_RATE_SHIFT 4 uint8_t prog_max_phys_link_rate; #define SMP_PC_PROG_MAX_PL_RATE_MASK 0xf0 #define SMP_PC_PROG_MAX_PL_RATE_SHIFT 4 uint8_t config_bits0; #define SMP_PC_SP_NC 0x00 #define SMP_PC_SP_DISABLE 0x02 #define SMP_PC_SP_ENABLE 0x01 #define SMP_PC_SAS_SLUMBER_NC 0x00 #define SMP_PC_SAS_SLUMBER_DISABLE 0x80 #define SMP_PC_SAS_SLUMBER_ENABLE 0x40 #define SMP_PC_SAS_SLUMBER_MASK 0xc0 #define SMP_PC_SAS_SLUMBER_SHIFT 6 #define SMP_PC_SAS_PARTIAL_NC 0x00 #define SMP_PC_SAS_PARTIAL_DISABLE 0x20 #define SMP_PC_SAS_PARTIAL_ENABLE 0x10 #define SMP_PC_SAS_PARTIAL_MASK 0x30 #define SMP_PC_SAS_PARTIAL_SHIFT 4 #define SMP_PC_SATA_SLUMBER_NC 0x00 #define SMP_PC_SATA_SLUMBER_DISABLE 0x08 #define SMP_PC_SATA_SLUMBER_ENABLE 0x04 #define SMP_PC_SATA_SLUMBER_MASK 0x0c #define SMP_PC_SATA_SLUMBER_SHIFT 2 #define SMP_PC_SATA_PARTIAL_NC 0x00 #define SMP_PC_SATA_PARTIAL_DISABLE 0x02 #define SMP_PC_SATA_PARTIAL_ENABLE 0x01 #define SMP_PC_SATA_PARTIAL_MASK 0x03 #define SMP_PC_SATA_PARTIAL_SHIFT 0 uint8_t reserved2; uint8_t pp_timeout_value; #define SMP_PC_PP_TIMEOUT_MASK 0x0f uint8_t reserved3[3]; uint8_t crc[4]; }; struct smp_phy_control_response { uint8_t frame_type; uint8_t function; uint8_t function_result; uint8_t response_len; #define SMP_PC_RESPONSE_LEN 0x00 uint8_t crc[4]; }; __BEGIN_DECLS const char *smp_error_desc(int function_result); const char *smp_command_desc(uint8_t cmd_num); void smp_command_decode(uint8_t *smp_request, int request_len, struct sbuf *sb, char *line_prefix, int first_line_len, int line_len); void smp_command_sbuf(struct ccb_smpio *smpio, struct sbuf *sb, char *line_prefix, int first_line_len, int line_len); #ifdef _KERNEL void smp_error_sbuf(struct ccb_smpio *smpio, struct sbuf *sb); #else /* !_KERNEL*/ void smp_error_sbuf(struct 
cam_device *device, struct ccb_smpio *smpio, struct sbuf *sb); #endif /* _KERNEL/!_KERNEL */ void smp_report_general_sbuf(struct smp_report_general_response *response, int response_len, struct sbuf *sb); void smp_report_manuf_info_sbuf(struct smp_report_manuf_info_response *response, int response_len, struct sbuf *sb); void smp_report_general(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_report_general_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t timeout); void smp_discover(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_discover_request *request, int request_len, uint8_t *response, int response_len, int long_response, int ignore_zone_group, int phy, uint32_t timeout); void smp_report_manuf_info(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_report_manuf_info_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t timeout); void smp_phy_control(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), struct smp_phy_control_request *request, int request_len, uint8_t *response, int response_len, int long_response, uint32_t expected_exp_change_count, int phy, int phy_op, int update_pp_timeout_val, uint64_t attached_device_name, int prog_min_prl, int prog_max_prl, int slumber_partial, int pp_timeout_value, uint32_t timeout); __END_DECLS #endif /*_SCSI_SMP_ALL_H*/
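
/*
 * A hypothetical caller-side sketch (not part of the original change)
 * showing the request-composition helpers and smp_error_desc() together:
 * fill a PHY CONTROL request that link-resets one expander phy, leaving
 * every optional field at its "no change" value.  The function names, phy
 * number, retry count, and timeout are illustrative assumptions, and
 * actually dispatching the CCB to the transport is elided.
 */
#if 0
static void
reset_expander_phy(struct ccb_smpio *smpio,
    void (*done)(struct cam_periph *, union ccb *))
{
	/* Static so the frames outlive this function while the CCB runs. */
	static struct smp_phy_control_request req;
	static uint8_t resp[sizeof(struct smp_phy_control_response)];

	smp_phy_control(smpio, /*retries*/ 3, done, &req, sizeof(req),
	    resp, sizeof(resp), /*long_response*/ 0,
	    /*expected_exp_change_count*/ 0, /*phy*/ 4,
	    SMP_PC_PHY_OP_LINK_RESET, /*update_pp_timeout_val*/ 0,
	    /*attached_device_name*/ 0, /*prog_min_prl*/ 0,
	    /*prog_max_prl*/ 0, /*slumber_partial*/ SMP_PC_SP_NC,
	    /*pp_timeout_value*/ 0, /*timeout*/ 5000);
	/* ...submit the CCB; on completion, inspect the response: */
}

static const char *
phy_control_status(struct smp_phy_control_response *resp)
{

	/* smp_error_desc() maps an SMP_FR_* result to readable text. */
	return (smp_error_desc(resp->function_result));
}
#endif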