diff --git a/sbin/camcontrol/camcontrol.c b/sbin/camcontrol/camcontrol.c
index a2e65055fcaa..19684c044ef5 100644
--- a/sbin/camcontrol/camcontrol.c
+++ b/sbin/camcontrol/camcontrol.c
@@ -1,10779 +1,10792 @@
/*
 * Copyright (c) 1997-2007 Kenneth D. Merry
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "camcontrol.h"
#include "nvmecontrol_ext.h"

typedef enum {
        CAM_CMD_NONE,
        CAM_CMD_DEVLIST,
        CAM_CMD_TUR,
        CAM_CMD_INQUIRY,
        CAM_CMD_STARTSTOP,
        CAM_CMD_RESCAN,
        CAM_CMD_READ_DEFECTS,
        CAM_CMD_MODE_PAGE,
        CAM_CMD_SCSI_CMD,
        CAM_CMD_DEVTREE,
        CAM_CMD_USAGE,
        CAM_CMD_DEBUG,
        CAM_CMD_RESET,
        CAM_CMD_FORMAT,
        CAM_CMD_TAG,
        CAM_CMD_RATE,
        CAM_CMD_DETACH,
        CAM_CMD_REPORTLUNS,
        CAM_CMD_READCAP,
        CAM_CMD_IDENTIFY,
        CAM_CMD_IDLE,
        CAM_CMD_STANDBY,
        CAM_CMD_SLEEP,
        CAM_CMD_SMP_CMD,
        CAM_CMD_SMP_RG,
        CAM_CMD_SMP_PC,
        CAM_CMD_SMP_PHYLIST,
        CAM_CMD_SMP_MANINFO,
        CAM_CMD_DOWNLOAD_FW,
        CAM_CMD_SECURITY,
        CAM_CMD_HPA,
        CAM_CMD_SANITIZE,
        CAM_CMD_PERSIST,
        CAM_CMD_APM,
        CAM_CMD_AAM,
        CAM_CMD_ATTRIB,
        CAM_CMD_OPCODES,
        CAM_CMD_REPROBE,
        CAM_CMD_ZONE,
        CAM_CMD_EPC,
        CAM_CMD_TIMESTAMP,
        CAM_CMD_MMCSD_CMD,
        CAM_CMD_POWER_MODE,
        CAM_CMD_DEVTYPE,
        CAM_CMD_AMA,
        CAM_CMD_DEPOP,
        CAM_CMD_REQSENSE
} cam_cmd;

typedef enum {
        CAM_ARG_NONE            = 0x00000000,
        CAM_ARG_VERBOSE         = 0x00000001,
        CAM_ARG_DEVICE          = 0x00000002,
        CAM_ARG_BUS             = 0x00000004,
        CAM_ARG_TARGET          = 0x00000008,
        CAM_ARG_LUN             = 0x00000010,
        CAM_ARG_EJECT           = 0x00000020,
        CAM_ARG_UNIT            = 0x00000040,
        /* unused                 0x00000080 */
        /* unused                 0x00000100 */
        /* unused                 0x00000200 */
        /* unused                 0x00000400 */
        /* unused                 0x00000800 */
        CAM_ARG_GET_SERIAL      = 0x00001000,
        CAM_ARG_GET_STDINQ      = 0x00002000,
        CAM_ARG_GET_XFERRATE    = 0x00004000,
        CAM_ARG_INQ_MASK        = 0x00007000,
        /* unused                 0x00008000 */
        /* unused                 0x00010000 */
        CAM_ARG_TIMEOUT         = 0x00020000,
        CAM_ARG_CMD_IN          = 0x00040000,
        CAM_ARG_CMD_OUT         = 0x00080000,
        /* unused                 0x00100000 */
        CAM_ARG_ERR_RECOVER     = 0x00200000,
        CAM_ARG_RETRIES         = 0x00400000,
        CAM_ARG_START_UNIT      = 0x00800000,
        CAM_ARG_DEBUG_INFO      = 0x01000000,
        CAM_ARG_DEBUG_TRACE     = 0x02000000,
        CAM_ARG_DEBUG_SUBTRACE  = 0x04000000,
        CAM_ARG_DEBUG_CDB       = 0x08000000,
        CAM_ARG_DEBUG_XPT       = 0x10000000,
        CAM_ARG_DEBUG_PERIPH    = 0x20000000,
        CAM_ARG_DEBUG_PROBE     = 0x40000000,
        /* unused                 0x80000000 */
} cam_argmask;

struct camcontrol_opts {
        const char *optname;
        uint32_t cmdnum;
        cam_argmask argnum;
        const char *subopt;
};

struct ata_set_max_pwd {
        uint16_t reserved1;
        uint8_t password[32];
        uint16_t reserved2[239];
};

static struct scsi_nv task_attrs[] = {
        { "simple", MSG_SIMPLE_Q_TAG },
        { "head", MSG_HEAD_OF_Q_TAG },
        { "ordered", MSG_ORDERED_Q_TAG },
        { "iwr", MSG_IGN_WIDE_RESIDUE },
        { "aca", MSG_ACA_TASK }
};

static const char scsicmd_opts[] = "a:c:dfi:o:r";
static const char readdefect_opts[] = "f:GPqsS:X";
static const char negotiate_opts[] = "acD:M:O:qR:T:UW:";
static const char smprg_opts[] = "l";
static const char smppc_opts[] = "a:A:d:lm:M:o:p:s:S:T:";
static const char smpphylist_opts[] = "lq";
static char pwd_opt;

static struct camcontrol_opts option_table[] = {
        {"tur", CAM_CMD_TUR, CAM_ARG_NONE, NULL},
        {"inquiry", CAM_CMD_INQUIRY, CAM_ARG_NONE, "DSR"},
        {"identify", CAM_CMD_IDENTIFY, CAM_ARG_NONE, NULL},
        {"start", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT, NULL},
        {"stop", CAM_CMD_STARTSTOP, CAM_ARG_NONE, NULL},
        {"load", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT | CAM_ARG_EJECT, NULL},
        {"eject", CAM_CMD_STARTSTOP, CAM_ARG_EJECT, NULL},
        {"reportluns", CAM_CMD_REPORTLUNS, CAM_ARG_NONE, "clr:"},
        {"readcapacity", CAM_CMD_READCAP, CAM_ARG_NONE, "bhHlNqs"},
        {"reprobe", CAM_CMD_REPROBE, CAM_ARG_NONE, NULL},
        {"rescan", CAM_CMD_RESCAN, CAM_ARG_NONE, NULL},
        {"reset", CAM_CMD_RESET, CAM_ARG_NONE, NULL},
        {"cmd", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts},
        {"mmcsdcmd", CAM_CMD_MMCSD_CMD, CAM_ARG_NONE, "c:a:F:f:Wb:l:41S:I"},
        {"command", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts},
        {"smpcmd", CAM_CMD_SMP_CMD, CAM_ARG_NONE, "r:R:"},
        {"smprg", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts},
        {"smpreportgeneral", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts},
        {"smppc", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts},
        {"smpphycontrol", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts},
        {"smpplist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts},
        {"smpphylist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts},
        {"smpmaninfo", CAM_CMD_SMP_MANINFO, CAM_ARG_NONE, "l"},
        {"defects", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts},
        {"defectlist", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts},
        {"devlist", CAM_CMD_DEVTREE, CAM_ARG_NONE, "-b"},
        {"devtype", CAM_CMD_DEVTYPE, CAM_ARG_NONE, ""},
        {"periphlist", CAM_CMD_DEVLIST, CAM_ARG_NONE, NULL},
        {"modepage", CAM_CMD_MODE_PAGE, CAM_ARG_NONE, "6bdelm:DLP:"},
        {"tags", CAM_CMD_TAG, CAM_ARG_NONE, "N:q"},
        {"negotiate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts},
        {"rate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts},
        {"debug", CAM_CMD_DEBUG, CAM_ARG_NONE, "IPTSXcp"},
        {"format", CAM_CMD_FORMAT, CAM_ARG_NONE, "qrwy"},
        {"sanitize", CAM_CMD_SANITIZE, CAM_ARG_NONE, "a:c:IP:qrUwy"},
        {"idle", CAM_CMD_IDLE, CAM_ARG_NONE, "t:"},
        {"standby", CAM_CMD_STANDBY, CAM_ARG_NONE, "t:"},
        {"sleep", CAM_CMD_SLEEP, CAM_ARG_NONE, ""},
        {"powermode", CAM_CMD_POWER_MODE, CAM_ARG_NONE, ""},
        {"apm", CAM_CMD_APM, CAM_ARG_NONE, "l:"},
        {"aam", CAM_CMD_AAM, CAM_ARG_NONE, "l:"},
        {"fwdownload", CAM_CMD_DOWNLOAD_FW, CAM_ARG_NONE, "f:qsy"},
        {"security", CAM_CMD_SECURITY, CAM_ARG_NONE, "d:e:fh:k:l:qs:T:U:y"},
        {"hpa", CAM_CMD_HPA, CAM_ARG_NONE, "Pflp:qs:U:y"},
        {"ama", CAM_CMD_AMA, CAM_ARG_NONE, "fqs:"},
        {"persist", CAM_CMD_PERSIST, CAM_ARG_NONE, "ai:I:k:K:o:ps:ST:U"},
        {"attrib", CAM_CMD_ATTRIB, CAM_ARG_NONE, "a:ce:F:p:r:s:T:w:V:"},
        {"opcodes", CAM_CMD_OPCODES, CAM_ARG_NONE, "No:s:T"},
        {"zone", CAM_CMD_ZONE, CAM_ARG_NONE, "ac:l:No:P:"},
        {"epc", CAM_CMD_EPC, CAM_ARG_NONE, "c:dDeHp:Pr:sS:T:"},
        {"timestamp", CAM_CMD_TIMESTAMP, CAM_ARG_NONE, "f:mrsUT:"},
        {"depop", CAM_CMD_DEPOP, CAM_ARG_NONE, "ac:de:ls"},
        {"sense", CAM_CMD_REQSENSE, CAM_ARG_NONE, "Dx"},
        {"help", CAM_CMD_USAGE, CAM_ARG_NONE, NULL},
        {"-?", CAM_CMD_USAGE, CAM_ARG_NONE, NULL},
        {"-h", CAM_CMD_USAGE, CAM_ARG_NONE, NULL},
        {NULL, 0, 0, NULL}
};

struct cam_devitem {
        struct device_match_result dev_match;
        int num_periphs;
        struct periph_match_result *periph_matches;
        struct scsi_vpd_device_id *device_id;
        int device_id_len;
        STAILQ_ENTRY(cam_devitem) links;
};

struct cam_devlist {
        STAILQ_HEAD(, cam_devitem) dev_queue;
        path_id_t path_id;
};

static cam_argmask arglist;

static const char *devtype_names[] = {
        "none",
        "scsi",
        "satl",
        "ata",
        "nvme",
        "mmcsd",
        "unknown",
};

camcontrol_optret getoption(struct camcontrol_opts *table, char *arg,
                            uint32_t *cmdnum, cam_argmask *argnum,
                            const char **subopt);
static int getdevlist(struct cam_device *device);
static int getdevtree(int argc, char **argv, char *combinedopt);
static int getdevtype(struct cam_device *device);
static int print_dev_scsi(struct device_match_result *dev_result, char *tmpstr);
static int print_dev_ata(struct device_match_result *dev_result, char *tmpstr);
static int print_dev_semb(struct device_match_result *dev_result, char *tmpstr);
static int print_dev_mmcsd(struct device_match_result *dev_result,
    char *tmpstr);
static int print_dev_nvme(struct device_match_result *dev_result, char *tmpstr);
static int requestsense(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int task_attr, int retry_count, int timeout);
static int testunitready(struct cam_device *device, int task_attr,
    int retry_count, int timeout, int quiet);
static int scsistart(struct cam_device *device, int startstop, int loadeject,
    int task_attr, int retry_count, int timeout);
static int scsiinquiry(struct cam_device *device, int task_attr,
    int retry_count, int timeout);
static int scsiserial(struct cam_device *device, int task_attr,
    int retry_count, int timeout);
static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target,
    lun_id_t *lun, cam_argmask *arglst);
static int reprobe(struct cam_device *device);
static int dorescan_or_reset(int argc, char **argv, int rescan);
static int rescan_or_reset_bus(path_id_t bus, int rescan);
static int scanlun_or_reset_dev(path_id_t bus, target_id_t target,
    lun_id_t lun, int scan);
static int readdefects(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int task_attr, int retry_count, int timeout);
static void modepage(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int task_attr, int retry_count, int timeout);
static int scsicmd(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int task_attr, int retry_count, int timeout);
static int smpcmd(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int retry_count, int timeout);
static int mmcsdcmd(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int retry_count, int timeout);
static int smpreportgeneral(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int retry_count, int timeout);
static int smpphycontrol(struct cam_device *device, int argc, char **argv,
    char *combinedopt, int retry_count, int timeout);
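/*
 * Editorial sketch, not part of this change: command names in option_table
 * above are matched by unambiguous prefix (see getoption() below), so
 * "camcontrol inq" resolves to "inquiry" while "id" is rejected as
 * ambiguous between "identify" and "idle". A minimal standalone model of
 * that rule, with hypothetical names:
 */
#include <string.h>

static const char *demo_cmds[] = { "identify", "idle", "inquiry", NULL };

/* Return the unique command the prefix selects, or NULL if none/ambiguous. */
static const char *
demo_prefix_lookup(const char *arg)
{
        const char *hit = NULL;
        int nmatches = 0;

        for (const char **c = demo_cmds; *c != NULL; c++) {
                if (strncmp(*c, arg, strlen(arg)) == 0) {
                        hit = *c;
                        nmatches++;
                }
        }
        return (nmatches == 1 ? hit : NULL);
}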
static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int getdevid(struct cam_devitem *item); static int buildbusdevlist(struct cam_devlist *devlist); static void freebusdevlist(struct cam_devlist *devlist); static struct cam_devitem *findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr); static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt); static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts); static void cpi_print(struct ccb_pathinq *cpi); static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi); static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd); static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts); static int ratecontrol(struct cam_device *device, int task_attr, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout); static int sanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout); static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout); static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout); static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int ataama(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len); static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len); static int scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout, int verbose); #ifndef min #define min(a,b) (((a)<(b))?(a):(b)) #endif #ifndef max #define max(a,b) (((a)>(b))?(a):(b)) #endif camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt) { struct camcontrol_opts *opts; int num_matches = 0; for (opts = table; (opts != NULL) && (opts->optname != NULL); opts++) { if (strncmp(opts->optname, arg, strlen(arg)) == 0) { *cmdnum = opts->cmdnum; *argnum = opts->argnum; *subopt = opts->subopt; if (++num_matches > 1) return (CC_OR_AMBIGUOUS); } } if (num_matches > 0) return (CC_OR_FOUND); else return (CC_OR_NOT_FOUND); } static int getdevlist(struct cam_device *device) { union ccb *ccb; char status[32]; int error = 0; ccb = cam_getccb(device); ccb->ccb_h.func_code = XPT_GDEVLIST; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 1; ccb->cgdl.index = 0; ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { if (cam_send_ccb(device, ccb) < 0) { warn("error getting device 
list"); cam_freeccb(ccb); return (1); } status[0] = '\0'; switch (ccb->cgdl.status) { case CAM_GDEVLIST_MORE_DEVS: strcpy(status, "MORE"); break; case CAM_GDEVLIST_LAST_DEVICE: strcpy(status, "LAST"); break; case CAM_GDEVLIST_LIST_CHANGED: strcpy(status, "CHANGED"); break; case CAM_GDEVLIST_ERROR: strcpy(status, "ERROR"); error = 1; break; } fprintf(stdout, "%s%d: generation: %d index: %d status: %s\n", ccb->cgdl.periph_name, ccb->cgdl.unit_number, ccb->cgdl.generation, ccb->cgdl.index, status); /* * If the list has changed, we need to start over from the * beginning. */ if (ccb->cgdl.status == CAM_GDEVLIST_LIST_CHANGED) ccb->cgdl.index = 0; } cam_freeccb(ccb); return (error); } static int getdevtree(int argc, char **argv, char *combinedopt) { union ccb ccb; int bufsize, fd; unsigned int i; int need_close = 0; int error = 0; int skip_device = 0; int busonly = 0; int c; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': if ((arglist & CAM_ARG_VERBOSE) == 0) busonly = 1; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return (1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return (1); } ccb.cdm.num_matches = 0; /* * We fetch all nodes, since we display most of them in the default * case, and all in the verbose case. */ ccb.cdm.num_patterns = 0; ccb.cdm.pattern_buf_len = 0; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); error = 1; break; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); error = 1; break; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_BUS: { struct bus_match_result *bus_result; /* * Only print the bus information if the * user turns on the verbose flag. */ if ((busonly == 0) && (arglist & CAM_ARG_VERBOSE) == 0) break; bus_result = &ccb.cdm.matches[i].result.bus_result; if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "scbus%d on %s%d bus %d%s\n", bus_result->path_id, bus_result->dev_name, bus_result->unit_number, bus_result->bus_id, (busonly ? 
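/*
 * Editorial sketch, not part of this change: the XPT_GDEVLIST loop in
 * getdevlist() above, reduced to its core. The generation/index pair lets
 * the kernel tell the caller that the peripheral list changed mid-walk, in
 * which case the walk restarts at index 0. Assumes a device opened with
 * cam_open_device().
 */
#include <stdio.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <camlib.h>

static void
demo_list_periphs(struct cam_device *dev)
{
        union ccb *ccb = cam_getccb(dev);

        if (ccb == NULL)
                return;
        ccb->ccb_h.func_code = XPT_GDEVLIST;
        ccb->ccb_h.flags = CAM_DIR_NONE;
        ccb->ccb_h.retry_count = 1;
        ccb->cgdl.index = 0;
        ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
        while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
                if (cam_send_ccb(dev, ccb) < 0)
                        break;
                printf("%s%d\n", ccb->cgdl.periph_name,
                    ccb->cgdl.unit_number);
                if (ccb->cgdl.status == CAM_GDEVLIST_LIST_CHANGED)
                        ccb->cgdl.index = 0;    /* list changed: start over */
        }
        cam_freeccb(ccb);
}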
"" : ":")); break; } case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; char tmpstr[256]; if (busonly == 1) break; dev_result = &ccb.cdm.matches[i].result.device_result; if ((dev_result->flags & DEV_RESULT_UNCONFIGURED) && ((arglist & CAM_ARG_VERBOSE) == 0)) { skip_device = 1; break; } else skip_device = 0; if (dev_result->protocol == PROTO_SCSI) { if (print_dev_scsi(dev_result, &tmpstr[0]) != 0) { skip_device = 1; break; } } else if (dev_result->protocol == PROTO_ATA || dev_result->protocol == PROTO_SATAPM) { if (print_dev_ata(dev_result, &tmpstr[0]) != 0) { skip_device = 1; break; } } else if (dev_result->protocol == PROTO_MMCSD){ if (print_dev_mmcsd(dev_result, &tmpstr[0]) != 0) { skip_device = 1; break; } } else if (dev_result->protocol == PROTO_SEMB) { if (print_dev_semb(dev_result, &tmpstr[0]) != 0) { skip_device = 1; break; } } else if (dev_result->protocol == PROTO_NVME) { if (print_dev_nvme(dev_result, &tmpstr[0]) != 0) { skip_device = 1; break; } } else { sprintf(tmpstr, "<>"); } if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "%-33s at scbus%d " "target %d lun %jx (", tmpstr, dev_result->path_id, dev_result->target_id, (uintmax_t)dev_result->target_lun); need_close = 1; break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (busonly || skip_device != 0) break; if (need_close > 1) fprintf(stdout, ","); fprintf(stdout, "%s%d", periph_result->periph_name, periph_result->unit_number); need_close++; break; } default: fprintf(stdout, "unknown match type\n"); break; } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); if (need_close) fprintf(stdout, ")\n"); free(ccb.cdm.matches); close(fd); return (error); } static int getdevtype(struct cam_device *cam_dev) { camcontrol_devtype dt; int error; /* * Get the device type and report it, request no I/O be done to do this. 
*/ error = get_device_type(cam_dev, -1, 0, 0, &dt); if (error != 0 || (unsigned)dt > CC_DT_UNKNOWN) { fprintf(stdout, "illegal\n"); return (1); } fprintf(stdout, "%s\n", devtype_names[dt]); return (0); } static int print_dev_scsi(struct device_match_result *dev_result, char *tmpstr) { char vendor[16], product[48], revision[16]; cam_strvis(vendor, dev_result->inq_data.vendor, sizeof(dev_result->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_result->inq_data.product, sizeof(dev_result->inq_data.product), sizeof(product)); cam_strvis(revision, dev_result->inq_data.revision, sizeof(dev_result->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); return (0); } static int print_dev_ata(struct device_match_result *dev_result, char *tmpstr) { char product[48], revision[16]; cam_strvis(product, dev_result->ident_data.model, sizeof(dev_result->ident_data.model), sizeof(product)); cam_strvis(revision, dev_result->ident_data.revision, sizeof(dev_result->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); return (0); } static int print_dev_semb(struct device_match_result *dev_result, char *tmpstr) { struct sep_identify_data *sid; char vendor[16], product[48], revision[16], fw[5]; sid = (struct sep_identify_data *)&dev_result->ident_data; cam_strvis(vendor, sid->vendor_id, sizeof(sid->vendor_id), sizeof(vendor)); cam_strvis(product, sid->product_id, sizeof(sid->product_id), sizeof(product)); cam_strvis(revision, sid->product_rev, sizeof(sid->product_rev), sizeof(revision)); cam_strvis(fw, sid->firmware_rev, sizeof(sid->firmware_rev), sizeof(fw)); sprintf(tmpstr, "<%s %s %s %s>", vendor, product, revision, fw); return (0); } static int print_dev_mmcsd(struct device_match_result *dev_result, char *tmpstr) { union ccb *ccb; struct ccb_dev_advinfo *advi; struct cam_device *dev; struct mmc_params mmc_ident_data; dev = cam_open_btl(dev_result->path_id, dev_result->target_id, dev_result->target_lun, O_RDWR, NULL); if (dev == NULL) { warnx("%s", cam_errbuf); return (1); } ccb = cam_getccb(dev); if (ccb == NULL) { warnx("couldn't allocate CCB"); cam_close_device(dev); return (1); } advi = &ccb->cdai; advi->ccb_h.flags = CAM_DIR_IN; advi->ccb_h.func_code = XPT_DEV_ADVINFO; advi->flags = CDAI_FLAG_NONE; advi->buftype = CDAI_TYPE_MMC_PARAMS; advi->bufsiz = sizeof(struct mmc_params); advi->buf = (uint8_t *)&mmc_ident_data; if (cam_send_ccb(dev, ccb) < 0) { warn("error sending XPT_DEV_ADVINFO CCB"); cam_freeccb(ccb); cam_close_device(dev); return (1); } if (strlen(mmc_ident_data.model) > 0) { sprintf(tmpstr, "<%s>", mmc_ident_data.model); } else { sprintf(tmpstr, "<%s card>", mmc_ident_data.card_features & CARD_FEATURE_SDIO ? 
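/*
 * Editorial sketch, not part of this change: the skeleton of the EDT walk
 * that getdevtree() above performs, without the per-protocol printing.
 * With num_patterns == 0 the kernel returns every bus, device, and
 * peripheral node, at most 100 matches per CAMIOCOMMAND ioctl, hence the
 * loop on CAM_DEV_MATCH_MORE.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <camlib.h>

static void
demo_walk_edt(void)
{
        union ccb ccb;
        int fd;

        if ((fd = open(XPT_DEVICE, O_RDWR)) == -1)
                return;
        memset(&ccb, 0, sizeof(ccb));
        ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
        ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        ccb.ccb_h.func_code = XPT_DEV_MATCH;
        ccb.cdm.match_buf_len = sizeof(struct dev_match_result) * 100;
        ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
        ccb.cdm.num_matches = 0;
        ccb.cdm.num_patterns = 0;       /* no patterns: match everything */
        ccb.cdm.pattern_buf_len = 0;
        do {
                if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                        break;
                for (unsigned int i = 0; i < ccb.cdm.num_matches; i++)
                        printf("node type %d\n", (int)ccb.cdm.matches[i].type);
        } while (ccb.ccb_h.status == CAM_REQ_CMP &&
            ccb.cdm.status == CAM_DEV_MATCH_MORE);
        free(ccb.cdm.matches);
        close(fd);
}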
"SDIO" : "unknown"); } cam_freeccb(ccb); cam_close_device(dev); return (0); } static int nvme_get_cdata(struct cam_device *dev, struct nvme_controller_data *cdata) { union ccb *ccb; struct ccb_dev_advinfo *advi; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("couldn't allocate CCB"); cam_close_device(dev); return (1); } advi = &ccb->cdai; advi->ccb_h.flags = CAM_DIR_IN; advi->ccb_h.func_code = XPT_DEV_ADVINFO; advi->flags = CDAI_FLAG_NONE; advi->buftype = CDAI_TYPE_NVME_CNTRL; advi->bufsiz = sizeof(struct nvme_controller_data); advi->buf = (uint8_t *)cdata; if (cam_send_ccb(dev, ccb) < 0) { warn("error sending XPT_DEV_ADVINFO CCB"); cam_freeccb(ccb); cam_close_device(dev); return(1); } if (advi->ccb_h.status != CAM_REQ_CMP) { warnx("got CAM error %#x", advi->ccb_h.status); cam_freeccb(ccb); cam_close_device(dev); return(1); } cam_freeccb(ccb); return 0; } static int print_dev_nvme(struct device_match_result *dev_result, char *tmpstr) { struct cam_device *dev; struct nvme_controller_data cdata; char vendor[64], product[64]; dev = cam_open_btl(dev_result->path_id, dev_result->target_id, dev_result->target_lun, O_RDWR, NULL); if (dev == NULL) { warnx("%s", cam_errbuf); return (1); } if (nvme_get_cdata(dev, &cdata)) return (1); cam_strvis(vendor, cdata.mn, sizeof(cdata.mn), sizeof(vendor)); cam_strvis(product, cdata.fr, sizeof(cdata.fr), sizeof(product)); sprintf(tmpstr, "<%s %s>", vendor, product); cam_close_device(dev); return (0); } static int requestsense(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { int c; int descriptor_sense = 0; int do_hexdump = 0; struct scsi_sense_data sense; union ccb *ccb = NULL; int error = 0; size_t returned_bytes; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'D': descriptor_sense = 1; break; case 'x': do_hexdump = 1; break; default: break; } } ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } /* cam_getccb cleans up the header, caller has to zero the payload */ CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); bzero(&sense, sizeof(sense)); scsi_request_sense(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*data_ptr*/ (void *)&sense, /*dxfer_len*/ sizeof(sense), /*tag_action*/ task_attr, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 60000); if (descriptor_sense != 0) { struct scsi_request_sense *cdb; cdb = (struct scsi_request_sense *)&ccb->csio.cdb_io.cdb_bytes; cdb->byte2 |= SRS_DESC; } ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending REQUEST SENSE command"); cam_freeccb(ccb); error = 1; goto bailout; } /* * REQUEST SENSE is not generally supposed to fail. But there can * be transport or other errors that might cause it to fail. It * may also fail if the user asks for descriptor sense and the * device doesn't support it. So we check the CCB status here to see. 
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("REQUEST SENSE failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto bailout; } returned_bytes = ccb->csio.dxfer_len - ccb->csio.resid; if (do_hexdump != 0) { hexdump(&sense, returned_bytes, NULL, 0); } else { char path_str[80]; struct sbuf *sb; cam_path_string(device, path_str, sizeof(path_str)); sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: cannot allocate sbuf", __func__); error = 1; goto bailout; } scsi_sense_only_sbuf(&sense, returned_bytes, sb, path_str, &device->inq_data, scsiio_cdb_ptr(&ccb->csio), ccb->csio.cdb_len); sbuf_finish(sb); printf("%s", sbuf_data(sb)); sbuf_delete(sb); } bailout: if (ccb != NULL) cam_freeccb(ccb); return (error); } static int testunitready(struct cam_device *device, int task_attr, int retry_count, int timeout, int quiet) { int error = 0; union ccb *ccb; ccb = cam_getccb(device); scsi_test_unit_ready(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet == 0) warn("error sending TEST UNIT READY command"); cam_freeccb(ccb); return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { if (quiet == 0) fprintf(stdout, "Unit is ready\n"); } else { if (quiet == 0) fprintf(stdout, "Unit is not ready\n"); error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return (error); } static int scsistart(struct cam_device *device, int startstop, int loadeject, int task_attr, int retry_count, int timeout) { union ccb *ccb; int error = 0; ccb = cam_getccb(device); /* * If we're stopping, send an ordered tag so the drive in question * will finish any previously queued writes before stopping. If * the device isn't capable of tagged queueing, or if tagged * queueing is turned off, the tag action is a no-op. We override * the default simple tag, although this also has the effect of * overriding the user's wishes if he wanted to specify a simple * tag. */ if ((startstop == 0) && (task_attr == MSG_SIMPLE_Q_TAG)) task_attr = MSG_ORDERED_Q_TAG; scsi_start_stop(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* start/stop */ startstop, /* load_eject */ loadeject, /* immediate */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
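/*
 * Editorial sketch, not part of this change: nvme_get_cdata() and
 * print_dev_mmcsd() above share one shape, an XPT_DEV_ADVINFO fetch into a
 * caller-supplied buffer, differing only in cdai.buftype. The same pattern
 * with CDAI_TYPE_SERIAL_NUM reads the cached serial number; error handling
 * is trimmed to the essentials here.
 */
#include <stdint.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <camlib.h>

static int
demo_fetch_serial(struct cam_device *dev, char *buf, size_t len)
{
        union ccb *ccb = cam_getccb(dev);

        if (ccb == NULL)
                return (1);
        ccb->cdai.ccb_h.flags = CAM_DIR_IN;
        ccb->cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
        ccb->cdai.flags = CDAI_FLAG_NONE;
        ccb->cdai.buftype = CDAI_TYPE_SERIAL_NUM;
        ccb->cdai.bufsiz = len;
        ccb->cdai.buf = (uint8_t *)buf;
        if (cam_send_ccb(dev, ccb) < 0 ||
            (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
                cam_freeccb(ccb);
                return (1);
        }
        cam_freeccb(ccb);
        return (0);
}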
timeout : 120000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending START STOP UNIT command"); cam_freeccb(ccb); return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) if (startstop) { fprintf(stdout, "Unit started successfully"); if (loadeject) fprintf(stdout,", Media loaded\n"); else fprintf(stdout,"\n"); } else { fprintf(stdout, "Unit stopped successfully"); if (loadeject) fprintf(stdout, ", Media ejected\n"); else fprintf(stdout, "\n"); } else { error = 1; if (startstop) fprintf(stdout, "Error received from start unit command\n"); else fprintf(stdout, "Error received from stop unit command\n"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return (error); } int scsidoinquiry(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { int c; int error = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'D': arglist |= CAM_ARG_GET_STDINQ; break; case 'R': arglist |= CAM_ARG_GET_XFERRATE; break; case 'S': arglist |= CAM_ARG_GET_SERIAL; break; default: break; } } /* * If the user didn't specify any inquiry options, he wants all of * them. */ if ((arglist & CAM_ARG_INQ_MASK) == 0) arglist |= CAM_ARG_INQ_MASK; if (arglist & CAM_ARG_GET_STDINQ) error = scsiinquiry(device, task_attr, retry_count, timeout); if (error != 0) return (error); if (arglist & CAM_ARG_GET_SERIAL) scsiserial(device, task_attr, retry_count, timeout); if (arglist & CAM_ARG_GET_XFERRATE) error = camxferrate(device); return (error); } static int scsiinquiry(struct cam_device *device, int task_attr, int retry_count, int timeout) { union ccb *ccb; struct scsi_inquiry_data *inq_buf; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } inq_buf = (struct scsi_inquiry_data *)malloc( sizeof(struct scsi_inquiry_data)); if (inq_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for inquiry\n"); return (1); } bzero(inq_buf, sizeof(*inq_buf)); /* * Note that although the size of the inquiry buffer is the full * 256 bytes specified in the SCSI spec, we only tell the device * that we have allocated SHORT_INQUIRY_LENGTH bytes. There are * two reasons for this: * * - The SCSI spec says that when a length field is only 1 byte, * a value of 0 will be interpreted as 256. Therefore * scsi_inquiry() will convert an inq_len (which is passed in as * a uint32_t, but the field in the CDB is only 1 byte) of 256 * to 0. Evidently, very few devices meet the spec in that * regard. Some devices, like many Seagate disks, take the 0 as * 0, and don't return any data. One Pioneer DVD-R drive * returns more data than the command asked for. * * So, since there are numerous devices that just don't work * right with the full inquiry size, we don't send the full size. * * - The second reason not to use the full inquiry data length is * that we don't need it here. The only reason we issue a * standard inquiry is to get the vendor name, device name, * and revision so scsi_print_inquiry() can print them. * * If, at some point in the future, more inquiry data is needed for * some reason, this code should use a procedure similar to the * probe code. 
i.e., issue a short inquiry, and determine from * the additional length passed back from the device how much * inquiry data the device supports. Once the amount the device * supports is determined, issue an inquiry for that amount and no * more. * * KDM, 2/18/2000 */ scsi_inquiry(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* inq_buf */ (uint8_t *)inq_buf, /* inq_len */ SHORT_INQUIRY_LENGTH, /* evpd */ 0, /* page_code */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending INQUIRY command"); cam_freeccb(ccb); return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(inq_buf); return (error); } fprintf(stdout, "%s%d: ", device->device_name, device->dev_unit_num); scsi_print_inquiry(inq_buf); free(inq_buf); return (0); } static int scsiserial(struct cam_device *device, int task_attr, int retry_count, int timeout) { union ccb *ccb; struct scsi_vpd_unit_serial_number *serial_buf; char serial_num[SVPD_SERIAL_NUM_SIZE + 1]; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf)); if (serial_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for serial number"); return (1); } scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ task_attr, /* inq_buf */ (uint8_t *)serial_buf, /* inq_len */ sizeof(*serial_buf), /* evpd */ 1, /* page_code */ SVPD_UNIT_SERIAL_NUMBER, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
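/*
 * Editorial sketch, not part of this change: the two-step sizing the
 * comment above recommends. demo_issue_inquiry() is a hypothetical stand-in
 * for the scsi_inquiry()/cam_send_ccb() sequence used in scsiinquiry();
 * SID_ADDITIONAL_LENGTH() converts the device's advertised additional
 * length into a total byte count.
 */
static int demo_issue_inquiry(struct cam_device *, struct scsi_inquiry_data *,
    uint32_t);

static int
demo_sized_inquiry(struct cam_device *dev, struct scsi_inquiry_data *inq)
{
        uint32_t full_len;

        /* Step 1: short INQUIRY, enough to read additional_length. */
        if (demo_issue_inquiry(dev, inq, SHORT_INQUIRY_LENGTH) != 0)
                return (1);
        /* Step 2: fetch exactly what the device says it has, capped. */
        full_len = SID_ADDITIONAL_LENGTH(inq);
        if (full_len > sizeof(*inq))
                full_len = sizeof(*inq);
        if (full_len > SHORT_INQUIRY_LENGTH)
                return (demo_issue_inquiry(dev, inq, full_len));
        return (0);
}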
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending INQUIRY command"); cam_freeccb(ccb); free(serial_buf); return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(serial_buf); return (error); } bcopy(serial_buf->serial_num, serial_num, serial_buf->length); serial_num[serial_buf->length] = '\0'; if ((arglist & CAM_ARG_GET_STDINQ) || (arglist & CAM_ARG_GET_XFERRATE)) fprintf(stdout, "%s%d: Serial Number ", device->device_name, device->dev_unit_num); fprintf(stdout, "%.60s\n", serial_num); free(serial_buf); return (0); } int camxferrate(struct cam_device *device) { struct ccb_pathinq cpi; uint32_t freq = 0; uint32_t speed = 0; union ccb *ccb; u_int mb; int retval = 0; if ((retval = get_cpi(device, &cpi)) != 0) return (1); ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char error_string[] = "error getting transfer settings"; if (retval < 0) warn(error_string); else warnx(error_string); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto xferrate_bailout; } speed = cpi.base_transfer_speed; freq = 0; if (ccb->cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { freq = scsi_calc_syncsrate(spi->sync_period); speed = freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { speed *= (0x01 << spi->bus_width); } } else if (ccb->cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &ccb->cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) speed = fc->bitrate; } else if (ccb->cts.transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &ccb->cts.xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) speed = sas->bitrate; } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; if (pata->valid & CTS_ATA_VALID_MODE) speed = ata_mode2speed(pata->mode); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; if (sata->valid & CTS_SATA_VALID_REVISION) speed = ata_revision2speed(sata->revision); } mb = speed / 1000; if (mb > 0) { fprintf(stdout, "%s%d: %d.%03dMB/s transfers", device->device_name, device->dev_unit_num, mb, speed % 1000); } else { fprintf(stdout, "%s%d: %dKB/s transfers", device->device_name, device->dev_unit_num, speed); } if (ccb->cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) fprintf(stdout, " (%d.%03dMHz, offset %d", freq / 1000, freq % 1000, spi->sync_offset); if (((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) && (spi->bus_width > 0)) { if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ", "); } else { fprintf(stdout, " ("); } fprintf(stdout, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 
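/*
 * Editorial sketch, not part of this change: the arithmetic camxferrate()
 * above applies for parallel SCSI. scsi_calc_syncsrate() maps the
 * negotiated period factor to a sync rate in kHz, which equals KB/s on an
 * 8-bit bus; each step of bus_width doubles the byte rate.
 */
#include <cam/scsi/scsi_all.h>

static uint32_t
demo_spi_rate_kbs(u_int sync_period, u_int bus_width)
{
        uint32_t freq = scsi_calc_syncsrate(sync_period);

        return (freq << bus_width);     /* bus_width: 0 = 8 bit, 1 = 16 bit */
}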
0) && (spi->sync_offset != 0)) { fprintf(stdout, ")"); } } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; printf(" ("); if (pata->valid & CTS_ATA_VALID_MODE) printf("%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) printf("ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) printf("PIO %dbytes", pata->bytecount); printf(")"); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; printf(" ("); if (sata->valid & CTS_SATA_VALID_REVISION) printf("SATA %d.x, ", sata->revision); else printf("SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) printf("%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_SATA_VALID_ATAPI) && sata->atapi != 0) printf("ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) printf("PIO %dbytes", sata->bytecount); printf(")"); } if (ccb->cts.protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi = &ccb->cts.proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { if (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) { fprintf(stdout, ", Command Queueing Enabled"); } } } fprintf(stdout, "\n"); xferrate_bailout: cam_freeccb(ccb); return (retval); } static void atahpa_print(struct ata_params *parm, u_int64_t hpasize, int header) { uint32_t lbasize = (uint32_t)parm->lba_size_1 | ((uint32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); if (header) { printf("\nFeature " "Support Enabled Value\n"); } printf("Host Protected Area (HPA) "); if (parm->support.command1 & ATA_SUPPORT_PROTECTED) { u_int64_t lba = lbasize48 ? lbasize48 : lbasize; printf("yes %s %ju/%ju\n", (hpasize > lba) ? "yes" : "no ", lba, hpasize); printf("HPA - Security "); if (parm->support.command2 & ATA_SUPPORT_MAXSECURITY) printf("yes %s\n", (parm->enabled.command2 & ATA_SUPPORT_MAXSECURITY) ? "yes" : "no "); else printf("no\n"); } else { printf("no\n"); } } static void ataama_print(struct ata_params *parm, u_int64_t nativesize, int header) { uint32_t lbasize = (uint32_t)parm->lba_size_1 | ((uint32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); if (header) { printf("\nFeature " "Support Enabled Value\n"); } printf("Accessible Max Address Config "); if (parm->support2 & ATA_SUPPORT_AMAX_ADDR) { u_int64_t lba = lbasize48 ? lbasize48 : lbasize; printf("yes %s %ju/%ju\n", (nativesize > lba) ? "yes" : "no ", lba, nativesize); } else { printf("no\n"); } } static int atasata(struct ata_params *parm) { if (parm->satacapabilities != 0xffff && parm->satacapabilities != 0x0000) return 1; return 0; } static void atacapprint(struct ata_params *parm) { const char *proto; uint32_t lbasize = (uint32_t)parm->lba_size_1 | ((uint32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); printf("\n"); printf("protocol "); proto = (parm->config == ATA_PROTO_CFA) ? "CFA" : (parm->config & ATA_PROTO_ATAPI) ? 
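/*
 * Editorial sketch, not part of this change: atahpa_print(), ataama_print(),
 * and atacapprint() all rebuild the sector counts the same way. Identify
 * words 60-61 hold the 28-bit addressable count and words 100-103 the
 * 48-bit count, least significant word first:
 */
#include <sys/ata.h>

static uint64_t
demo_ident_max_sectors(const struct ata_params *parm)
{
        uint32_t lba28 = (uint32_t)parm->lba_size_1 |
            ((uint32_t)parm->lba_size_2 << 16);
        uint64_t lba48 = (uint64_t)parm->lba_size48_1 |
            ((uint64_t)parm->lba_size48_2 << 16) |
            ((uint64_t)parm->lba_size48_3 << 32) |
            ((uint64_t)parm->lba_size48_4 << 48);

        /* Prefer the 48-bit count when the device reports one. */
        return (lba48 != 0 ? lba48 : lba28);
}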
"ATAPI" : "ATA"; if (ata_version(parm->version_major) == 0) { printf("%s", proto); } else if (ata_version(parm->version_major) <= 7) { printf("%s-%d", proto, ata_version(parm->version_major)); } else if (ata_version(parm->version_major) == 8) { printf("%s8-ACS", proto); } else { printf("ACS-%d %s", ata_version(parm->version_major) - 7, proto); } if (parm->satacapabilities && parm->satacapabilities != 0xffff) { if (parm->satacapabilities & ATA_SATA_GEN3) printf(" SATA 3.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN2) printf(" SATA 2.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN1) printf(" SATA 1.x\n"); else printf(" SATA\n"); } else printf("\n"); printf("device model %.40s\n", parm->model); printf("firmware revision %.8s\n", parm->revision); printf("serial number %.20s\n", parm->serial); if (parm->enabled.extension & ATA_SUPPORT_64BITWWN) { printf("WWN %04x%04x%04x%04x\n", parm->wwn[0], parm->wwn[1], parm->wwn[2], parm->wwn[3]); } printf("additional product id %.8s\n", parm->product_id); if (parm->enabled.extension & ATA_SUPPORT_MEDIASN) { printf("media serial number %.30s\n", parm->media_serial); } printf("cylinders %d\n", parm->cylinders); printf("heads %d\n", parm->heads); printf("sectors/track %d\n", parm->sectors); printf("sector size logical %u, physical %lu, offset %lu\n", ata_logical_sector_size(parm), (unsigned long)ata_physical_sector_size(parm), (unsigned long)ata_logical_sector_offset(parm)); if (parm->config == ATA_PROTO_CFA || (parm->support.command2 & ATA_SUPPORT_CFA)) printf("CFA supported\n"); printf("LBA%ssupported ", parm->capabilities1 & ATA_SUPPORT_LBA ? " " : " not "); if (lbasize) printf("%d sectors\n", lbasize); else printf("\n"); printf("LBA48%ssupported ", parm->support.command2 & ATA_SUPPORT_ADDRESS48 ? " " : " not "); if (lbasize48) printf("%ju sectors\n", (uintmax_t)lbasize48); else printf("\n"); printf("PIO supported PIO"); switch (ata_max_pmode(parm)) { case ATA_PIO4: printf("4"); break; case ATA_PIO3: printf("3"); break; case ATA_PIO2: printf("2"); break; case ATA_PIO1: printf("1"); break; default: printf("0"); } if ((parm->capabilities1 & ATA_SUPPORT_IORDY) == 0) printf(" w/o IORDY"); printf("\n"); printf("DMA%ssupported ", parm->capabilities1 & ATA_SUPPORT_DMA ? " " : " not "); if (parm->capabilities1 & ATA_SUPPORT_DMA) { if (parm->mwdmamodes & 0xff) { printf("WDMA"); if (parm->mwdmamodes & 0x04) printf("2"); else if (parm->mwdmamodes & 0x02) printf("1"); else if (parm->mwdmamodes & 0x01) printf("0"); printf(" "); } if ((parm->atavalid & ATA_FLAG_88) && (parm->udmamodes & 0xff)) { printf("UDMA"); if (parm->udmamodes & 0x40) printf("6"); else if (parm->udmamodes & 0x20) printf("5"); else if (parm->udmamodes & 0x10) printf("4"); else if (parm->udmamodes & 0x08) printf("3"); else if (parm->udmamodes & 0x04) printf("2"); else if (parm->udmamodes & 0x02) printf("1"); else if (parm->udmamodes & 0x01) printf("0"); printf(" "); } } printf("\n"); if (parm->media_rotation_rate == 1) { printf("media RPM non-rotating\n"); } else if (parm->media_rotation_rate >= 0x0401 && parm->media_rotation_rate <= 0xFFFE) { printf("media RPM %d\n", parm->media_rotation_rate); } printf("Zoned-Device Commands "); switch (parm->support3 & ATA_SUPPORT_ZONE_MASK) { case ATA_SUPPORT_ZONE_DEV_MANAGED: printf("device managed\n"); break; case ATA_SUPPORT_ZONE_HOST_AWARE: printf("host aware\n"); break; default: printf("no\n"); } printf("\nFeature " "Support Enabled Value Vendor\n"); printf("read ahead %s %s\n", parm->support.command1 & ATA_SUPPORT_LOOKAHEAD ? 
"yes" : "no", parm->enabled.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no"); printf("write cache %s %s\n", parm->support.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no"); printf("flush cache %s %s\n", parm->support.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no"); printf("Native Command Queuing (NCQ) "); if (atasata(parm) && (parm->satacapabilities & ATA_SUPPORT_NCQ)) { printf("yes %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); printf("NCQ Priority Information %s\n", parm->satacapabilities & ATA_SUPPORT_NCQ_PRIO ? "yes" : "no"); printf("NCQ Non-Data Command %s\n", parm->satacapabilities2 & ATA_SUPPORT_NCQ_NON_DATA ? "yes" : "no"); printf("NCQ Streaming %s\n", parm->satacapabilities2 & ATA_SUPPORT_NCQ_STREAM ? "yes" : "no"); printf("Receive & Send FPDMA Queued %s\n", parm->satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED ? "yes" : "no"); printf("NCQ Autosense %s\n", parm->satasupport & ATA_SUPPORT_NCQ_AUTOSENSE ? "yes" : "no"); } else printf("no\n"); printf("SMART %s %s\n", parm->support.command1 & ATA_SUPPORT_SMART ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SMART ? "yes" : "no"); printf("security %s %s\n", parm->support.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no"); printf("power management %s %s\n", parm->support.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no"); printf("microcode download %s %s\n", parm->support.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no"); printf("advanced power management %s %s", parm->support.command2 & ATA_SUPPORT_APM ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_APM ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_APM) { printf(" %d/0x%02X\n", parm->apm_value & 0xff, parm->apm_value & 0xff); } else printf("\n"); printf("automatic acoustic management %s %s", parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC ? "yes" :"no", parm->enabled.command2 & ATA_SUPPORT_AUTOACOUSTIC ? "yes" :"no"); if (parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC) { printf(" %d/0x%02X %d/0x%02X\n", ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic)); } else printf("\n"); printf("media status notification %s %s\n", parm->support.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no"); printf("power-up in Standby %s %s\n", parm->support.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no"); printf("write-read-verify %s %s", parm->support2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no"); if (parm->support2 & ATA_SUPPORT_WRITEREADVERIFY) { printf(" %d/0x%x\n", parm->wrv_mode, parm->wrv_mode); } else printf("\n"); printf("unload %s %s\n", parm->support.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no"); printf("general purpose logging %s %s\n", parm->support.extension & ATA_SUPPORT_GENLOG ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_GENLOG ? "yes" : "no"); printf("free-fall %s %s\n", parm->support2 & ATA_SUPPORT_FREEFALL ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_FREEFALL ? 
"yes" : "no"); printf("sense data reporting %s %s\n", parm->support2 & ATA_SUPPORT_SENSE_REPORT ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_SENSE_REPORT ? "yes" : "no"); printf("extended power conditions %s %s\n", parm->support2 & ATA_SUPPORT_EPC ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_EPC ? "yes" : "no"); printf("device statistics notification %s %s\n", parm->support2 & ATA_SUPPORT_DSN ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_DSN ? "yes" : "no"); printf("Data Set Management (DSM/TRIM) "); if (parm->support_dsm & ATA_SUPPORT_DSM_TRIM) { printf("yes\n"); printf("DSM - max 512byte blocks "); if (parm->max_dsm_blocks == 0x00) printf("yes not specified\n"); else printf("yes %d\n", parm->max_dsm_blocks); printf("DSM - deterministic read "); if (parm->support3 & ATA_SUPPORT_DRAT) { if (parm->support3 & ATA_SUPPORT_RZAT) printf("yes zeroed\n"); else printf("yes any value\n"); } else { printf("no\n"); } } else { printf("no\n"); } printf("Trusted Computing %s\n", ((parm->tcg & 0xc000) == 0x4000) && (parm->tcg & ATA_SUPPORT_TCG) ? "yes" : "no"); printf("encrypts all user data %s\n", parm->support3 & ATA_ENCRYPTS_ALL_USER_DATA ? "yes" : "no"); printf("Sanitize "); if (parm->multi & ATA_SUPPORT_SANITIZE) { printf("yes\t\t%s%s%s\n", parm->multi & ATA_SUPPORT_BLOCK_ERASE_EXT ? "block, " : "", parm->multi & ATA_SUPPORT_OVERWRITE_EXT ? "overwrite, " : "", parm->multi & ATA_SUPPORT_CRYPTO_SCRAMBLE_EXT ? "crypto" : ""); printf("Sanitize - commands allowed %s\n", parm->multi & ATA_SUPPORT_SANITIZE_ALLOWED ? "yes" : "no"); printf("Sanitize - antifreeze lock %s\n", parm->multi & ATA_SUPPORT_ANTIFREEZE_LOCK_EXT ? "yes" : "no"); } else { printf("no\n"); } } static int scsi_cam_pass_16_send(struct cam_device *device, union ccb *ccb) { struct ata_pass_16 *ata_pass_16; struct ata_cmd ata_cmd; ata_pass_16 = (struct ata_pass_16 *)ccb->csio.cdb_io.cdb_bytes; ata_cmd.command = ata_pass_16->command; ata_cmd.control = ata_pass_16->control; ata_cmd.features = ata_pass_16->features; if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s via pass_16 with timeout of %u msecs", ata_op_string(&ata_cmd), ccb->csio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending ATA %s via pass_16", ata_op_string(&ata_cmd)); return (1); } /* * Consider any non-CAM_REQ_CMP status as error and report it here, * unless caller set AP_FLAG_CHK_COND, in which case it is responsible. */ if (!(ata_pass_16->flags & AP_FLAG_CHK_COND) && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("ATA %s via pass_16 failed", ata_op_string(&ata_cmd)); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_cam_send(struct cam_device *device, union ccb *ccb) { if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s with timeout of %u msecs", ata_op_string(&(ccb->ataio.cmd)), ccb->ataio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending ATA %s", ata_op_string(&(ccb->ataio.cmd))); return (1); } /* * Consider any non-CAM_REQ_CMP status as error and report it here, * unless caller set AP_FLAG_CHK_COND, in which case it is responsible. 
*/ if (!(ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT) && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("ATA %s failed", ata_op_string(&(ccb->ataio.cmd))); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_do_pass_16(struct cam_device *device, union ccb *ccb, int retries, uint32_t flags, uint8_t protocol, uint8_t ata_flags, uint8_t tag_action, uint8_t command, uint16_t features, u_int64_t lba, uint16_t sector_count, uint8_t *data_ptr, uint16_t dxfer_len, int timeout) { if (data_ptr != NULL) { if (flags & CAM_DIR_OUT) ata_flags |= AP_FLAG_TDIR_TO_DEV; else ata_flags |= AP_FLAG_TDIR_FROM_DEV; } else { ata_flags |= AP_FLAG_TLEN_NO_DATA; } CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_ata_pass_16(&ccb->csio, retries, NULL, flags, tag_action, protocol, ata_flags, features, sector_count, lba, command, /*control*/0, data_ptr, dxfer_len, /*sense_len*/SSD_FULL_SIZE, timeout); return scsi_cam_pass_16_send(device, ccb); } static int ata_try_pass_16(struct cam_device *device) { struct ccb_pathinq cpi; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } if (cpi.protocol == PROTO_SCSI) { /* possibly compatible with pass_16 */ return (1); } /* likely not compatible with pass_16 */ return (0); } static int ata_do_cmd(struct cam_device *device, union ccb *ccb, int retries, uint32_t flags, uint8_t protocol, uint8_t ata_flags, uint8_t tag_action, uint8_t command, uint16_t features, u_int64_t lba, uint16_t sector_count, uint8_t *data_ptr, uint16_t dxfer_len, int timeout, int force48bit) { int retval; retval = ata_try_pass_16(device); if (retval == -1) return (1); if (retval == 1) { return (ata_do_pass_16(device, ccb, retries, flags, protocol, ata_flags, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout)); } CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->ataio); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); if (force48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); if (ata_flags & AP_FLAG_CHK_COND) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; return ata_cam_send(device, ccb); } static void dump_data(uint16_t *ptr, uint32_t len) { u_int i; for (i = 0; i < len / 2; i++) { if ((i % 8) == 0) printf(" %3d: ", i); printf("%04hx ", ptr[i]); if ((i % 8) == 7) printf("\n"); } if ((i % 8) != 7) printf("\n"); } static int atahpa_proc_resp(struct cam_device *device, union ccb *ccb, u_int64_t *hpasize) { uint8_t error = 0, ata_device = 0, status = 0; uint16_t count = 0; uint64_t lba = 0; int retval; retval = get_ata_status(device, ccb, &error, &count, &lba, &ata_device, &status); if (retval == 1) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } warnx("Can't get ATA command status"); return (retval); } if (status & ATA_STATUS_ERROR) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } if (error & ATA_ERROR_ID_NOT_FOUND) { warnx("Max address has already been set since " "last power-on or hardware reset"); } else if (hpasize == NULL) warnx("Command failed with ATA error"); return (1); } if (hpasize != NULL) { if (retval == 2 || retval == 6) return (1); *hpasize = lba + 1; } return (0); } static int ata_read_native_max(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, struct ata_params *parm, 
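/*
 * Editorial sketch, not part of this change: both senders above skip their
 * usual status check when the caller requested the result registers
 * (AP_FLAG_CHK_COND for pass-16, CAM_ATAIO_NEEDRESULT for native ATA). The
 * caller then owns error handling, typically via get_ata_status() as in
 * atahpa_proc_resp() below:
 */
static int
demo_ata_result_ok(struct cam_device *device, union ccb *ccb)
{
        uint8_t err = 0, ata_dev = 0, status = 0;
        uint16_t count = 0;
        uint64_t lba = 0;

        if (get_ata_status(device, ccb, &err, &count, &lba, &ata_dev,
            &status) != 0)
                return (0);     /* no result registers to inspect */
        return ((status & ATA_STATUS_ERROR) == 0);
}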
u_int64_t *hpasize) { int error; u_int cmd, is48bit; uint8_t protocol; is48bit = parm->support.command2 & ATA_SUPPORT_ADDRESS48; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_READ_NATIVE_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_READ_NATIVE_MAX_ADDRESS; } error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 10 * 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, hpasize); } static int atahpa_set_max(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, int is48bit, u_int64_t maxsize, int persist) { int error; u_int cmd; uint8_t protocol; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_SET_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_SET_MAX_ADDRESS; } /* lba's are zero indexed so the max lba is requested max - 1 */ if (maxsize) maxsize--; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_MAX_ADDR, /*lba*/maxsize, /*sector_count*/persist, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, NULL); } static int atahpa_password(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { u_int cmd; uint8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; return (ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_SET_PWD, /*lba*/0, /*sector_count*/sizeof(*pwd) / 512, /*data_ptr*/(uint8_t*)pwd, /*dxfer_len*/sizeof(*pwd), timeout ? timeout : 1000, is48bit)); } static int atahpa_lock(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, int is48bit) { u_int cmd; uint8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; return (ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_LOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit)); } static int atahpa_unlock(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { u_int cmd; uint8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; return (ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_UNLOCK, /*lba*/0, /*sector_count*/sizeof(*pwd) / 512, /*data_ptr*/(uint8_t*)pwd, /*dxfer_len*/sizeof(*pwd), timeout ? timeout : 1000, is48bit)); } static int atahpa_freeze_lock(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, int is48bit) { u_int cmd; uint8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? 
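/*
 * Editorial sketch, not part of this change: SET MAX ADDRESS takes the last
 * accessible LBA rather than a sector count, which is why atahpa_set_max()
 * above decrements maxsize. Exposing 1000000 sectors means programming max
 * LBA 999999; READ NATIVE MAX reports back the same way, so the usable size
 * is lba + 1, as atahpa_proc_resp() computes.
 */
static uint64_t
demo_count_to_max_lba(uint64_t sectors)
{
        return (sectors > 0 ? sectors - 1 : 0);
}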
ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; return (ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_FREEZE, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit)); } static int ata_get_native_max(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, u_int64_t *nativesize) { int error; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA | AP_EXTEND, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_AMAX_ADDR, /*features*/ATA_AMAX_ADDR_GET, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 30 * 1000, /*force48bit*/1); if (error) return (error); return atahpa_proc_resp(device, ccb, nativesize); } static int ataama_set(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb, u_int64_t maxsize) { int error; /* lba's are zero indexed so the max lba is requested max - 1 */ if (maxsize) maxsize--; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA | AP_EXTEND, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_AMAX_ADDR, /*features*/ATA_AMAX_ADDR_SET, /*lba*/maxsize, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 30 * 1000, /*force48bit*/1); if (error) return (error); return atahpa_proc_resp(device, ccb, NULL); } static int ataama_freeze(struct cam_device *device, int retry_count, uint32_t timeout, union ccb *ccb) { return (ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA | AP_EXTEND, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_AMAX_ADDR, /*features*/ATA_AMAX_ADDR_FREEZE, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 30 * 1000, /*force48bit*/1)); } int ata_do_identify(struct cam_device *device, int retry_count, int timeout, union ccb *ccb, struct ata_params** ident_bufp) { struct ata_params *ident_buf; struct ccb_pathinq cpi; struct ccb_getdev cgd; u_int i, error; uint16_t *ptr; uint8_t command, retry_command; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } /* Neither PROTO_ATAPI nor PROTO_SATAPM is used in cpi.protocol */ if (cpi.protocol == PROTO_ATA) { if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (-1); } command = (cgd.protocol == PROTO_ATA) ? ATA_ATA_IDENTIFY : ATA_ATAPI_IDENTIFY; retry_command = 0; } else { /* We don't know which for sure so try both */ command = ATA_ATA_IDENTIFY; retry_command = ATA_ATAPI_IDENTIFY; } ptr = (uint16_t *)calloc(1, sizeof(struct ata_params)); if (ptr == NULL) { warnx("can't calloc memory for identify"); return (1); } retry: error = ata_do_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/command, /*features*/0, /*lba*/0, /*sector_count*/sizeof(struct ata_params) / 512, /*data_ptr*/(uint8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ?
timeout : 30 * 1000, /*force48bit*/0); if (error != 0) { if (retry_command != 0) { command = retry_command; retry_command = 0; goto retry; } free(ptr); return (1); } ident_buf = (struct ata_params *)ptr; ata_param_fixup(ident_buf); error = 1; for (i = 0; i < sizeof(struct ata_params) / 2; i++) { if (ptr[i] != 0) error = 0; } /* check for invalid (all zero) response */ if (error != 0) { warnx("Invalid identify response detected"); free(ptr); return (error); } *ident_bufp = ident_buf; return (0); } static int ataidentify(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct ata_params *ident_buf; u_int64_t hpasize = 0, nativesize = 0; if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } if (ata_do_identify(device, retry_count, timeout, ccb, &ident_buf) != 0) { cam_freeccb(ccb); return (1); } if (arglist & CAM_ARG_VERBOSE) { printf("%s%d: Raw identify data:\n", device->device_name, device->dev_unit_num); dump_data((uint16_t *)ident_buf, sizeof(struct ata_params)); } if (ident_buf->support.command1 & ATA_SUPPORT_PROTECTED) { ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); } if (ident_buf->support2 & ATA_SUPPORT_AMAX_ADDR) { ata_get_native_max(device, retry_count, timeout, ccb, &nativesize); } printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); atacapprint(ident_buf); atahpa_print(ident_buf, hpasize, 0); ataama_print(ident_buf, nativesize, 0); free(ident_buf); cam_freeccb(ccb); return (0); } static int nvmeidentify(struct cam_device *device, int retry_count __unused, int timeout __unused) { struct nvme_controller_data cdata; if (nvme_get_cdata(device, &cdata)) return (1); nvme_print_controller(&cdata); return (0); } static int identify(struct cam_device *device, int retry_count, int timeout) { struct ccb_pathinq cpi; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } if (cpi.protocol == PROTO_NVME) { return (nvmeidentify(device, retry_count, timeout)); } return (ataidentify(device, retry_count, timeout)); } enum { ATA_SECURITY_ACTION_PRINT, ATA_SECURITY_ACTION_FREEZE, ATA_SECURITY_ACTION_UNLOCK, ATA_SECURITY_ACTION_DISABLE, ATA_SECURITY_ACTION_ERASE, ATA_SECURITY_ACTION_ERASE_ENHANCED, ATA_SECURITY_ACTION_SET_PASSWORD }; static void atasecurity_print_time(uint16_t tw) { if (tw == 0) printf("unspecified"); else if (tw >= 255) printf("> 508 min"); else printf("%i min", 2 * tw); } static uint32_t atasecurity_erase_timeout_msecs(uint16_t timeout) { if (timeout == 0) return 2 * 3600 * 1000; /* default: two hours */ else if (timeout > 255) return (508 + 60) * 60 * 1000; /* spec says > 508 minutes */ return ((2 * timeout) + 5) * 60 * 1000; /* add a 5min margin */ } static void atasecurity_notify(uint8_t command, struct ata_security_password *pwd) { struct ata_cmd cmd; bzero(&cmd, sizeof(cmd)); cmd.command = command; printf("Issuing %s", ata_op_string(&cmd)); if (pwd != NULL) { /* pwd->password may not be null terminated */ char pass[sizeof(pwd->password)+1]; strlcpy(pass, pwd->password, sizeof(pass)); printf(" password='%s', user='%s'", pass, (pwd->ctrl & ATA_SECURITY_PASSWORD_MASTER) ? "master" : "user"); if (command == ATA_SECURITY_SET_PASSWORD) { printf(", mode='%s'", (pwd->ctrl & ATA_SECURITY_LEVEL_MAXIMUM) ? 
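/*
 * In "high" mode either the user or the master password can unlock the
 * drive; in "maximum" mode the master password can only be used to
 * secure erase it, not to unlock it.
 */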
"maximum" : "high"); } } printf("\n"); } static int atasecurity_freeze(struct cam_device *device, union ccb *ccb, int retry_count, uint32_t timeout, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_FREEZE_LOCK, NULL); return ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_FREEZE_LOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*force48bit*/0); } static int atasecurity_unlock(struct cam_device *device, union ccb *ccb, int retry_count, uint32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_UNLOCK, pwd); return ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_UNLOCK, /*features*/0, /*lba*/0, /*sector_count*/sizeof(*pwd) / 512, /*data_ptr*/(uint8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*force48bit*/0); } static int atasecurity_disable(struct cam_device *device, union ccb *ccb, int retry_count, uint32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_DISABLE_PASSWORD, pwd); return ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_DISABLE_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/sizeof(*pwd) / 512, /*data_ptr*/(uint8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*force48bit*/0); } static int atasecurity_erase_confirm(struct cam_device *device, struct ata_params* ident_buf) { printf("\nYou are about to ERASE ALL DATA from the following" " device:\n%s%d,%s%d: ", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to ERASE ALL DATA? 
(yes/no) "); if (fgets(str, sizeof(str), stdin) != NULL) { if (strncasecmp(str, "yes", 3) == 0) { return (1); } else if (strncasecmp(str, "no", 2) == 0) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atasecurity_erase(struct cam_device *device, union ccb *ccb, int retry_count, uint32_t timeout, uint32_t erase_timeout, struct ata_security_password *pwd, int quiet) { int error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_PREPARE, NULL); error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_PREPARE, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*force48bit*/0); if (error != 0) return error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_UNIT, pwd); error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_UNIT, /*features*/0, /*lba*/0, /*sector_count*/sizeof(*pwd) / 512, /*data_ptr*/(uint8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/erase_timeout, /*force48bit*/0); if (error == 0 && quiet == 0) printf("\nErase Complete\n"); return error; } static int atasecurity_set_password(struct cam_device *device, union ccb *ccb, int retry_count, uint32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_SET_PASSWORD, pwd); return ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*ata_flags*/AP_FLAG_BYT_BLOK_BLOCKS | AP_FLAG_TLEN_SECT_CNT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_SET_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/sizeof(*pwd) / 512, /*data_ptr*/(uint8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*force48bit*/0); } static void atasecurity_print(struct ata_params *parm) { printf("\nSecurity Option Value\n"); if (arglist & CAM_ARG_VERBOSE) { printf("status %04x\n", parm->security_status); } printf("supported %s\n", parm->security_status & ATA_SECURITY_SUPPORTED ? "yes" : "no"); if (!(parm->security_status & ATA_SECURITY_SUPPORTED)) return; printf("enabled %s\n", parm->security_status & ATA_SECURITY_ENABLED ? "yes" : "no"); printf("drive locked %s\n", parm->security_status & ATA_SECURITY_LOCKED ? "yes" : "no"); printf("security config frozen %s\n", parm->security_status & ATA_SECURITY_FROZEN ? "yes" : "no"); printf("count expired %s\n", parm->security_status & ATA_SECURITY_COUNT_EXP ? "yes" : "no"); printf("security level %s\n", parm->security_status & ATA_SECURITY_LEVEL ? "maximum" : "high"); printf("enhanced erase supported %s\n", parm->security_status & ATA_SECURITY_ENH_SUPP ? "yes" : "no"); printf("erase time "); atasecurity_print_time(parm->erase_time); printf("\n"); printf("enhanced erase time "); atasecurity_print_time(parm->enhanced_erase_time); printf("\n"); printf("master password rev %04x%s\n", parm->master_passwd_revision, parm->master_passwd_revision == 0x0000 || parm->master_passwd_revision == 0xFFFF ? " (unsupported)" : ""); } /* * Validates and copies the password in optarg to the passed buffer. * If the password in optarg is the same length as the buffer then * the data will still be copied but no null termination will occur. 
*/ static int ata_getpwd(uint8_t *passwd, int max, char opt) { int len; len = strlen(optarg); if (len > max) { warnx("-%c password is too long", opt); return (1); } else if (len == 0) { warnx("-%c password is missing", opt); return (1); } else if (optarg[0] == '-'){ warnx("-%c password starts with '-' (generic arg?)", opt); return (1); } else if (strlen(passwd) != 0 && strcmp(passwd, optarg) != 0) { warnx("-%c password conflicts with existing password from -%c", opt, pwd_opt); return (1); } /* Callers pass in a buffer which does NOT need to be terminated */ strncpy(passwd, optarg, max); pwd_opt = opt; return (0); } enum { ATA_HPA_ACTION_PRINT, ATA_HPA_ACTION_SET_MAX, ATA_HPA_ACTION_SET_PWD, ATA_HPA_ACTION_LOCK, ATA_HPA_ACTION_UNLOCK, ATA_HPA_ACTION_FREEZE_LOCK }; static int atahpa_set_confirm(struct cam_device *device, struct ata_params* ident_buf, u_int64_t maxsize, int persist) { printf("\nYou are about to configure HPA to limit the user accessible\n" "sectors to %ju %s on the device:\n%s%d,%s%d: ", maxsize, persist ? "persistently" : "temporarily", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to configure HPA? (yes/no) "); if (NULL != fgets(str, sizeof(str), stdin)) { if (0 == strncasecmp(str, "yes", 3)) { return (1); } else if (0 == strncasecmp(str, "no", 2)) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; struct ccb_getdev cgd; struct ata_set_max_pwd pwd; int error, confirm, quiet, c, action, actions, persist; int security, is48bit, pwdsize; u_int64_t hpasize, maxsize; actions = 0; confirm = 0; quiet = 0; maxsize = 0; persist = 0; security = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print hpa information */ action = ATA_HPA_ACTION_PRINT; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 's': action = ATA_HPA_ACTION_SET_MAX; maxsize = strtoumax(optarg, NULL, 0); actions++; break; case 'p': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_SET_PWD; security = 1; actions++; break; case 'l': action = ATA_HPA_ACTION_LOCK; security = 1; actions++; break; case 'U': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_UNLOCK; security = 1; actions++; break; case 'f': action = ATA_HPA_ACTION_FREEZE_LOCK; security = 1; actions++; break; case 'P': persist = 1; break; case 'y': confirm++; break; case 'q': quiet++; break; } } if (actions > 1) { warnx("too many hpa actions specified"); return (1); } if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (1); } ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_HPA_ACTION_PRINT) { hpasize = 0; if (ident_buf->support.command1 & ATA_SUPPORT_PROTECTED) ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); atahpa_print(ident_buf, hpasize, 1); cam_freeccb(ccb); free(ident_buf); return (error); } if (!(ident_buf->support.command1 & 
ATA_SUPPORT_PROTECTED)) { warnx("HPA is not supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } if (security && !(ident_buf->support.command2 & ATA_SUPPORT_MAXSECURITY)) { warnx("HPA Security is not supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } is48bit = ident_buf->support.command2 & ATA_SUPPORT_ADDRESS48; /* * The ATA spec requires: * 1. Read native max addr is called directly before set max addr * 2. Read native max addr is NOT called before any other set max call */ switch(action) { case ATA_HPA_ACTION_SET_MAX: if (confirm == 0 && atahpa_set_confirm(device, ident_buf, maxsize, persist) == 0) { cam_freeccb(ccb); free(ident_buf); return (1); } error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) { error = atahpa_set_max(device, retry_count, timeout, ccb, is48bit, maxsize, persist); if (error == 0) { if (quiet == 0) { /* redo identify to get new values */ error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); atahpa_print(ident_buf, hpasize, 1); } /* Hint CAM to reprobe the device. */ reprobe(device); } } break; case ATA_HPA_ACTION_SET_PWD: error = atahpa_password(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0 && quiet == 0) printf("HPA password has been set\n"); break; case ATA_HPA_ACTION_LOCK: error = atahpa_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0 && quiet == 0) printf("HPA has been locked\n"); break; case ATA_HPA_ACTION_UNLOCK: error = atahpa_unlock(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0 && quiet == 0) printf("HPA has been unlocked\n"); break; case ATA_HPA_ACTION_FREEZE_LOCK: error = atahpa_freeze_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0 && quiet == 0) printf("HPA has been frozen\n"); break; default: errx(1, "Option currently not supported"); } cam_freeccb(ccb); free(ident_buf); return (error); } enum { ATA_AMA_ACTION_PRINT, ATA_AMA_ACTION_SET_MAX, ATA_AMA_ACTION_FREEZE_LOCK }; static int ataama(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; struct ccb_getdev cgd; int error, quiet, c, action, actions; u_int64_t nativesize, maxsize; actions = 0; quiet = 0; maxsize = 0; /* default action is to print AMA information */ action = ATA_AMA_ACTION_PRINT; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 's': action = ATA_AMA_ACTION_SET_MAX; maxsize = strtoumax(optarg, NULL, 0); actions++; break; case 'f': action = ATA_AMA_ACTION_FREEZE_LOCK; actions++; break; case 'q': quiet++; break; } } if (actions > 1) { warnx("too many AMA actions specified"); return (1); } if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (1); } ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_AMA_ACTION_PRINT) { nativesize = 0; if (ident_buf->support2 & ATA_SUPPORT_AMAX_ADDR) ata_get_native_max(device, retry_count, timeout, ccb, &nativesize); ataama_print(ident_buf, nativesize, 1); cam_freeccb(ccb); free(ident_buf); return (error); } if (!(ident_buf->support2 & ATA_SUPPORT_AMAX_ADDR)) { warnx("Accessible Max Address is not supported by this device"); cam_freeccb(ccb); 
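/* also release the identify data before bailing out */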
free(ident_buf); return (1); } switch(action) { case ATA_AMA_ACTION_SET_MAX: error = ata_get_native_max(device, retry_count, timeout, ccb, &nativesize); if (error == 0) { error = ataama_set(device, retry_count, timeout, ccb, maxsize); if (error == 0) { if (quiet == 0) { /* redo identify to get new values */ error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); ataama_print(ident_buf, nativesize, 1); } /* Hint CAM to reprobe the device. */ reprobe(device); } } break; case ATA_AMA_ACTION_FREEZE_LOCK: error = ataama_freeze(device, retry_count, timeout, ccb); if (error == 0 && quiet == 0) printf("Accessible Max Address has been frozen\n"); break; default: errx(1, "Option currently not supported"); } cam_freeccb(ccb); free(ident_buf); return (error); } static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; int error, confirm, quiet, c, action, actions, setpwd; int security_enabled, erase_timeout, pwdsize; struct ata_security_password pwd; actions = 0; setpwd = 0; erase_timeout = 0; confirm = 0; quiet = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print security information */ action = ATA_SECURITY_ACTION_PRINT; /* user is master by default as it's safer that way */ pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': action = ATA_SECURITY_ACTION_FREEZE; actions++; break; case 'U': if (strcasecmp(optarg, "user") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_USER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_MASTER; } else if (strcasecmp(optarg, "master") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_USER; } else { warnx("-U argument '%s' is invalid (must be " "'user' or 'master')", optarg); return (1); } break; case 'l': if (strcasecmp(optarg, "high") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_HIGH; pwd.ctrl &= ~ATA_SECURITY_LEVEL_MAXIMUM; } else if (strcasecmp(optarg, "maximum") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_MAXIMUM; pwd.ctrl &= ~ATA_SECURITY_LEVEL_HIGH; } else { warnx("-l argument '%s' is unknown (must be " "'high' or 'maximum')", optarg); return (1); } break; case 'k': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_UNLOCK; actions++; break; case 'd': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_DISABLE; actions++; break; case 'e': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_ERASE; actions++; break; case 'h': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); pwd.ctrl |= ATA_SECURITY_ERASE_ENHANCED; action = ATA_SECURITY_ACTION_ERASE_ENHANCED; actions++; break; case 's': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); setpwd = 1; if (action == ATA_SECURITY_ACTION_PRINT) action = ATA_SECURITY_ACTION_SET_PASSWORD; /* * Don't increment action as this can be combined * with other actions.
*/ break; case 'y': confirm++; break; case 'q': quiet++; break; case 'T': erase_timeout = atoi(optarg) * 1000; break; } } if (actions > 1) { warnx("too many security actions specified"); return (1); } if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_SECURITY_ACTION_PRINT) { atasecurity_print(ident_buf); free(ident_buf); cam_freeccb(ccb); return (0); } if ((ident_buf->support.command1 & ATA_SUPPORT_SECURITY) == 0) { warnx("Security not supported"); free(ident_buf); cam_freeccb(ccb); return (1); } /* default timeout 15 seconds, the same as Linux hdparm */ timeout = timeout ? timeout : 15 * 1000; security_enabled = ident_buf->security_status & ATA_SECURITY_ENABLED; /* first set the password if requested */ if (setpwd == 1) { /* confirm we can erase before setting the password if erasing */ if (confirm == 0 && (action == ATA_SECURITY_ACTION_ERASE_ENHANCED || action == ATA_SECURITY_ACTION_ERASE) && atasecurity_erase_confirm(device, ident_buf) == 0) { cam_freeccb(ccb); free(ident_buf); return (error); } if (pwd.ctrl & ATA_SECURITY_PASSWORD_MASTER) { pwd.revision = ident_buf->master_passwd_revision; if (pwd.revision != 0 && pwd.revision != 0xfff && --pwd.revision == 0) { pwd.revision = 0xfffe; } } error = atasecurity_set_password(device, ccb, retry_count, timeout, &pwd, quiet); if (error != 0) { cam_freeccb(ccb); free(ident_buf); return (error); } security_enabled = 1; } switch(action) { case ATA_SECURITY_ACTION_FREEZE: error = atasecurity_freeze(device, ccb, retry_count, timeout, quiet); break; case ATA_SECURITY_ACTION_UNLOCK: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } else { warnx("Can't unlock, drive is not locked"); error = 1; } } else { warnx("Can't unlock, security is disabled"); error = 1; } break; case ATA_SECURITY_ACTION_DISABLE: if (security_enabled) { /* First unlock the drive if it's locked */ if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } if (error == 0) { error = atasecurity_disable(device, ccb, retry_count, timeout, &pwd, quiet); } } else { warnx("Can't disable security (already disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE: if (security_enabled) { if (erase_timeout == 0) { erase_timeout = atasecurity_erase_timeout_msecs( ident_buf->erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Can't secure erase (security is disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE_ENHANCED: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_ENH_SUPP) { if (erase_timeout == 0) { erase_timeout = atasecurity_erase_timeout_msecs( ident_buf->enhanced_erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Enhanced erase is not supported"); error = 1; } } else { warnx("Can't secure erase (enhanced), " "security is disabled"); error = 1; } break; } cam_freeccb(ccb); free(ident_buf); return (error); } /* * Convert periph name into a bus, target and lun. * * Returns the number of parsed components, or 0.
static int parse_btl_name(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst) { int fd; union ccb ccb; bzero(&ccb, sizeof(ccb)); ccb.ccb_h.func_code = XPT_GDEVLIST; if (cam_get_device(tstr, ccb.cgdl.periph_name, sizeof(ccb.cgdl.periph_name), &ccb.cgdl.unit_number) == -1) { warnx("%s", cam_errbuf); return (0); } /* * Attempt to get the passthrough device. This ioctl will * fail if the device name is null, if the device doesn't * exist, or if the passthrough driver isn't in the kernel. */ if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("Unable to open %s", XPT_DEVICE); return (0); } if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1) { warn("Unable to find bus:target:lun for device %s%d", ccb.cgdl.periph_name, ccb.cgdl.unit_number); close(fd); return (0); } close(fd); if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(ccb.ccb_h.status); warnx("Unable to find bus:target:lun for device %s%d, " "CAM status: %s (%#x)", ccb.cgdl.periph_name, ccb.cgdl.unit_number, entry ? entry->status_text : "Unknown", ccb.ccb_h.status); return (0); } /* * The kernel fills in the bus/target/lun. We don't * need the passthrough device name and unit number since * we aren't going to open it. */ *bus = ccb.ccb_h.path_id; *target = ccb.ccb_h.target_id; *lun = ccb.ccb_h.target_lun; *arglst |= CAM_ARG_BUS | CAM_ARG_TARGET | CAM_ARG_LUN; return (3); } /* * Parse out a bus, or a bus, target and lun in the following * format: * bus * bus:target * bus:target:lun * * Returns the number of parsed components, or 0. */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst) { char *tmpstr, *end; int convs = 0; *bus = CAM_BUS_WILDCARD; *target = CAM_TARGET_WILDCARD; *lun = CAM_LUN_WILDCARD; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncasecmp(tstr, "all", strlen("all")) == 0) { *arglst |= CAM_ARG_BUS; return (1); } if (!isdigit(*tstr)) return (parse_btl_name(tstr, bus, target, lun, arglst)); tmpstr = strsep(&tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *bus = strtol(tmpstr, &end, 0); if (*end != '\0') return (0); *arglst |= CAM_ARG_BUS; convs++; tmpstr = strsep(&tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, &end, 0); if (*end != '\0') return (0); *arglst |= CAM_ARG_TARGET; convs++; tmpstr = strsep(&tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtoll(tmpstr, &end, 0); if (*end != '\0') return (0); *arglst |= CAM_ARG_LUN; convs++; } } } return (convs); } static int dorescan_or_reset(int argc, char **argv, int rescan) { static const char must[] = "you must specify \"all\", a bus, a bus:target:lun or periph to %s"; int rv, error = 0; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr; if (argc < 3) { warnx(must, rescan? "rescan" : "reset"); return (1); } tstr = argv[optind]; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncasecmp(tstr, "all", strlen("all")) == 0) arglist |= CAM_ARG_BUS; else { rv = parse_btl(argv[optind], &bus, &target, &lun, &arglist); if (rv != 1 && rv != 3) { warnx(must, rescan ?
"rescan" : "reset"); return (1); } } if (arglist & CAM_ARG_LUN) error = scanlun_or_reset_dev(bus, target, lun, rescan); else error = rescan_or_reset_bus(bus, rescan); return (error); } static int rescan_or_reset_bus(path_id_t bus, int rescan) { union ccb *ccb = NULL, *matchccb = NULL; int fd = -1, retval; int bufsize; retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return (1); } ccb = malloc(sizeof(*ccb)); if (ccb == NULL) { warn("failed to allocate CCB"); retval = 1; goto bailout; } bzero(ccb, sizeof(*ccb)); if (bus != CAM_BUS_WILDCARD) { ccb->ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb->ccb_h.path_id = bus; ccb->ccb_h.target_id = CAM_TARGET_WILDCARD; ccb->ccb_h.target_lun = CAM_LUN_WILDCARD; ccb->crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb->ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { fprintf(stdout, "%s of bus %d was successful\n", rescan ? "Re-scan" : "Reset", bus); } else { fprintf(stdout, "%s of bus %d returned error %#x\n", rescan ? "Re-scan" : "Reset", bus, ccb->ccb_h.status & CAM_STATUS_MASK); retval = 1; } goto bailout; } /* * The right way to handle this is to modify the xpt so that it can * handle a wildcarded bus in a rescan or reset CCB. At the moment * that isn't implemented, so instead we enumerate the buses and * send the rescan or reset to those buses in the case where the * given bus is -1 (wildcard). We don't send a rescan or reset * to the xpt bus; sending a rescan to the xpt bus is effectively a * no-op, sending a rescan to the xpt bus would result in a status of * CAM_REQ_INVALID. */ matchccb = malloc(sizeof(*matchccb)); if (matchccb == NULL) { warn("failed to allocate CCB"); retval = 1; goto bailout; } bzero(matchccb, sizeof(*matchccb)); matchccb->ccb_h.func_code = XPT_DEV_MATCH; matchccb->ccb_h.path_id = CAM_BUS_WILDCARD; bufsize = sizeof(struct dev_match_result) * 20; matchccb->cdm.match_buf_len = bufsize; matchccb->cdm.matches=(struct dev_match_result *)malloc(bufsize); if (matchccb->cdm.matches == NULL) { warnx("can't malloc memory for matches"); retval = 1; goto bailout; } matchccb->cdm.num_matches = 0; matchccb->cdm.num_patterns = 1; matchccb->cdm.pattern_buf_len = sizeof(struct dev_match_pattern); matchccb->cdm.patterns = (struct dev_match_pattern *)malloc( matchccb->cdm.pattern_buf_len); if (matchccb->cdm.patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } matchccb->cdm.patterns[0].type = DEV_MATCH_BUS; matchccb->cdm.patterns[0].pattern.bus_pattern.flags = BUS_MATCH_ANY; do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, matchccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((matchccb->ccb_h.status != CAM_REQ_CMP) || ((matchccb->cdm.status != CAM_DEV_MATCH_LAST) && (matchccb->cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", matchccb->ccb_h.status, matchccb->cdm.status); retval = 1; goto bailout; } for (i = 0; i < matchccb->cdm.num_matches; i++) { struct bus_match_result *bus_result; /* This shouldn't happen. */ if (matchccb->cdm.matches[i].type != DEV_MATCH_BUS) continue; bus_result =&matchccb->cdm.matches[i].result.bus_result; /* * We don't want to rescan or reset the xpt bus. * See above. */ if (bus_result->path_id == CAM_XPT_PATH_ID) continue; ccb->ccb_h.func_code = rescan ? 
XPT_SCAN_BUS : XPT_RESET_BUS; ccb->ccb_h.path_id = bus_result->path_id; ccb->ccb_h.target_id = CAM_TARGET_WILDCARD; ccb->ccb_h.target_lun = CAM_LUN_WILDCARD; ccb->crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb->ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { fprintf(stdout, "%s of bus %d was successful\n", rescan? "Re-scan" : "Reset", bus_result->path_id); } else { /* * Don't bail out just yet, maybe the other * rescan or reset commands will complete * successfully. */ fprintf(stderr, "%s of bus %d returned error " "%#x\n", rescan? "Re-scan" : "Reset", bus_result->path_id, ccb->ccb_h.status & CAM_STATUS_MASK); retval = 1; } } } while ((matchccb->ccb_h.status == CAM_REQ_CMP) && (matchccb->cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); if (matchccb != NULL) { free(matchccb->cdm.patterns); free(matchccb->cdm.matches); free(matchccb); } free(ccb); return (retval); } static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan) { union ccb ccb; struct cam_device *device; int fd; device = NULL; if (bus == CAM_BUS_WILDCARD) { warnx("invalid bus number %d", bus); return (1); } if (target == CAM_TARGET_WILDCARD) { warnx("invalid target number %d", target); return (1); } if (lun == CAM_LUN_WILDCARD) { warnx("invalid lun number %jx", (uintmax_t)lun); return (1); } fd = -1; bzero(&ccb, sizeof(union ccb)); if (scan) { if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return (1); } } else { device = cam_open_btl(bus, target, lun, O_RDWR, NULL); if (device == NULL) { warnx("%s", cam_errbuf); return (1); } } ccb.ccb_h.func_code = (scan)? XPT_SCAN_LUN : XPT_RESET_DEV; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; ccb.ccb_h.timeout = 5000; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (scan) { if (ioctl(fd, CAMIOCOMMAND, &ccb) < 0) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return (1); } } else { if (cam_send_ccb(device, &ccb) < 0) { warn("error sending XPT_RESET_DEV CCB"); cam_close_device(device); return (1); } } if (scan) close(fd); else cam_close_device(device); /* * An error code of CAM_BDR_SENT is normal for a BDR request. */ if (((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) || ((!scan) && ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_BDR_SENT))) { fprintf(stdout, "%s of %d:%d:%jx was successful\n", scan? "Re-scan" : "Reset", bus, target, (uintmax_t)lun); return (0); } else { fprintf(stdout, "%s of %d:%d:%jx returned error %#x\n", scan?
"Re-scan" : "Reset", bus, target, (uintmax_t)lun, ccb.ccb_h.status & CAM_STATUS_MASK); return (1); } } static struct scsi_nv defect_list_type_map[] = { { "block", SRDD10_BLOCK_FORMAT }, { "extbfi", SRDD10_EXT_BFI_FORMAT }, { "extphys", SRDD10_EXT_PHYS_FORMAT }, { "longblock", SRDD10_LONG_BLOCK_FORMAT }, { "bfi", SRDD10_BYTES_FROM_INDEX_FORMAT }, { "phys", SRDD10_PHYSICAL_SECTOR_FORMAT } }; static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { union ccb *ccb = NULL; struct scsi_read_defect_data_hdr_10 *hdr10 = NULL; struct scsi_read_defect_data_hdr_12 *hdr12 = NULL; size_t hdr_size = 0, entry_size = 0; uint8_t *defect_list = NULL; uint8_t list_format = 0; uint32_t dlist_length = 0; uint32_t returned_length = 0, valid_len = 0; uint32_t num_returned = 0, num_valid = 0; uint32_t max_possible_size = 0, hdr_max = 0; uint32_t starting_offset = 0; uint8_t returned_format, returned_type; unsigned int i; int c, error = 0; int mads = 0; bool summary = false, quiet = false, list_type_set = false; bool get_length = true, use_12byte = false, first_pass = true; bool hex_format = false; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': { scsi_nv_status status; int entry_num = 0; if (list_type_set) { warnx("%s: -f specified twice", __func__); error = 1; goto defect_bailout; } status = scsi_get_nv(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) { list_format |= defect_list_type_map[ entry_num].value; list_type_set = true; } else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", "defect list type", optarg); error = 1; goto defect_bailout; } break; } case 'G': list_format |= SRDD10_GLIST; break; case 'P': list_format |= SRDD10_PLIST; break; case 'q': quiet = true; break; case 's': summary = true; break; case 'S': { char *endptr; starting_offset = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { error = 1; warnx("invalid starting offset %s", optarg); goto defect_bailout; } use_12byte = true; break; } case 'X': hex_format = true; break; default: break; } } if (!list_type_set) { error = 1; warnx("no defect list format specified"); goto defect_bailout; } /* * This implies a summary, and was the previous behavior. */ if ((list_format & ~SRDD10_DLIST_FORMAT_MASK) == 0) summary = true; ccb = cam_getccb(device); /* * We start off asking for just the header to determine how much defect * data is available. Some Hitachi drives return an error if you ask * for more data than the drive has. Once we know the length, we retry * the command with the returned length. When we're retrying the with * 12-byte command, we're always changing to the 12-byte command and * need to get the length. Simplify the logic below by always setting * use_12byte in this case with this slightly more complex logic here. */ if (!use_12byte) { dlist_length = sizeof(*hdr10); } else { retry_12byte: get_length = true; use_12byte = true; dlist_length = sizeof(*hdr12); } retry: if (defect_list != NULL) { free(defect_list); defect_list = NULL; } defect_list = malloc(dlist_length); if (defect_list == NULL) { warnx("can't malloc memory for defect list"); error = 1; goto defect_bailout; } next_batch: bzero(defect_list, dlist_length); /* * cam_getccb() zeros the CCB header only. So we need to zero the * payload portion of the ccb. 
*/ CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_read_defects(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ task_attr, /*list_format*/ list_format, /*addr_desc_index*/ starting_offset, /*data_ptr*/ defect_list, /*dxfer_len*/ dlist_length, /*minimum_cmd_size*/ use_12byte ? 12 : 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ DEFECT DATA command"); error = 1; goto defect_bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (!use_12byte) { hdr10 = (struct scsi_read_defect_data_hdr_10 *)defect_list; hdr_size = sizeof(*hdr10); hdr_max = SRDDH10_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_2btoul(hdr10->length); returned_format = hdr10->format; } else { returned_length = 0; returned_format = 0; } } else { hdr12 = (struct scsi_read_defect_data_hdr_12 *)defect_list; hdr_size = sizeof(*hdr12); hdr_max = SRDDH12_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_4btoul(hdr12->length); returned_format = hdr12->format; } else { returned_length = 0; returned_format = 0; } } returned_type = returned_format & SRDDH10_DLIST_FORMAT_MASK; switch (returned_type) { case SRDD10_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_block); break; case SRDD10_LONG_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_long_block); break; case SRDD10_EXT_PHYS_FORMAT: case SRDD10_PHYSICAL_SECTOR_FORMAT: entry_size = sizeof(struct scsi_defect_desc_phys_sector); break; case SRDD10_EXT_BFI_FORMAT: case SRDD10_BYTES_FROM_INDEX_FORMAT: entry_size = sizeof(struct scsi_defect_desc_bytes_from_index); break; default: warnx("Unknown defect format 0x%x", returned_type); error = 1; goto defect_bailout; break; } max_possible_size = (hdr_max / entry_size) * entry_size; num_returned = returned_length / entry_size; num_valid = min(returned_length, valid_len - hdr_size); num_valid /= entry_size; if (get_length) { get_length = false; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * If the drive is reporting that it just doesn't * support the defect list format, go ahead and use * the length it reported. Otherwise, the length * may not be valid, so use the maximum. */ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c) && (ascq == 0x00) && (returned_length > 0)) { if (!use_12byte && (returned_length >= max_possible_size)) { goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1f) && (ascq == 0x00) && (returned_length > 0)) { /* Partial defect list transfer */ /* * Hitachi drives return this error * along with a partial defect list if they * have more defects than the 10 byte * command can support. Retry with the 12 * byte command. */ if (!use_12byte) { goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_ILLEGAL_REQUEST) && (asc == 0x24) && (ascq == 0x00)) { /* Invalid field in CDB */ /* * SBC-3 says that if the drive has more * defects than can be reported with the * 10 byte command, it should return this * error and no data. Retry with the 12 * byte command.
*/ if (!use_12byte) { goto retry_12byte; } dlist_length = returned_length + hdr_size; } else { /* * If we got a SCSI error and no valid length, * just use the 10 byte maximum. The 12 * byte maximum is too large. */ if (returned_length == 0) dlist_length = SRDD10_MAX_LENGTH; else { if (!use_12byte && (returned_length >= max_possible_size)) { goto retry_12byte; } dlist_length = returned_length + hdr_size; } } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP){ error = 1; warnx("Error reading defect header"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } else { if (!use_12byte && (returned_length >= max_possible_size)) { goto retry_12byte; } dlist_length = returned_length + hdr_size; } if (summary) { fprintf(stdout, "%u", num_returned); if (!quiet) { fprintf(stdout, " defect%s", (num_returned != 1) ? "s" : ""); } fprintf(stdout, "\n"); goto defect_bailout; } /* * We always limit the list length to the 10-byte maximum * length (0xffff). The reason is that some controllers * can't handle larger I/Os, and we can transfer the entire * 10 byte list in one shot. For drives that support the 12 * byte read defects command, we'll step through the list * by specifying a starting offset. For drives that don't * support the 12 byte command's starting offset, we'll * just display the first 64K. */ dlist_length = min(dlist_length, SRDD10_MAX_LENGTH); goto retry; } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI spec, if the disk doesn't support * the requested format, it will generally return a sense * key of RECOVERED ERROR, and an additional sense code * of "DEFECT LIST NOT FOUND". HGST drives also return * Primary/Grown defect list not found errors. So just * check for an ASC of 0x1c. */ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c)) { const char *format_str; format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), list_format & SRDD10_DLIST_FORMAT_MASK); warnx("requested defect format %s not available", format_str ? 
format_str : "unknown"); format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), returned_type); if (format_str != NULL) { warnx("Device returned %s format", format_str); } else { error = 1; warnx("Device returned unknown defect" " data format %#x", returned_type); goto defect_bailout; } } else { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } if (first_pass) { fprintf(stderr, "Got %d defect", num_returned); if (!summary || (num_returned == 0)) { fprintf(stderr, "s.\n"); goto defect_bailout; } else if (num_returned == 1) fprintf(stderr, ":\n"); else fprintf(stderr, "s:\n"); first_pass = false; } /* * XXX KDM I should probably clean up the printout format for the * disk defects. */ switch (returned_type) { case SRDD10_PHYSICAL_SECTOR_FORMAT: case SRDD10_EXT_PHYS_FORMAT: { struct scsi_defect_desc_phys_sector *dlist; dlist = (struct scsi_defect_desc_phys_sector *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t sector; sector = scsi_4btoul(dlist[i].sector); if (returned_type == SRDD10_EXT_PHYS_FORMAT) { mads = (sector & SDD_EXT_PHYS_MADS) ? 0 : 1; sector &= ~SDD_EXT_PHYS_FLAG_MASK; } if (!hex_format) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].sector), mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].sector), mads ? " - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_BYTES_FROM_INDEX_FORMAT: case SRDD10_EXT_BFI_FORMAT: { struct scsi_defect_desc_bytes_from_index *dlist; dlist = (struct scsi_defect_desc_bytes_from_index *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t bfi; bfi = scsi_4btoul(dlist[i].bytes_from_index); if (returned_type == SRDD10_EXT_BFI_FORMAT) { mads = (bfi & SDD_EXT_BFI_MADS) ? 1 : 0; bfi &= ~SDD_EXT_BFI_FLAG_MASK; } if (!hex_format) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].bytes_from_index), mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].bytes_from_index), mads ? 
" - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDDH10_BLOCK_FORMAT: { struct scsi_defect_desc_block *dlist; dlist = (struct scsi_defect_desc_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (!hex_format) fprintf(stdout, "%u\n", scsi_4btoul(dlist[i].address)); else fprintf(stdout, "0x%x\n", scsi_4btoul(dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_LONG_BLOCK_FORMAT: { struct scsi_defect_desc_long_block *dlist; dlist = (struct scsi_defect_desc_long_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (!hex_format) fprintf(stdout, "%ju\n", (uintmax_t)scsi_8btou64( dlist[i].address)); else fprintf(stdout, "0x%jx\n", (uintmax_t)scsi_8btou64( dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } default: fprintf(stderr, "Unknown defect format 0x%x\n", returned_type); error = 1; break; } defect_bailout: if (defect_list != NULL) free(defect_list); if (ccb != NULL) cam_freeccb(ccb); return (error); } #if 0 void reassignblocks(struct cam_device *device, uint32_t *blocks, int num_blocks) { union ccb *ccb; ccb = cam_getccb(device); cam_freeccb(ccb); } #endif void mode_sense(struct cam_device *device, int *cdb_len, int dbd, int llbaa, int pc, int page, int subpage, int task_attr, int retry_count, int timeout, uint8_t *data, int datalen) { union ccb *ccb; int error_code, sense_key, asc, ascq; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_sense: couldn't allocate CCB"); retry: /* * MODE SENSE(6) can't handle more then 255 bytes. If there are more, * device must return error, so we should not get truncated data. */ if (*cdb_len == 6 && datalen > 255) datalen = 255; CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_mode_sense_subpage(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* dbd */ dbd, /* pc */ pc << 6, /* page */ page, /* subpage */ subpage, /* param_buf */ data, /* param_len */ datalen, /* minimum_cmd_size */ *cdb_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); if (llbaa && ccb->csio.cdb_len == 10) { struct scsi_mode_sense_10 *cdb = (struct scsi_mode_sense_10 *)ccb->csio.cdb_io.cdb_bytes; cdb->byte2 |= SMS10_LLBAA; } /* Record what CDB size the above function really set. */ *cdb_len = ccb->csio.cdb_len; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(device, ccb) < 0) err(1, "error sending mode sense command"); /* In case of ILLEGEL REQUEST try to fall back to 6-byte command. 
*/ if (*cdb_len != 6 && ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID || (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq) && sense_key == SSD_KEY_ILLEGAL_REQUEST))) { *cdb_len = 6; goto retry; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); errx(1, "mode sense command returned error"); } cam_freeccb(ccb); } void mode_select(struct cam_device *device, int cdb_len, int save_pages, int task_attr, int retry_count, int timeout, uint8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_select: couldn't allocate CCB"); scsi_mode_select_len(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* scsi_page_fmt */ 1, /* save_pages */ save_pages, /* param_buf */ data, /* param_len */ datalen, /* minimum_cmd_size */ cdb_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode select command"); else errx(1, "error sending mode select command"); } cam_freeccb(ccb); } void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { char *str_subpage; int c, page = -1, subpage = 0, pc = 0, llbaa = 0; int binary = 0, cdb_len = 10, dbd = 0, desc = 0, edit = 0, list = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case '6': cdb_len = 6; break; case 'b': binary = 1; break; case 'd': dbd = 1; break; case 'e': edit = 1; break; case 'l': list++; break; case 'm': str_subpage = optarg; strsep(&str_subpage, ","); page = strtol(optarg, NULL, 0); if (str_subpage) subpage = strtol(str_subpage, NULL, 0); if (page < 0 || page > 0x3f) errx(1, "invalid mode page %d", page); if (subpage < 0 || subpage > 0xff) errx(1, "invalid mode subpage %d", subpage); break; case 'D': desc = 1; break; case 'L': llbaa = 1; break; case 'P': pc = strtol(optarg, NULL, 0); if ((pc < 0) || (pc > 3)) errx(1, "invalid page control field %d", pc); break; default: break; } } if (desc && page == -1) page = SMS_ALL_PAGES_PAGE; if (page == -1 && list == 0) errx(1, "you must specify a mode page!"); if (dbd && desc) errx(1, "-d and -D are incompatible!"); if (llbaa && cdb_len != 10) errx(1, "LLBAA bit is not present in MODE SENSE(6)!"); if (list != 0) { mode_list(device, cdb_len, dbd, pc, list > 1, task_attr, retry_count, timeout); } else { mode_edit(device, cdb_len, desc, dbd, llbaa, pc, page, subpage, edit, binary, task_attr, retry_count, timeout); } } static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { union ccb *ccb; uint32_t flags = CAM_DIR_NONE; uint8_t *data_ptr = NULL; uint8_t cdb[20]; uint8_t atacmd[12]; struct get_hook hook; int c, data_bytes = 0, valid_bytes; int cdb_len = 0; int atacmd_len = 0; int dmacmd = 0; int fpdmacmd = 0; int need_res = 0; char *datastr = NULL, *tstr, *resstr = NULL; int error = 0; int fd_data = 0, fd_res = 0; int 
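/* holds the return value of cam_send_ccb(); negative means the ioctl itself failed */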
retval; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsicmd: error allocating ccb"); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; atacmd_len = buff_encode_visit(atacmd, sizeof(atacmd), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. */ optind += hook.got; break; case 'c': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; cdb_len = buff_encode_visit(cdb, sizeof(cdb), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. */ optind += hook.got; break; case 'd': dmacmd = 1; break; case 'f': fpdmacmd = 1; break; case 'i': if (arglist & CAM_ARG_CMD_OUT) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_IN; flags = CAM_DIR_IN; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of input bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; data_ptr = (uint8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } break; case 'o': if (arglist & CAM_ARG_CMD_IN) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_OUT; flags = CAM_DIR_OUT; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of output bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); data_ptr = (uint8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } bzero(data_ptr, data_bytes); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; else buff_encode_visit(data_ptr, data_bytes, datastr, iget, &hook); optind += hook.got; break; case 'r': need_res = 1; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; resstr = cget(&hook, NULL); if ((resstr != NULL) && (resstr[0] == '-')) fd_res = 1; optind += hook.got; break; default: break; } } /* * If fd_data is set, and we're writing to the device, we need to * read the data the user wants written from stdin. 
*/ if ((fd_data == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = data_bytes; uint8_t *buf_ptr = data_ptr; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto scsicmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (arglist & CAM_ARG_ERR_RECOVER) flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ flags |= CAM_DEV_QFRZDIS; if (cdb_len) { /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. * The next 5 bits are the command code. * Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((cdb[0] >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* computed by buff_encode_visit */ break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } /* * We should probably use csio_build_visit or something like that * here, but it's easier to encode arguments as you go. The * alternative would be skipping the CDB argument and then encoding * it here, since we've got the data buffer argument by now. */ bcopy(cdb, &ccb->csio.cdb_io.cdb_bytes, cdb_len); cam_fill_csio(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ task_attr, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*sense_len*/ SSD_FULL_SIZE, /*cdb_len*/ cdb_len, /*timeout*/ timeout ? timeout : 5000); } else { atacmd_len = 12; bcopy(atacmd, &ccb->ataio.cmd.command, atacmd_len); if (need_res) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; if (dmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; if (fpdmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*timeout*/ timeout ? 
timeout : 5000); } if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsicmd_bailout; } if (atacmd_len && need_res) { if (fd_res == 0) { buff_decode_visit(&ccb->ataio.res.status, 11, resstr, arg_put, NULL); fprintf(stdout, "\n"); } else { fprintf(stdout, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", ccb->ataio.res.status, ccb->ataio.res.error, ccb->ataio.res.lba_low, ccb->ataio.res.lba_mid, ccb->ataio.res.lba_high, ccb->ataio.res.device, ccb->ataio.res.lba_low_exp, ccb->ataio.res.lba_mid_exp, ccb->ataio.res.lba_high_exp, ccb->ataio.res.sector_count, ccb->ataio.res.sector_count_exp); fflush(stdout); } } if (cdb_len) valid_bytes = ccb->csio.dxfer_len - ccb->csio.resid; else valid_bytes = ccb->ataio.dxfer_len - ccb->ataio.resid; if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (arglist & CAM_ARG_CMD_IN) && (valid_bytes > 0)) { if (fd_data == 0) { buff_decode_visit(data_ptr, valid_bytes, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = valid_bytes; uint8_t *buf_ptr = data_ptr; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(STDOUT_FILENO, buf_ptr, amt_to_write)) > 0; ) { amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto scsicmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", valid_bytes - amt_to_write, valid_bytes); } } } scsicmd_bailout: if ((data_bytes > 0) && (data_ptr != NULL)) free(data_ptr); cam_freeccb(ccb); return (error); } static int camdebug(int argc, char **argv, char *combinedopt) { int c, fd; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr; union ccb ccb; int error = 0, rv; bzero(&ccb, sizeof(union ccb)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'I': arglist |= CAM_ARG_DEBUG_INFO; ccb.cdbg.flags |= CAM_DEBUG_INFO; break; case 'P': arglist |= CAM_ARG_DEBUG_PERIPH; ccb.cdbg.flags |= CAM_DEBUG_PERIPH; break; case 'S': arglist |= CAM_ARG_DEBUG_SUBTRACE; ccb.cdbg.flags |= CAM_DEBUG_SUBTRACE; break; case 'T': arglist |= CAM_ARG_DEBUG_TRACE; ccb.cdbg.flags |= CAM_DEBUG_TRACE; break; case 'X': arglist |= CAM_ARG_DEBUG_XPT; ccb.cdbg.flags |= CAM_DEBUG_XPT; break; case 'c': arglist |= CAM_ARG_DEBUG_CDB; ccb.cdbg.flags |= CAM_DEBUG_CDB; break; case 'p': arglist |= CAM_ARG_DEBUG_PROBE; ccb.cdbg.flags |= CAM_DEBUG_PROBE; break; default: break; } } argc -= optind; argv += optind; if (argc <= 0) { warnx("you must specify \"off\", \"all\" or a bus,"); warnx("bus:target, bus:target:lun or periph"); return (1); } tstr = *argv; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncmp(tstr, "off", 3) == 0) { ccb.cdbg.flags = CAM_DEBUG_NONE; arglist &= ~(CAM_ARG_DEBUG_INFO|CAM_ARG_DEBUG_PERIPH| CAM_ARG_DEBUG_TRACE|CAM_ARG_DEBUG_SUBTRACE| CAM_ARG_DEBUG_XPT|CAM_ARG_DEBUG_PROBE); } else { rv = parse_btl(tstr, &bus, &target, &lun, &arglist); if (rv < 1) { warnx("you must specify \"all\", \"off\", or a bus,"); warnx("bus:target, bus:target:lun or periph to debug"); return (1); } } if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return (1); }
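/* * Build and submit an XPT_DEBUG CCB through the transport layer * device. The wildcard defaults from the initializers above select * everything, so an invocation like "camcontrol debug -I all" * (illustrative) enables INFO-level debugging on every path, while * "camcontrol debug -I 0:1:0" restricts it to bus 0, target 1, lun 0. */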
ccb.ccb_h.func_code = XPT_DEBUG; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); error = 1; } else { if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_FUNC_NOTAVAIL) { warnx("CAM debugging not available"); warnx("you need to put options CAMDEBUG in" " your kernel config file!"); error = 1; } else if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_DEBUG CCB failed with status %#x", ccb.ccb_h.status); error = 1; } else { if (ccb.cdbg.flags == CAM_DEBUG_NONE) { fprintf(stderr, "Debugging turned off\n"); } else { fprintf(stderr, "Debugging enabled for " "%d:%d:%jx\n", bus, target, (uintmax_t)lun); } } } close(fd); return (error); } static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int numtags = -1; int retval = 0; int quiet = 0; char pathstr[1024]; ccb = cam_getccb(device); if (ccb == NULL) { warnx("tagcontrol: error allocating ccb"); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'N': numtags = strtol(optarg, NULL, 0); if (numtags < 0) { warnx("tag count %d is < 0", numtags); retval = 1; goto tagcontrol_bailout; } break; case 'q': quiet++; break; default: break; } } cam_path_string(device, pathstr, sizeof(pathstr)); if (numtags >= 0) { ccb->ccb_h.func_code = XPT_REL_SIMQ; ccb->ccb_h.flags = CAM_DEV_QFREEZE; ccb->crs.release_flags = RELSIM_ADJUST_OPENINGS; ccb->crs.openings = numtags; if (cam_send_ccb(device, ccb) < 0) { warn("error sending XPT_REL_SIMQ CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_REL_SIMQ CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (quiet == 0) fprintf(stdout, "%stagged openings now %d\n", pathstr, ccb->crs.openings); } CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cgds); ccb->ccb_h.func_code = XPT_GDEV_STATS; if (cam_send_ccb(device, ccb) < 0) { warn("error sending XPT_GDEV_STATS CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GDEV_STATS CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s", pathstr); fprintf(stdout, "dev_openings %d\n", ccb->cgds.dev_openings); fprintf(stdout, "%s", pathstr); fprintf(stdout, "dev_active %d\n", ccb->cgds.dev_active); fprintf(stdout, "%s", pathstr); fprintf(stdout, "allocated %d\n", ccb->cgds.allocated); fprintf(stdout, "%s", pathstr); fprintf(stdout, "queued %d\n", ccb->cgds.queued); fprintf(stdout, "%s", pathstr); fprintf(stdout, "held %d\n", ccb->cgds.held); fprintf(stdout, "%s", pathstr); fprintf(stdout, "mintags %d\n", ccb->cgds.mintags); fprintf(stdout, "%s", pathstr); fprintf(stdout, "maxtags %d\n", ccb->cgds.maxtags); } else { if (quiet == 0) { fprintf(stdout, "%s", pathstr); fprintf(stdout, "device openings: "); } fprintf(stdout, "%d\n", ccb->cgds.dev_openings + ccb->cgds.dev_active); } tagcontrol_bailout: cam_freeccb(ccb); return (retval); } static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts) { char pathstr[1024]; cam_path_string(device, pathstr, sizeof(pathstr)); if (cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { fprintf(stdout, "%ssync parameter: %d\n", pathstr, 
spi->sync_period); if (spi->sync_offset != 0) { u_int freq; freq = scsi_calc_syncsrate(spi->sync_period); fprintf(stdout, "%sfrequency: %d.%03dMHz\n", pathstr, freq / 1000, freq % 1000); } } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { fprintf(stdout, "%soffset: %d\n", pathstr, spi->sync_offset); } if (spi->valid & CTS_SPI_VALID_BUS_WIDTH) { fprintf(stdout, "%sbus width: %d bits\n", pathstr, (0x01 << spi->bus_width) * 8); } if (spi->valid & CTS_SPI_VALID_DISC) { fprintf(stdout, "%sdisconnection is %s\n", pathstr, (spi->flags & CTS_SPI_FLAGS_DISC_ENB) ? "enabled" : "disabled"); } } if (cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) fprintf(stdout, "%sWWNN: 0x%llx\n", pathstr, (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) fprintf(stdout, "%sWWPN: 0x%llx\n", pathstr, (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) fprintf(stdout, "%sPortID: 0x%x\n", pathstr, fc->port); if (fc->valid & CTS_FC_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, fc->bitrate / 1000, fc->bitrate % 1000); } if (cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, sas->bitrate / 1000, sas->bitrate % 1000); } if (cts->transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts->xport_specific.ata; if ((pata->valid & CTS_ATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(pata->mode)); } if ((pata->valid & CTS_ATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, pata->atapi); } if ((pata->valid & CTS_ATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO transaction length: %d\n", pathstr, pata->bytecount); } } if (cts->transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts->xport_specific.sata; if ((sata->valid & CTS_SATA_VALID_REVISION) != 0) { fprintf(stdout, "%sSATA revision: %d.x\n", pathstr, sata->revision); } if ((sata->valid & CTS_SATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(sata->mode)); } if ((sata->valid & CTS_SATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, sata->atapi); } if ((sata->valid & CTS_SATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO transaction length: %d\n", pathstr, sata->bytecount); } if ((sata->valid & CTS_SATA_VALID_PM) != 0) { fprintf(stdout, "%sPMP presence: %d\n", pathstr, sata->pm_present); } if ((sata->valid & CTS_SATA_VALID_TAGS) != 0) { fprintf(stdout, "%sNumber of tags: %d\n", pathstr, sata->tags); } if ((sata->valid & CTS_SATA_VALID_CAPS) != 0) { fprintf(stdout, "%sSATA capabilities: %08x\n", pathstr, sata->caps); } } if (cts->transport == XPORT_NVME) { struct ccb_trans_settings_nvme *nvme = &cts->xport_specific.nvme; if (nvme->valid & CTS_NVME_VALID_LINK) { fprintf(stdout, "%sPCIe lanes: %d (%d max)\n", pathstr, nvme->lanes, nvme->max_lanes); fprintf(stdout, "%sPCIe Generation: %d (%d max)\n", pathstr, nvme->speed, nvme->max_speed); } } if (cts->transport == XPORT_NVMF) { struct ccb_trans_settings_nvmf *nvmf = &cts->xport_specific.nvmf; if (nvmf->valid & CTS_NVMF_VALID_TRTYPE) { fprintf(stdout, "%sTransport: %s\n", pathstr, nvmf_transport_type(nvmf->trtype)); } } + if (cts->transport == XPORT_UFSHCI) { + struct ccb_trans_settings_ufshci *ufshci = + &cts->xport_specific.ufshci; + + if (ufshci->valid & CTS_UFSHCI_VALID_MODE) { + fprintf(stdout, "%sHigh Speed Gear: %d 
(%d max)\n", + pathstr, ufshci->hs_gear, ufshci->max_hs_gear); + fprintf(stdout, "%sUnipro TX lanes: %d (%d max)\n", pathstr, + ufshci->tx_lanes, ufshci->max_tx_lanes); + fprintf(stdout, "%sUnipro RX lanes: %d (%d max)\n", pathstr, + ufshci->rx_lanes, ufshci->max_rx_lanes); + } + } if (cts->protocol == PROTO_ATA) { struct ccb_trans_settings_ata *ata= &cts->proto_specific.ata; if (ata->valid & CTS_ATA_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (ata->flags & CTS_ATA_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } if (cts->protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi= &cts->proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } if (cts->protocol == PROTO_NVME) { struct ccb_trans_settings_nvme *nvme = &cts->proto_specific.nvme; if (nvme->valid & CTS_NVME_VALID_SPEC) { fprintf(stdout, "%sNVMe Spec: %d.%d\n", pathstr, NVME_MAJOR(nvme->spec), NVME_MINOR(nvme->spec)); } } } /* * Get a path inquiry CCB for the specified device. */ static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cpi: couldn't allocate CCB"); return (1); } ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { warn("get_cpi: error sending Path Inquiry CCB"); retval = 1; goto get_cpi_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } bcopy(&ccb->cpi, cpi, sizeof(struct ccb_pathinq)); get_cpi_bailout: cam_freeccb(ccb); return (retval); } /* * Get a get device CCB for the specified device. */ static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cgd: couldn't allocate CCB"); return (1); } ccb->ccb_h.func_code = XPT_GDEV_TYPE; if (cam_send_ccb(device, ccb) < 0) { warn("get_cgd: error sending Get type information CCB"); retval = 1; goto get_cgd_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } bcopy(&ccb->cgd, cgd, sizeof(struct ccb_getdev)); get_cgd_bailout: cam_freeccb(ccb); return (retval); } /* * Returns 1 if the device has the VPD page, 0 if it does not, and -1 on an * error. */ int dev_has_vpd_page(struct cam_device *dev, uint8_t page_id, int retry_count, int timeout, int verbosemode) { union ccb *ccb = NULL; struct scsi_vpd_supported_page_list sup_pages; int i; int retval = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warn("Unable to allocate CCB"); retval = -1; goto bailout; } bzero(&sup_pages, sizeof(sup_pages)); scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (uint8_t *)&sup_pages, /* inq_len */ sizeof(sup_pages), /* evpd */ 1, /* page_code */ SVPD_SUPPORTED_PAGE_LIST, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(dev, ccb) < 0) { cam_freeccb(ccb); ccb = NULL; retval = -1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = -1; goto bailout; } for (i = 0; i < sup_pages.length; i++) { if (sup_pages.list[i] == page_id) { retval = 1; goto bailout; } } bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * devtype is filled in with the type of device. * Returns 0 for success, non-zero for failure. */ int get_device_type(struct cam_device *dev, int retry_count, int timeout, int verbosemode, camcontrol_devtype *devtype) { struct ccb_getdev cgd; int retval; retval = get_cgd(dev, &cgd); if (retval != 0) goto bailout; switch (cgd.protocol) { case PROTO_SCSI: break; case PROTO_ATA: case PROTO_ATAPI: case PROTO_SATAPM: *devtype = CC_DT_ATA; goto bailout; break; /*NOTREACHED*/ case PROTO_NVME: *devtype = CC_DT_NVME; goto bailout; break; /*NOTREACHED*/ case PROTO_MMCSD: *devtype = CC_DT_MMCSD; goto bailout; break; /*NOTREACHED*/ default: *devtype = CC_DT_UNKNOWN; goto bailout; break; /*NOTREACHED*/ } if (retry_count == -1) { /* * For a retry count of -1, use only the cached data to avoid * I/O to the drive. Sending the identify command to the drive * can cause issues for SATL attached drives, since identify is * not an NCQ command. We check for the strings that Windows * displays, since those will not be NULs (they are supposed * to be space padded). We could check other bits, but anything * non-zero implies SATL. */ if (cgd.ident_data.serial[0] != 0 || cgd.ident_data.revision[0] != 0 || cgd.ident_data.model[0] != 0) *devtype = CC_DT_SATL; else *devtype = CC_DT_SCSI; } else { /* * Check for the ATA Information VPD page (0x89). If this is an * ATA device behind a SCSI to ATA translation layer (SATL), * this VPD page should be present. * * If that VPD page isn't present, or we get an error back from * the INQUIRY command, we'll just treat it as a normal SCSI * device.
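* (SAT requires a SATL to implement the ATA Information VPD page, * which is what makes its presence a reliable SATL indicator; * dev_has_vpd_page(), called just below, issues the EVPD INQUIRY * that performs this check.)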
*/ retval = dev_has_vpd_page(dev, SVPD_ATA_INFORMATION, retry_count, timeout, verbosemode); if (retval == 1) *devtype = CC_DT_SATL; else *devtype = CC_DT_SCSI; } retval = 0; bailout: return (retval); } int build_ata_cmd(union ccb *ccb, uint32_t retry_count, uint32_t flags, uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, uint16_t features, uint16_t sector_count, uint64_t lba, uint8_t command, uint32_t auxiliary, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout, int is48bit, camcontrol_devtype devtype) { int retval = 0; if (devtype == CC_DT_ATA) { cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ timeout); if (is48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); if (auxiliary != 0) { ccb->ataio.ata_flags |= ATA_FLAG_AUX; ccb->ataio.aux = auxiliary; } if (ata_flags & AP_FLAG_CHK_COND) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; if ((protocol & AP_PROTO_MASK) == AP_PROTO_DMA) ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; else if ((protocol & AP_PROTO_MASK) == AP_PROTO_FPDMA) ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; } else { if (is48bit || lba > ATA_MAX_28BIT_LBA) protocol |= AP_EXTEND; retval = scsi_ata_pass(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features, /*sector_count*/ sector_count, /*lba*/ lba, /*command*/ command, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ sense_len, /*timeout*/ timeout); } return (retval); } /* * Returns: 0 -- success, 1 -- error, 2 -- lba truncated, * 4 -- count truncated, 6 -- lba and count truncated. 
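* The truncation codes form a bit mask (0x2 == lba truncated, * 0x4 == count truncated, so 6 means both); in the fixed * sense-format case they are taken from the upper bits of the * command-specific information bytes, as the code below shows.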
*/ int get_ata_status(struct cam_device *dev, union ccb *ccb, uint8_t *error, uint16_t *count, uint64_t *lba, uint8_t *device, uint8_t *status) { int retval; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { uint8_t opcode; int error_code = 0, sense_key = 0, asc = 0, ascq = 0; u_int sense_len; /* * In this case, we have a SCSI ATA PASS-THROUGH command, 12 * or 16 byte, and need to extract the ATA result registers * from the sense data that came back. */ if (ccb->ccb_h.flags & CAM_CDB_POINTER) opcode = ccb->csio.cdb_io.cdb_ptr[0]; else opcode = ccb->csio.cdb_io.cdb_bytes[0]; if ((opcode != ATA_PASS_12) && (opcode != ATA_PASS_16)) { warnx("%s: unsupported opcode %02x", __func__, opcode); return (1); } retval = scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq); /* Note: the _ccb() variant returns 0 for an error */ if (retval == 0) return (1); sense_len = ccb->csio.sense_len - ccb->csio.sense_resid; switch (error_code) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: { struct scsi_sense_data_desc *sense; struct scsi_sense_ata_ret_desc *desc; uint8_t *desc_ptr; sense = (struct scsi_sense_data_desc *) &ccb->csio.sense_data; desc_ptr = scsi_find_desc(sense, sense_len, SSD_DESC_ATA); if (desc_ptr == NULL) { cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); return (1); } desc = (struct scsi_sense_ata_ret_desc *)desc_ptr; *error = desc->error; *count = (desc->count_15_8 << 8) | desc->count_7_0; *lba = ((uint64_t)desc->lba_47_40 << 40) | ((uint64_t)desc->lba_39_32 << 32) | ((uint64_t)desc->lba_31_24 << 24) | (desc->lba_23_16 << 16) | (desc->lba_15_8 << 8) | desc->lba_7_0; *device = desc->device; *status = desc->status; /* * If the extend bit isn't set, the result is for a * 12-byte ATA PASS-THROUGH command or a 16 or 32 byte * command without the extend bit set. This means * that the device is supposed to return 28-bit * status. The count field is only 8 bits, and the * LBA field is only 28 bits. */ if ((desc->flags & SSD_DESC_ATA_FLAG_EXTEND) == 0) { *count &= 0xff; *lba &= 0x0fffffff; } break; } case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: { uint64_t val; /* * In my understanding of the SAT-5 specification, which * says "without interpreting the contents of the * STATUS", this should not happen if CK_COND was set, * but it does at least for some devices, so try to * revert. */ if ((sense_key == SSD_KEY_ABORTED_COMMAND) && (asc == 0) && (ascq == 0)) { *status = ATA_STATUS_ERROR; *error = ATA_ERROR_ABORT; *device = 0; *count = 0; *lba = 0; return (0); } if ((sense_key != SSD_KEY_RECOVERED_ERROR) || (asc != 0x00) || (ascq != 0x1d)) return (1); val = 0; scsi_get_sense_info(&ccb->csio.sense_data, sense_len, SSD_DESC_INFO, &val, NULL); *error = (val >> 24) & 0xff; *status = (val >> 16) & 0xff; *device = (val >> 8) & 0xff; *count = val & 0xff; val = 0; scsi_get_sense_info(&ccb->csio.sense_data, sense_len, SSD_DESC_COMMAND, &val, NULL); *lba = ((val >> 16) & 0xff) | (val & 0xff00) | ((val & 0xff) << 16); /* Report UPPER NONZERO bits as errors 2, 4 and 6. */ return ((val >> 28) & 0x06); } default: return (1); } break; } case XPT_ATA_IO: { struct ata_res *res; /* Only some statuses return the ATA result register set.
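* Namely CAM_REQ_CMP and CAM_ATA_STATUS_ERROR, which the check just * below accepts; any other status means res does not hold valid * register contents.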
*/ if (cam_ccb_status(ccb) != CAM_REQ_CMP && cam_ccb_status(ccb) != CAM_ATA_STATUS_ERROR) return (1); res = &ccb->ataio.res; *error = res->error; *status = res->status; *device = res->device; *count = res->sector_count; *lba = (res->lba_high << 16) | (res->lba_mid << 8) | (res->lba_low); if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) { *count |= (res->sector_count_exp << 8); *lba |= ((uint64_t)res->lba_low_exp << 24) | ((uint64_t)res->lba_mid_exp << 32) | ((uint64_t)res->lba_high_exp << 40); } else { *lba |= (res->device & 0xf) << 24; } break; } default: return (1); } return (0); } static void cpi_print(struct ccb_pathinq *cpi) { char adapter_str[1024]; uint64_t i; snprintf(adapter_str, sizeof(adapter_str), "%s%d:", cpi->dev_name, cpi->unit_number); fprintf(stdout, "%s SIM/HBA version: %d\n", adapter_str, cpi->version_num); for (i = 1; i < UINT8_MAX; i = i << 1) { const char *str; if ((i & cpi->hba_inquiry) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PI_MDP_ABLE: str = "MDP message"; break; case PI_WIDE_32: str = "32 bit wide SCSI"; break; case PI_WIDE_16: str = "16 bit wide SCSI"; break; case PI_SDTR_ABLE: str = "SDTR message"; break; case PI_LINKED_CDB: str = "linked CDBs"; break; case PI_TAG_ABLE: str = "tag queue messages"; break; case PI_SOFT_RST: str = "soft reset alternative"; break; case PI_SATAPM: str = "SATA Port Multiplier"; break; default: str = "unknown PI bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < UINT32_MAX; i = i << 1) { const char *str; if ((i & cpi->hba_misc) == 0) continue; fprintf(stdout, "%s ", adapter_str); switch(i) { case PIM_ATA_EXT: str = "can understand ata_ext requests"; break; case PIM_EXTLUNS: str = "64bit extended LUNs supported"; break; case PIM_SCANHILO: str = "bus scans from high ID to low ID"; break; case PIM_NOREMOVE: str = "removable devices not included in scan"; break; case PIM_NOINITIATOR: str = "initiator role not supported"; break; case PIM_NOBUSRESET: str = "user has disabled initial BUS RESET or" " controller is in target/mixed mode"; break; case PIM_NO_6_BYTE: str = "do not send 6-byte commands"; break; case PIM_SEQSCAN: str = "scan bus sequentially"; break; case PIM_UNMAPPED: str = "unmapped I/O supported"; break; case PIM_NOSCAN: str = "does its own scanning"; break; default: str = "unknown PIM bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < UINT16_MAX; i = i << 1) { const char *str; if ((i & cpi->target_sprt) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PIT_PROCESSOR: str = "target mode processor mode"; break; case PIT_PHASE: str = "target mode phase cog. 
mode"; break; case PIT_DISCONNECT: str = "disconnects in target mode"; break; case PIT_TERM_IO: str = "terminate I/O message in target mode"; break; case PIT_GRP_6: str = "group 6 commands in target mode"; break; case PIT_GRP_7: str = "group 7 commands in target mode"; break; default: str = "unknown PIT bit set"; break; } fprintf(stdout, "%s\n", str); } fprintf(stdout, "%s HBA engine count: %d\n", adapter_str, cpi->hba_eng_cnt); fprintf(stdout, "%s maximum target: %d\n", adapter_str, cpi->max_target); fprintf(stdout, "%s maximum LUN: %d\n", adapter_str, cpi->max_lun); fprintf(stdout, "%s highest path ID in subsystem: %d\n", adapter_str, cpi->hpath_id); fprintf(stdout, "%s initiator ID: %d\n", adapter_str, cpi->initiator_id); fprintf(stdout, "%s SIM vendor: %s\n", adapter_str, cpi->sim_vid); fprintf(stdout, "%s HBA vendor: %s\n", adapter_str, cpi->hba_vid); fprintf(stdout, "%s HBA vendor ID: 0x%04x\n", adapter_str, cpi->hba_vendor); fprintf(stdout, "%s HBA device ID: 0x%04x\n", adapter_str, cpi->hba_device); fprintf(stdout, "%s HBA subvendor ID: 0x%04x\n", adapter_str, cpi->hba_subvendor); fprintf(stdout, "%s HBA subdevice ID: 0x%04x\n", adapter_str, cpi->hba_subdevice); fprintf(stdout, "%s bus ID: %d\n", adapter_str, cpi->bus_id); fprintf(stdout, "%s base transfer speed: ", adapter_str); if (cpi->base_transfer_speed > 1000) fprintf(stdout, "%d.%03dMB/sec\n", cpi->base_transfer_speed / 1000, cpi->base_transfer_speed % 1000); else fprintf(stdout, "%dKB/sec\n", (cpi->base_transfer_speed % 1000) * 1000); fprintf(stdout, "%s maximum transfer size: %u bytes\n", adapter_str, cpi->maxio); } static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts) { int retval; union ccb *ccb; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_print_cts: error allocating ccb"); return (1); } ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; if (user_settings == 0) ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; else ccb->cts.type = CTS_TYPE_USER_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { warn("error sending XPT_GET_TRAN_SETTINGS CCB"); retval = 1; goto get_print_cts_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if (quiet == 0) cts_print(device, &ccb->cts); if (cts != NULL) bcopy(&ccb->cts, cts, sizeof(struct ccb_trans_settings)); get_print_cts_bailout: cam_freeccb(ccb); return (retval); } static int ratecontrol(struct cam_device *device, int task_attr, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int user_settings = 0; int retval = 0; int disc_enable = -1, tag_enable = -1; int mode = -1; int offset = -1; double syncrate = -1; int bus_width = -1; int quiet = 0; int change_settings = 0, send_tur = 0; struct ccb_pathinq cpi; ccb = cam_getccb(device); if (ccb == NULL) { warnx("ratecontrol: error allocating ccb"); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'a': send_tur = 1; break; case 'c': user_settings = 0; break; case 'D': if (strncasecmp(optarg, "enable", 6) == 0) disc_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) disc_enable = 0; else { warnx("-D argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'M': mode = ata_string2mode(optarg); if (mode < 0) { warnx("unknown mode '%s'", 
optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'O': offset = strtol(optarg, NULL, 0); if (offset < 0) { warnx("offset value %d is < 0", offset); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'q': quiet++; break; case 'R': syncrate = atof(optarg); if (syncrate < 0) { warnx("sync rate %f is < 0", syncrate); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'T': if (strncasecmp(optarg, "enable", 6) == 0) tag_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) tag_enable = 0; else { warnx("-T argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'U': user_settings = 1; break; case 'W': bus_width = strtol(optarg, NULL, 0); if (bus_width < 0) { warnx("bus width %d is < 0", bus_width); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; default: break; } } /* * Grab path inquiry information, so we can determine whether * or not the initiator is capable of the things that the user * requests. */ if ((retval = get_cpi(device, &cpi)) != 0) goto ratecontrol_bailout; if (quiet == 0) { fprintf(stdout, "%s parameters:\n", user_settings ? "User" : "Current"); } retval = get_print_cts(device, user_settings, quiet, &ccb->cts); if (retval != 0) goto ratecontrol_bailout; if (arglist & CAM_ARG_VERBOSE) cpi_print(&cpi); if (change_settings) { int didsettings = 0; struct ccb_trans_settings_spi *spi = NULL; struct ccb_trans_settings_pata *pata = NULL; struct ccb_trans_settings_sata *sata = NULL; struct ccb_trans_settings_ata *ata = NULL; struct ccb_trans_settings_scsi *scsi = NULL; if (ccb->cts.transport == XPORT_SPI) spi = &ccb->cts.xport_specific.spi; if (ccb->cts.transport == XPORT_ATA) pata = &ccb->cts.xport_specific.ata; if (ccb->cts.transport == XPORT_SATA) sata = &ccb->cts.xport_specific.sata; if (ccb->cts.protocol == PROTO_ATA) ata = &ccb->cts.proto_specific.ata; if (ccb->cts.protocol == PROTO_SCSI) scsi = &ccb->cts.proto_specific.scsi; ccb->cts.xport_specific.valid = 0; ccb->cts.proto_specific.valid = 0; if (spi && disc_enable != -1) { spi->valid |= CTS_SPI_VALID_DISC; if (disc_enable == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; else spi->flags |= CTS_SPI_FLAGS_DISC_ENB; didsettings++; } if (tag_enable != -1) { if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0) { warnx("HBA does not support tagged queueing, " "so you cannot modify tag settings"); retval = 1; goto ratecontrol_bailout; } if (ata) { ata->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) ata->flags &= ~CTS_ATA_FLAGS_TAG_ENB; else ata->flags |= CTS_ATA_FLAGS_TAG_ENB; didsettings++; } else if (scsi) { scsi->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; didsettings++; } } if (spi && offset != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing offset"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_offset = offset; didsettings++; } if (spi && syncrate != -1) { int prelim_sync_period; if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_RATE; /* * The sync rate the user gives us is in MHz. * We need to translate it into KHz for this * calculation. */ syncrate *= 1000; /* * Next, we calculate a "preliminary" sync period * in tenths of a nanosecond. 
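* For example, a 20MHz request becomes 20000KHz, and * 10000000 / 20000 = 500 tenths of a nanosecond (a 50ns period), * which scsi_calc_syncparam() below encodes for the SIM.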
*/ if (syncrate == 0) prelim_sync_period = 0; else prelim_sync_period = 10000000 / syncrate; spi->sync_period = scsi_calc_syncparam(prelim_sync_period); didsettings++; } if (sata && syncrate != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user rate " "settings for SATA"); retval = 1; goto ratecontrol_bailout; } sata->revision = ata_speed2revision(syncrate * 100); if (sata->revision < 0) { warnx("Invalid rate %f", syncrate); retval = 1; goto ratecontrol_bailout; } sata->valid |= CTS_SATA_VALID_REVISION; didsettings++; } if ((pata || sata) && mode != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user mode " "settings for ATA/SATA"); retval = 1; goto ratecontrol_bailout; } if (pata) { pata->mode = mode; pata->valid |= CTS_ATA_VALID_MODE; } else { sata->mode = mode; sata->valid |= CTS_SATA_VALID_MODE; } didsettings++; } /* * The bus_width argument goes like this: * 0 == 8 bit * 1 == 16 bit * 2 == 32 bit * Therefore, if you shift the number of bits given on the * command line right by 4, you should get the correct * number. */ if (spi && bus_width != -1) { /* * We might as well validate things here with a * decipherable error message, rather than what * will probably be an indecipherable error message * by the time it gets back to us. */ if ((bus_width == 16) && ((cpi.hba_inquiry & PI_WIDE_16) == 0)) { warnx("HBA does not support 16 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width == 32) && ((cpi.hba_inquiry & PI_WIDE_32) == 0)) { warnx("HBA does not support 32 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width != 8) && (bus_width != 16) && (bus_width != 32)) { warnx("Invalid bus width %d", bus_width); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width >> 4; didsettings++; } if (didsettings == 0) { goto ratecontrol_bailout; } ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { warn("error sending XPT_SET_TRAN_SETTINGS CCB"); retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_SET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } } if (send_tur) { retval = testunitready(device, task_attr, retry_count, timeout, (arglist & CAM_ARG_VERBOSE) ? 0 : 1); /* * If the TUR didn't succeed, just bail. 
*/ if (retval != 0) { if (quiet == 0) fprintf(stderr, "Test Unit Ready failed\n"); goto ratecontrol_bailout; } } if ((change_settings || send_tur) && !quiet && (ccb->cts.transport == XPORT_ATA || ccb->cts.transport == XPORT_SATA || send_tur)) { fprintf(stdout, "New parameters:\n"); retval = get_print_cts(device, user_settings, 0, NULL); } ratecontrol_bailout: cam_freeccb(ccb); return (retval); } static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { union ccb *ccb; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; struct format_defect_list_header fh; uint8_t *data_ptr = NULL; uint32_t dxfer_len = 0; uint8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsiformat: error allocating ccb"); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'q': quiet++; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (quiet == 0 && ycount == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, task_attr, retry_count, timeout); if (error != 0) { warnx("scsiformat: error sending inquiry"); goto scsiformat_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsiformat_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current format timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. */ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } /* * Keep this outside the if block below to silence any unused * variable warnings. */ bzero(&fh, sizeof(fh)); /* * If we're in immediate mode, we've got to include the format * header */ if (immediate != 0) { fh.byte2 = FU_DLH_IMMED; data_ptr = (uint8_t *)&fh; dxfer_len = sizeof(fh); byte2 = FU_FMT_DATA; } else if (quiet == 0) { fprintf(stdout, "Formatting..."); fflush(stdout); } scsi_format_unit(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* byte2 */ byte2, /* ileave */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((immediate == 0) && ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP))) { const char errstr[] = "error sending format command"; if (retval < 0) warn(errstr); else warnx(errstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. 
If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Format Complete\n"); } goto scsiformat_bailout; } doreport: do { cam_status status; CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish formatting. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending TEST UNIT READY command"); error = 1; goto scsiformat_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-2 and SCSI-3 specs, a * drive that is in the middle of a format should * return NOT READY with an ASC of "logical unit * not ready, format in progress". The sense key * specific bytes will then be a progress indicator. */ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x04)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { uint32_t val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000ull * val; fprintf(stdout, "\rFormatting: %ju.%02u %% " "(%u/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during format:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but format will " "proceed."); warnx("will exit when format is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during format"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nFormat Complete\n"); scsiformat_bailout: cam_freeccb(ccb); return (error); } static int sanitize_wait_ata(struct cam_device *device, union ccb *ccb, int quiet, camcontrol_devtype devtype) { int retval; uint8_t error = 0, ata_device = 0, status = 0; uint16_t count = 0; uint64_t lba = 0; u_int val, perc; do { retval = build_ata_cmd(ccb, /*retries*/ 0, /*flags*/ CAM_DIR_NONE, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*protocol*/ AP_PROTO_NON_DATA, /*ata_flags*/ AP_FLAG_CHK_COND, /*features*/ 0x00, /* SANITIZE STATUS EXT */ /*sector_count*/ 0, /*lba*/ 0, /*command*/ ATA_SANITIZE, /*auxiliary*/ 0, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 10000, /*is48bit*/ 1, /*devtype*/ devtype); if (retval != 0) { 
warnx("%s: build_ata_cmd() failed, likely " "programmer error", __func__); return (1); } ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; retval = cam_send_ccb(device, ccb); if (retval != 0) { warn("error sending SANITIZE STATUS EXT command"); return (1); } retval = get_ata_status(device, ccb, &error, &count, &lba, &ata_device, &status); if (retval != 0) { warnx("Can't get SANITIZE STATUS EXT status, " "sanitize may still run."); return (retval); } if (status & ATA_STATUS_ERROR) { if (error & ATA_ERROR_ABORT) { switch (lba & 0xff) { case 0x00: warnx("Reason not reported or sanitize failed."); return (1); case 0x01: warnx("Sanitize command unsuccessful. "); return (1); case 0x02: warnx("Unsupported sanitize device command. "); return (1); case 0x03: warnx("Device is in sanitize frozen state. "); return (1); case 0x04: warnx("Sanitize antifreeze lock is enabled. "); return (1); } } warnx("SANITIZE STATUS EXT failed, " "sanitize may still run."); return (1); } if (count & 0x4000) { if (quiet == 0) { val = lba & 0xffff; perc = 10000 * val; fprintf(stdout, "Sanitizing: %u.%02u%% (%d/%d)\r", (perc / (0x10000 * 100)), ((perc / 0x10000) % 100), val, 0x10000); fflush(stdout); } sleep(1); } else break; } while (1); return (0); } static int sanitize_wait_scsi(struct cam_device *device, union ccb *ccb, int task_attr, int quiet) { int warnings = 0, retval; cam_status status; u_int val, perc; do { CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish sanitizing. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending TEST UNIT READY command"); return (1); } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-3 spec, a drive that is in the * middle of a sanitize should return NOT READY with an * ASC of "logical unit not ready, sanitize in * progress". The sense key specific bytes will then * be a progress indicator. 
*/ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x1b)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { val = scsi_2btoul(&sks[1]); perc = 10000 * val; fprintf(stdout, "Sanitizing: %u.%02u%% (%d/%d)\r", (perc / (0x10000 * 100)), ((perc / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during sanitize:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but sanitize will " "proceed."); warnx("will exit when sanitize is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during sanitize"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); return (1); } } else if (status != CAM_REQ_CMP && status != CAM_REQUEUE_REQ) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); return (1); } } while ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); return (0); } static int sanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { union ccb *ccb; uint8_t action = 0; int c; int ycount = 0, quiet = 0; int error = 0; int use_timeout; int immediate = 1; int invert = 0; int passes = 0; int ause = 0; int fd = -1; const char *pattern = NULL; uint8_t *data_ptr = NULL; uint32_t dxfer_len = 0; uint8_t byte2; uint16_t feature, count; uint64_t lba; int reportonly = 0; camcontrol_devtype dt; /* * Get the device type; request that no I/O be done to determine it. */ error = get_device_type(device, -1, 0, 0, &dt); if (error != 0 || (unsigned)dt > CC_DT_UNKNOWN) { warnx("sanitize: can't get device type"); return (1); } ccb = cam_getccb(device); if (ccb == NULL) { warnx("sanitize: error allocating ccb"); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': if (strcasecmp(optarg, "overwrite") == 0) action = SSZ_SERVICE_ACTION_OVERWRITE; else if (strcasecmp(optarg, "block") == 0) action = SSZ_SERVICE_ACTION_BLOCK_ERASE; else if (strcasecmp(optarg, "crypto") == 0) action = SSZ_SERVICE_ACTION_CRYPTO_ERASE; else if (strcasecmp(optarg, "exitfailure") == 0) action = SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE; else { warnx("invalid service operation \"%s\"", optarg); error = 1; goto sanitize_bailout; } break; case 'c': passes = strtol(optarg, NULL, 0); if (passes < 1 || passes > 31) { warnx("invalid passes value %d", passes); error = 1; goto sanitize_bailout; } break; case 'I': invert = 1; break; case 'P': pattern = optarg; break; case 'q': quiet++; break; case 'U': ause = 1; break; case 'r': reportonly = 1; break; case 'w': /* ATA supports only immediate commands.
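* As a consequence, the -w (non-immediate) flag is honored only for * native SCSI devices; for ATA, completion is instead polled with * SANITIZE STATUS EXT (see sanitize_wait_ata() above).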
*/ if (dt == CC_DT_SCSI) immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (action == 0) { warnx("an action is required"); error = 1; goto sanitize_bailout; } else if (action == SSZ_SERVICE_ACTION_OVERWRITE) { struct scsi_sanitize_parameter_list *pl; struct stat sb; ssize_t sz, amt; if (pattern == NULL) { warnx("overwrite action requires -P argument"); error = 1; goto sanitize_bailout; } fd = open(pattern, O_RDONLY); if (fd < 0) { warn("cannot open pattern file %s", pattern); error = 1; goto sanitize_bailout; } if (fstat(fd, &sb) < 0) { warn("cannot stat pattern file %s", pattern); error = 1; goto sanitize_bailout; } sz = sb.st_size; if (sz > SSZPL_MAX_PATTERN_LENGTH) { warnx("pattern file size exceeds maximum value %d", SSZPL_MAX_PATTERN_LENGTH); error = 1; goto sanitize_bailout; } dxfer_len = sizeof(*pl) + sz; data_ptr = calloc(1, dxfer_len); if (data_ptr == NULL) { warnx("cannot allocate parameter list buffer"); error = 1; goto sanitize_bailout; } amt = read(fd, data_ptr + sizeof(*pl), sz); if (amt < 0) { warn("cannot read pattern file"); error = 1; goto sanitize_bailout; } else if (amt != sz) { warnx("short pattern file read"); error = 1; goto sanitize_bailout; } pl = (struct scsi_sanitize_parameter_list *)data_ptr; if (passes == 0) pl->byte1 = 1; else pl->byte1 = passes; if (invert != 0) pl->byte1 |= SSZPL_INVERT; scsi_ulto2b(sz, pl->length); } else { const char *arg; if (passes != 0) arg = "-c"; else if (invert != 0) arg = "-I"; else if (pattern != NULL) arg = "-P"; else arg = NULL; if (arg != NULL) { warnx("%s argument only valid with overwrite " "operation", arg); error = 1; goto sanitize_bailout; } } if (quiet == 0 && ycount == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); if (dt == CC_DT_SCSI) { error = scsidoinquiry(device, argc, argv, combinedopt, task_attr, retry_count, timeout); } else if (dt == CC_DT_ATA || dt == CC_DT_SATL) { struct ata_params *ident_buf; error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); free(ident_buf); } } else error = 1; if (error != 0) { warnx("sanitize: error sending inquiry"); goto sanitize_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto sanitize_bailout; } } if (timeout != 0) use_timeout = timeout; else use_timeout = (immediate ? 10 : 10800) * 1000; if (immediate == 0 && quiet == 0) { fprintf(stdout, "Current sanitize timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. 
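* (-y suppresses the question entirely, and a timeout given on the * command line is used as-is.)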
*/ if (immediate == 0 && ycount == 0 && timeout == 0) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } if (dt == CC_DT_SCSI) { byte2 = action; if (ause != 0) byte2 |= SSZ_UNRESTRICTED_EXIT; if (immediate != 0) byte2 |= SSZ_IMMED; scsi_sanitize(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ task_attr, /* byte2 */ byte2, /* control */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending sanitize command"); error = 1; goto sanitize_bailout; } } else if (dt == CC_DT_ATA || dt == CC_DT_SATL) { if (action == SSZ_SERVICE_ACTION_OVERWRITE) { feature = 0x14; /* OVERWRITE EXT */ lba = 0x4F5700000000 | scsi_4btoul(data_ptr + 4); count = (passes == 0) ? 1 : (passes >= 16) ? 0 : passes; if (invert) count |= 0x80; /* INVERT PATTERN */ if (ause) count |= 0x10; /* FAILURE MODE */ } else if (action == SSZ_SERVICE_ACTION_BLOCK_ERASE) { feature = 0x12; /* BLOCK ERASE EXT */ lba = 0x0000426B4572; count = 0; if (ause) count |= 0x10; /* FAILURE MODE */ } else if (action == SSZ_SERVICE_ACTION_CRYPTO_ERASE) { feature = 0x11; /* CRYPTO SCRAMBLE EXT */ lba = 0x000043727970; count = 0; if (ause) count |= 0x10; /* FAILURE MODE */ } else if (action == SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE) { feature = 0x00; /* SANITIZE STATUS EXT */ lba = 0; count = 1; /* CLEAR SANITIZE OPERATION FAILED */ } else { error = 1; goto sanitize_bailout; } error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA | AP_EXTEND, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SANITIZE, /*features*/feature, /*lba*/lba, /*sector_count*/count, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/ use_timeout, /*is48bit*/1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); if (sense_key == SSD_KEY_ILLEGAL_REQUEST && asc == 0x20 && ascq == 0x00) warnx("sanitize is not supported by " "this device"); else warnx("error sanitizing this device"); } else warnx("error sanitizing this device"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto sanitize_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. If we're in * immediate mode, we need to loop through and get status * information periodically. 
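* That polling is delegated to sanitize_wait_scsi() or * sanitize_wait_ata(), both defined above, at the doreport label * below.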
*/ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Sanitize Complete\n"); } goto sanitize_bailout; } doreport: if (dt == CC_DT_SCSI) { error = sanitize_wait_scsi(device, ccb, task_attr, quiet); } else if (dt == CC_DT_ATA || dt == CC_DT_SATL) { error = sanitize_wait_ata(device, ccb, quiet, dt); } else error = 1; if (error == 0 && quiet == 0) fprintf(stdout, "Sanitize Complete\n"); sanitize_bailout: if (fd >= 0) close(fd); if (data_ptr != NULL) free(data_ptr); cam_freeccb(ccb); return (error); } static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { union ccb *ccb; int c, countonly, lunsonly; struct scsi_report_luns_data *lundata; int alloc_len; uint8_t report_type; uint32_t list_len, i, j; int retval; retval = 0; lundata = NULL; report_type = RPL_REPORT_DEFAULT; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } countonly = 0; lunsonly = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': countonly++; break; case 'l': lunsonly++; break; case 'r': if (strcasecmp(optarg, "default") == 0) report_type = RPL_REPORT_DEFAULT; else if (strcasecmp(optarg, "wellknown") == 0) report_type = RPL_REPORT_WELLKNOWN; else if (strcasecmp(optarg, "all") == 0) report_type = RPL_REPORT_ALL; else { warnx("%s: invalid report type \"%s\"", __func__, optarg); retval = 1; goto bailout; } break; default: break; } } if ((countonly != 0) && (lunsonly != 0)) { warnx("%s: you can only specify one of -c or -l", __func__); retval = 1; goto bailout; } /* * According to SPC-4, the allocation length must be at least 16 * bytes -- enough for the header and one LUN. */ alloc_len = sizeof(*lundata) + 8; retry: lundata = malloc(alloc_len); if (lundata == NULL) { warn("%s: error mallocing %d bytes", __func__, alloc_len); retval = 1; goto bailout; } scsi_report_luns(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ task_attr, /*select_report*/ report_type, /*rpl_buf*/ lundata, /*alloc_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending REPORT LUNS command"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } list_len = scsi_4btoul(lundata->length); /* * If we need to list the LUNs, and our allocation * length was too short, reallocate and retry. */ if ((countonly == 0) && (list_len > (alloc_len - sizeof(*lundata)))) { alloc_len = list_len + sizeof(*lundata); free(lundata); goto retry; } if (lunsonly == 0) fprintf(stdout, "%u LUN%s found\n", list_len / 8, ((list_len / 8) != 1) ? "s" : ""); if (countonly != 0) goto bailout; for (i = 0; i < (list_len / 8); i++) { int no_more; no_more = 0; for (j = 0; j < sizeof(lundata->luns[i].lundata); j += 2) { if (j != 0) fprintf(stdout, ","); switch (lundata->luns[i].lundata[j] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: if ((lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK) != 0) fprintf(stdout, "%d:", lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK); else if ((j == 0) && ((lundata->luns[i].lundata[j+2] & RPL_LUNDATA_PERIPH_BUS_MASK) == 0)) no_more = 1; fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); break; case RPL_LUNDATA_ATYP_FLAT: { uint8_t tmplun[2]; tmplun[0] = lundata->luns[i].lundata[j] & RPL_LUNDATA_FLAT_LUN_MASK; tmplun[1] = lundata->luns[i].lundata[j+1]; fprintf(stdout, "%d", scsi_2btoul(tmplun)); no_more = 1; break; } case RPL_LUNDATA_ATYP_LUN: fprintf(stdout, "%d:%d:%d", (lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_BUS_MASK) >> 5, lundata->luns[i].lundata[j] & RPL_LUNDATA_LUN_TARG_MASK, lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_LUN_MASK); break; case RPL_LUNDATA_ATYP_EXTLUN: { int field_len_code, eam_code; eam_code = lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_EAM_MASK; field_len_code = (lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_LEN_MASK) >> 4; if ((eam_code == RPL_LUNDATA_EXT_EAM_WK) && (field_len_code == 0x00)) { fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); } else if ((eam_code == RPL_LUNDATA_EXT_EAM_NOT_SPEC) && (field_len_code == 0x03)) { uint8_t tmp_lun[8]; /* * This format takes up all 8 bytes. * If we aren't starting at offset 0, * that's a bug. */ if (j != 0) { fprintf(stdout, "Invalid " "offset %d for " "Extended LUN not " "specified format", j); no_more = 1; break; } bzero(tmp_lun, sizeof(tmp_lun)); bcopy(&lundata->luns[i].lundata[j+1], &tmp_lun[1], sizeof(tmp_lun) - 1); fprintf(stdout, "%#jx", (uintmax_t)scsi_8btou64(tmp_lun)); no_more = 1; } else { fprintf(stderr, "Unknown Extended LUN " "Address method %#x, length " "code %#x\n", eam_code, field_len_code); no_more = 1; } break; } default: fprintf(stderr, "Unknown LUN address method " "%#x\n", lundata->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); break; } /* * For the flat addressing method, there are no * other levels after it.
*/ if (no_more != 0) break; } fprintf(stdout, "\n"); } bailout: cam_freeccb(ccb); free(lundata); return (retval); } static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout) { union ccb *ccb; int blocksizeonly, humanize, numblocks, quiet, sizeonly, baseten, longonly; struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; uint64_t maxsector; uint32_t block_len; int retval; int c; blocksizeonly = 0; humanize = 0; longonly = 0; numblocks = 0; quiet = 0; sizeonly = 0; baseten = 0; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': blocksizeonly++; break; case 'h': humanize++; baseten = 0; break; case 'H': humanize++; baseten++; break; case 'l': longonly++; break; case 'N': numblocks++; break; case 'q': quiet++; break; case 's': sizeonly++; break; default: break; } } if ((blocksizeonly != 0) && (numblocks != 0)) { warnx("%s: you can only specify one of -b or -N", __func__); retval = 1; goto bailout; } if ((blocksizeonly != 0) && (sizeonly != 0)) { warnx("%s: you can only specify one of -b or -s", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (quiet != 0)) { warnx("%s: you can only specify one of -h/-H or -q", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (blocksizeonly != 0)) { warnx("%s: you can only specify one of -h/-H or -b", __func__); retval = 1; goto bailout; } if (longonly != 0) goto long_only; scsi_read_capacity(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ task_attr, &rcap, SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY command"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_4btoul(rcap.addr); block_len = scsi_4btoul(rcap.length); /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (maxsector != 0xffffffff) goto do_print; long_only: scsi_read_capacity_16(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ task_attr, /*lba*/ 0, /*reladdr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)&rcaplong, /*rcap_buf_len*/ sizeof(rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY (16) command"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_8btou64(rcaplong.addr); block_len = scsi_4btoul(rcaplong.length); do_print: if (blocksizeonly == 0) { /* * Humanize implies !quiet, and also implies numblocks. 
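* For example, a device whose last LBA is 4194303 with 512-byte blocks would * print something like "Device Size: 2.0G" rather than a raw block count.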
*/ if (humanize != 0) { char tmpstr[6]; int64_t tmpbytes; int ret; tmpbytes = (maxsector + 1) * block_len; ret = humanize_number(tmpstr, sizeof(tmpstr), tmpbytes, "", HN_AUTOSCALE, HN_B | HN_DECIMAL | ((baseten != 0) ? HN_DIVISOR_1000 : 0)); if (ret == -1) { warnx("%s: humanize_number failed!", __func__); retval = 1; goto bailout; } fprintf(stdout, "Device Size: %s%s", tmpstr, (sizeonly == 0) ? ", " : "\n"); } else if (numblocks != 0) { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Blocks: " : "", (uintmax_t)maxsector + 1, (sizeonly == 0) ? ", " : "\n"); } else { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Last Block: " : "", (uintmax_t)maxsector, (sizeonly == 0) ? ", " : "\n"); } } if (sizeonly == 0) fprintf(stdout, "%s%u%s\n", (quiet == 0) ? "Block Length: " : "", block_len, (quiet == 0) ? " bytes" : ""); bailout: cam_freeccb(ccb); return (retval); } static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, error = 0; union ccb *ccb; uint8_t *smp_request = NULL, *smp_response = NULL; int request_size = 0, response_size = 0; int fd_request = 0, fd_response = 0; char *datastr = NULL; struct get_hook hook; int retval; int flags = 0; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'R': arglist |= CAM_ARG_CMD_IN; response_size = strtol(optarg, NULL, 0); if (response_size <= 0) { warnx("invalid number of response bytes %d", response_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_response = 1; smp_response = (uint8_t *)malloc(response_size); if (smp_response == NULL) { warn("can't malloc memory for SMP response"); error = 1; goto smpcmd_bailout; } break; case 'r': arglist |= CAM_ARG_CMD_OUT; request_size = strtol(optarg, NULL, 0); if (request_size <= 0) { warnx("invalid number of request bytes %d", request_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); smp_request = (uint8_t *)malloc(request_size); if (smp_request == NULL) { warn("can't malloc memory for SMP request"); error = 1; goto smpcmd_bailout; } bzero(smp_request, request_size); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_request = 1; else buff_encode_visit(smp_request, request_size, datastr, iget, &hook); optind += hook.got; break; default: break; } } /* * If fd_request is set, and we're writing to the device, we need to * read the data the user wants written from stdin.
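* A hypothetical invocation exercising this path could look like * "camcontrol smpcmd ses0 -r 8 - -R 32 - < req.bin > rsp.bin", since supplying * "-" in place of a format string selects raw stdin/stdout above.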
*/ if ((fd_request == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = request_size; uint8_t *buf_ptr = smp_request; while (amt_to_read > 0) { amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read); if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto smpcmd_bailout; } else if (amt_read == 0) { warnx("only read %u bytes out of %u from stdin", request_size - amt_to_read, request_size); error = 1; goto smpcmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (((arglist & CAM_ARG_CMD_IN) == 0) || ((arglist & CAM_ARG_CMD_OUT) == 0)) { warnx("%s: need both the request (-r) and response (-R) " "arguments", __func__); error = 1; goto smpcmd_bailout; } flags |= CAM_DEV_QFRZDIS; cam_fill_smpio(&ccb->smpio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*smp_request*/ smp_request, /*smp_request_len*/ request_size, /*smp_response*/ smp_response, /*smp_response_len*/ response_size, /*timeout*/ timeout ? timeout : 5000); ccb->smpio.flags = SMP_FLAG_NONE; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (response_size > 0)) { if (fd_response == 0) { buff_decode_visit(smp_response, response_size, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = response_size; uint8_t *buf_ptr = smp_response; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(STDOUT_FILENO, buf_ptr, amt_to_write)) > 0;) { amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto smpcmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", response_size - amt_to_write, response_size); } } } smpcmd_bailout: if (ccb != NULL) cam_freeccb(ccb); if (smp_request != NULL) free(smp_request); if (smp_response != NULL) free(smp_response); return (error); } static int mmcsdcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, error = 0; union ccb *ccb; int32_t mmc_opcode = 0, mmc_arg = 0; int32_t mmc_flags = -1; int retval; int is_write = 0; int is_bw_4 = 0, is_bw_1 = 0; int is_frequency = 0; int is_highspeed = 0, is_stdspeed = 0; int is_info_request = 0; int flags = 0; uint8_t mmc_data_byte = 0; uint32_t mmc_frequency = 0; /* For IO_RW_EXTENDED command */ uint8_t *mmc_data = NULL; struct mmc_data mmc_d; int mmc_data_len = 0; /* * Note that at the moment we don't support sending MMC CCBs to * devices that aren't probed by CAM.
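* When -f is not given, mmc_flags stays at -1 and the switch below supplies a * default response/command type for the opcode, e.g. MMC_RSP_R5 | MMC_CMD_AC * for SD_IO_RW_DIRECT (CMD52).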
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case '4': is_bw_4 = 1; break; case '1': is_bw_1 = 1; break; case 'S': if (!strcmp(optarg, "high")) is_highspeed = 1; else is_stdspeed = 1; break; case 'I': is_info_request = 1; break; case 'F': is_frequency = 1; mmc_frequency = strtol(optarg, NULL, 0); break; case 'c': mmc_opcode = strtol(optarg, NULL, 0); if (mmc_opcode < 0) { warnx("invalid MMC opcode %d", mmc_opcode); error = 1; goto mmccmd_bailout; } break; case 'a': mmc_arg = strtol(optarg, NULL, 0); if (mmc_arg < 0) { warnx("invalid MMC arg %d", mmc_arg); error = 1; goto mmccmd_bailout; } break; case 'f': mmc_flags = strtol(optarg, NULL, 0); if (mmc_flags < 0) { warnx("invalid MMC flags %d", mmc_flags); error = 1; goto mmccmd_bailout; } break; case 'l': mmc_data_len = strtol(optarg, NULL, 0); if (mmc_data_len <= 0) { warnx("invalid MMC data len %d", mmc_data_len); error = 1; goto mmccmd_bailout; } break; case 'W': is_write = 1; break; case 'b': mmc_data_byte = strtol(optarg, NULL, 0); break; default: break; } } flags |= CAM_DEV_QFRZDIS; /* masks are broken?! */ /* If flags are left default, supply the right flags */ if (mmc_flags < 0) switch (mmc_opcode) { case MMC_GO_IDLE_STATE: mmc_flags = MMC_RSP_NONE | MMC_CMD_BC; break; case IO_SEND_OP_COND: mmc_flags = MMC_RSP_R4; break; case SD_SEND_RELATIVE_ADDR: mmc_flags = MMC_RSP_R6 | MMC_CMD_BCR; break; case MMC_SELECT_CARD: mmc_flags = MMC_RSP_R1B | MMC_CMD_AC; mmc_arg = mmc_arg << 16; break; case SD_IO_RW_DIRECT: mmc_flags = MMC_RSP_R5 | MMC_CMD_AC; mmc_arg = SD_IO_RW_ADR(mmc_arg); if (is_write) mmc_arg |= SD_IO_RW_WR | SD_IO_RW_RAW | SD_IO_RW_DAT(mmc_data_byte); break; case SD_IO_RW_EXTENDED: mmc_flags = MMC_RSP_R5 | MMC_CMD_ADTC; mmc_arg = SD_IO_RW_ADR(mmc_arg); int len_arg = mmc_data_len; if (mmc_data_len == 512) len_arg = 0; // Byte mode mmc_arg |= SD_IOE_RW_LEN(len_arg) | SD_IO_RW_INCR; // Block mode // mmc_arg |= SD_IOE_RW_BLK | SD_IOE_RW_LEN(len_arg) | SD_IO_RW_INCR; break; default: mmc_flags = MMC_RSP_R1; break; } // Switch bus width instead of sending IO command if (is_bw_4 || is_bw_1) { struct ccb_trans_settings_mmc *cts; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = 0; cts = &ccb->cts.proto_specific.mmc; cts->ios.bus_width = is_bw_4 == 1 ? bus_width_4 : bus_width_1; cts->ios_valid = MMC_BW; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { warn("Error sending command"); } else { printf("Parameters set OK\n"); } cam_freeccb(ccb); return (retval); } if (is_frequency) { struct ccb_trans_settings_mmc *cts; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = 0; cts = &ccb->cts.proto_specific.mmc; cts->ios.clock = mmc_frequency; cts->ios_valid = MMC_CLK; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { warn("Error sending command"); } else { printf("Parameters set OK\n"); } cam_freeccb(ccb); return (retval); } // Switch bus speed instead of sending IO command if (is_stdspeed || is_highspeed) { struct ccb_trans_settings_mmc *cts; ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; ccb->ccb_h.flags = 0; cts = &ccb->cts.proto_specific.mmc; cts->ios.timing = is_highspeed == 1 ? 
bus_timing_hs : bus_timing_normal; cts->ios_valid = MMC_BT; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { warn("Error sending command"); } else { printf("Speed set OK (HS: %d)\n", is_highspeed); } cam_freeccb(ccb); return (retval); } // Get information about controller and its settings if (is_info_request) { ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->ccb_h.flags = 0; struct ccb_trans_settings_mmc *cts; cts = &ccb->cts.proto_specific.mmc; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { warn("Error sending command"); return (retval); } printf("Host controller information\n"); printf("Host OCR: 0x%x\n", cts->host_ocr); printf("Min frequency: %u KHz\n", cts->host_f_min / 1000); printf("Max frequency: %u MHz\n", cts->host_f_max / 1000000); printf("Supported bus width:\n"); if (cts->host_caps & MMC_CAP_4_BIT_DATA) printf(" 4 bit\n"); if (cts->host_caps & MMC_CAP_8_BIT_DATA) printf(" 8 bit\n"); printf("Supported operating modes:\n"); if (cts->host_caps & MMC_CAP_HSPEED) printf(" Can do High Speed transfers\n"); if (cts->host_caps & MMC_CAP_UHS_SDR12) printf(" Can do UHS SDR12\n"); if (cts->host_caps & MMC_CAP_UHS_SDR25) printf(" Can do UHS SDR25\n"); if (cts->host_caps & MMC_CAP_UHS_SDR50) printf(" Can do UHS SDR50\n"); if (cts->host_caps & MMC_CAP_UHS_SDR104) printf(" Can do UHS SDR104\n"); if (cts->host_caps & MMC_CAP_UHS_DDR50) printf(" Can do UHS DDR50\n"); if (cts->host_caps & MMC_CAP_MMC_DDR52_120) printf(" Can do eMMC DDR52 at 1.2V\n"); if (cts->host_caps & MMC_CAP_MMC_DDR52_180) printf(" Can do eMMC DDR52 at 1.8V\n"); if (cts->host_caps & MMC_CAP_MMC_HS200_120) printf(" Can do eMMC HS200 at 1.2V\n"); if (cts->host_caps & MMC_CAP_MMC_HS200_180) printf(" Can do eMMC HS200 at 1.8V\n"); if (cts->host_caps & MMC_CAP_MMC_HS400_120) printf(" Can do eMMC HS400 at 1.2V\n"); if (cts->host_caps & MMC_CAP_MMC_HS400_180) printf(" Can do eMMC HS400 at 1.8V\n"); printf("Supported VCCQ voltages:\n"); if (cts->host_caps & MMC_CAP_SIGNALING_120) printf(" 1.2V\n"); if (cts->host_caps & MMC_CAP_SIGNALING_180) printf(" 1.8V\n"); if (cts->host_caps & MMC_CAP_SIGNALING_330) printf(" 3.3V\n"); printf("Current settings:\n"); printf(" Bus width: "); switch (cts->ios.bus_width) { case bus_width_1: printf("1 bit\n"); break; case bus_width_4: printf("4 bit\n"); break; case bus_width_8: printf("8 bit\n"); break; } printf(" Freq: %d.%03d MHz%s\n", cts->ios.clock / 1000000, (cts->ios.clock / 1000) % 1000, cts->ios.timing == bus_timing_hs ? " (high-speed timing)" : ""); printf(" VCCQ: "); switch (cts->ios.vccq) { case vccq_330: printf("3.3V\n"); break; case vccq_180: printf("1.8V\n"); break; case vccq_120: printf("1.2V\n"); break; } return (0); } printf("CMD %d arg %d flags %02x\n", mmc_opcode, mmc_arg, mmc_flags); if (mmc_data_len > 0) { flags |= CAM_DIR_IN; mmc_data = malloc(mmc_data_len); memset(mmc_data, 0, mmc_data_len); memset(&mmc_d, 0, sizeof(mmc_d)); mmc_d.len = mmc_data_len; mmc_d.data = mmc_data; mmc_d.flags = MMC_DATA_READ; } else flags |= CAM_DIR_NONE; cam_fill_mmcio(&ccb->mmcio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*mmc_opcode*/ mmc_opcode, /*mmc_arg*/ mmc_arg, /*mmc_flags*/ mmc_flags, /*mmc_data*/ mmc_data_len > 0 ? &mmc_d : NULL, /*timeout*/ timeout ? 
timeout : 5000); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)) { printf("MMCIO: error %d, %08x %08x %08x %08x\n", ccb->mmcio.cmd.error, ccb->mmcio.cmd.resp[0], ccb->mmcio.cmd.resp[1], ccb->mmcio.cmd.resp[2], ccb->mmcio.cmd.resp[3]); switch (mmc_opcode) { case SD_IO_RW_DIRECT: printf("IO_RW_DIRECT: resp byte %02x, cur state %d\n", SD_R5_DATA(ccb->mmcio.cmd.resp), (ccb->mmcio.cmd.resp[0] >> 12) & 0x3); break; case SD_IO_RW_EXTENDED: printf("IO_RW_EXTENDED: read %d bytes w/o error:\n", mmc_data_len); hexdump(mmc_data, mmc_data_len, NULL, 0); break; case SD_SEND_RELATIVE_ADDR: printf("SEND_RELATIVE_ADDR: published RCA %02x\n", ccb->mmcio.cmd.resp[0] >> 16); break; default: printf("No command-specific decoder for CMD %d\n", mmc_opcode); if (mmc_data_len > 0) hexdump(mmc_data, mmc_data_len, NULL, 0); } } mmccmd_bailout: if (ccb != NULL) cam_freeccb(ccb); if (mmc_data_len > 0 && mmc_data != NULL) free(mmc_data); return (error); } static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_general_request *request = NULL; struct smp_report_general_response *response = NULL; struct sbuf *sb = NULL; int error = 0; int c, long_response = 0; int retval; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); error = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*response)); error = 1; goto bailout; } try_long: smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, /*request_len*/ sizeof(*request), (uint8_t *)response, /*response_len*/ sizeof(*response), /*long_response*/ long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } /* * If the device supports the long response bit, try again and see * if we can get all of the data. */ if ((response->long_response & SMP_RG_LONG_RESPONSE) && (long_response == 0)) { ccb->ccb_h.status = CAM_REQ_INPROG; CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); long_response = 1; goto try_long; } /* * XXX KDM detect and decode SMP errors here. 
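* For now the raw function_result byte in the response header is the only * error indication (the SMP_FR_* values; smpphylist below, for instance, * checks for SMP_FR_PHY_VACANT).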
*/ sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_general_sbuf(response, sizeof(*response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); if (sb != NULL) sbuf_delete(sb); return (error); } static struct camcontrol_opts phy_ops[] = { {"nop", SMP_PC_PHY_OP_NOP, CAM_ARG_NONE, NULL}, {"linkreset", SMP_PC_PHY_OP_LINK_RESET, CAM_ARG_NONE, NULL}, {"hardreset", SMP_PC_PHY_OP_HARD_RESET, CAM_ARG_NONE, NULL}, {"disable", SMP_PC_PHY_OP_DISABLE, CAM_ARG_NONE, NULL}, {"clearerrlog", SMP_PC_PHY_OP_CLEAR_ERR_LOG, CAM_ARG_NONE, NULL}, {"clearaffiliation", SMP_PC_PHY_OP_CLEAR_AFFILIATON, CAM_ARG_NONE,NULL}, {"sataportsel", SMP_PC_PHY_OP_TRANS_SATA_PSS, CAM_ARG_NONE, NULL}, {"clearitnl", SMP_PC_PHY_OP_CLEAR_STP_ITN_LS, CAM_ARG_NONE, NULL}, {"setdevname", SMP_PC_PHY_OP_SET_ATT_DEV_NAME, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_phy_control_request *request = NULL; struct smp_phy_control_response *response = NULL; int long_response = 0; int retval = 0; int phy = -1; uint32_t phy_operation = SMP_PC_PHY_OP_NOP; int phy_op_set = 0; uint64_t attached_dev_name = 0; int dev_name_set = 0; uint32_t min_plr = 0, max_plr = 0; uint32_t pp_timeout_val = 0; int slumber_partial = 0; int set_pp_timeout_val = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': case 'A': case 's': case 'S': { int enable = -1; if (strcasecmp(optarg, "enable") == 0) enable = 1; else if (strcasecmp(optarg, "disable") == 0) enable = 2; else { warnx("%s: Invalid argument %s", __func__, optarg); retval = 1; goto bailout; } switch (c) { case 's': slumber_partial |= enable << SMP_PC_SAS_SLUMBER_SHIFT; break; case 'S': slumber_partial |= enable << SMP_PC_SAS_PARTIAL_SHIFT; break; case 'a': slumber_partial |= enable << SMP_PC_SATA_SLUMBER_SHIFT; break; case 'A': slumber_partial |= enable << SMP_PC_SATA_PARTIAL_SHIFT; break; default: warnx("%s: programmer error", __func__); retval = 1; goto bailout; break; /*NOTREACHED*/ } break; } case 'd': attached_dev_name = (uintmax_t)strtoumax(optarg, NULL,0); dev_name_set = 1; break; case 'l': long_response = 1; break; case 'm': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ min_plr = strtoul(optarg, NULL, 0); if ((min_plr == 0) || (min_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, min_plr); retval = 1; goto bailout; } break; case 'M': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ max_plr = strtoul(optarg, NULL, 0); if ((max_plr == 0) || (max_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, max_plr); retval = 1; goto bailout; } break; case 'o': { camcontrol_optret optreturn; cam_argmask argnums; const char *subopt; if (phy_op_set != 0) { warnx("%s: only one phy operation argument " "(-o) allowed", __func__); retval = 1; goto bailout; } phy_op_set = 1; /* * Allow the user to specify the phy operation * numerically, as well as with a name. 
This will * future-proof it a bit, so options that are added * in future specs can be used. */ if (isdigit(optarg[0])) { phy_operation = strtoul(optarg, NULL, 0); if ((phy_operation == 0) || (phy_operation > 0xff)) { warnx("%s: invalid phy operation %#x", __func__, phy_operation); retval = 1; goto bailout; } break; } optreturn = getoption(phy_ops, optarg, &phy_operation, &argnums, &subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous option %s", __func__, optarg); usage(0); retval = 1; goto bailout; } else if (optreturn == CC_OR_NOT_FOUND) { warnx("%s: option %s not found", __func__, optarg); usage(0); retval = 1; goto bailout; } break; } case 'p': phy = atoi(optarg); break; case 'T': pp_timeout_val = strtoul(optarg, NULL, 0); if (pp_timeout_val > 15) { warnx("%s: invalid partial pathway timeout " "value %u, need a value less than 16", __func__, pp_timeout_val); retval = 1; goto bailout; } set_pp_timeout_val = 1; break; default: break; } } if (phy == -1) { warnx("%s: a PHY (-p phy) argument is required",__func__); retval = 1; goto bailout; } if (((dev_name_set != 0) && (phy_operation != SMP_PC_PHY_OP_SET_ATT_DEV_NAME)) || ((phy_operation == SMP_PC_PHY_OP_SET_ATT_DEV_NAME) && (dev_name_set == 0))) { warnx("%s: -d name and -o setdevname arguments both " "required to set device name", __func__); retval = 1; goto bailout; } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); retval = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*response)); retval = 1; goto bailout; } smp_phy_control(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, sizeof(*request), (uint8_t *)response, sizeof(*response), long_response, /*expected_exp_change_count*/ 0, phy, phy_operation, (set_pp_timeout_val != 0) ? 1 : 0, attached_dev_name, min_plr, max_plr, slumber_partial, pp_timeout_val, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { /* * Use CAM_EPF_NORMAL so we only get one line of * SMP command decoding. */ cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_NORMAL, stderr); } retval = 1; goto bailout; } /* XXX KDM print out something here for success? */ bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); return (retval); } static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_manuf_info_request request; struct smp_report_manuf_info_response response; struct sbuf *sb = NULL; int long_response = 0; int retval = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } bzero(&request, sizeof(request)); bzero(&response, sizeof(response)); smp_report_manuf_info(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, &request, sizeof(request), (uint8_t *)&response, sizeof(response), long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_manuf_info_sbuf(&response, sizeof(response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (sb != NULL) sbuf_delete(sb); return (retval); } static int getdevid(struct cam_devitem *item) { int retval = 0; union ccb *ccb = NULL; struct cam_device *dev; dev = cam_open_btl(item->dev_match.path_id, item->dev_match.target_id, item->dev_match.target_lun, O_RDWR, NULL); if (dev == NULL) { warnx("%s", cam_errbuf); retval = 1; goto bailout; } item->device_id_len = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); retval = 1; goto bailout; } /* * On the first try, we just probe for the size of the data, and * then allocate that much memory and try again. */ retry: ccb->ccb_h.func_code = XPT_DEV_ADVINFO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->cdai.flags = CDAI_FLAG_NONE; ccb->cdai.buftype = CDAI_TYPE_SCSI_DEVID; ccb->cdai.bufsiz = item->device_id_len; if (item->device_id_len != 0) ccb->cdai.buf = (uint8_t *)item->device_id; if (cam_send_ccb(dev, ccb) < 0) { warn("%s: error sending XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } if (ccb->ccb_h.status != CAM_REQ_CMP) { warnx("%s: CAM status %#x", __func__, ccb->ccb_h.status); retval = 1; goto bailout; } if (item->device_id_len == 0) { /* * This is our first time through. Allocate the buffer, * and then go back to get the data. */ if (ccb->cdai.provsiz == 0) { warnx("%s: invalid .provsiz field returned with " "XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } item->device_id_len = ccb->cdai.provsiz; item->device_id = malloc(item->device_id_len); if (item->device_id == NULL) { warn("%s: unable to allocate %d bytes", __func__, item->device_id_len); retval = 1; goto bailout; } ccb->ccb_h.status = CAM_REQ_INPROG; goto retry; } bailout: if (dev != NULL) cam_close_device(dev); if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * XXX KDM merge this code with getdevtree()? 
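* Both walk the EDT with an XPT_DEV_MATCH ioctl; the difference is that this * version also fetches a device ID page for each device via getdevid() so * findsasdevice() can match attached SAS addresses later.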
*/ static int buildbusdevlist(struct cam_devlist *devlist) { union ccb ccb; int bufsize, fd = -1; struct dev_match_pattern *patterns; struct cam_devitem *item = NULL; int skip_device = 0; int retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return (1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return (1); } ccb.cdm.num_matches = 0; ccb.cdm.num_patterns = 2; ccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern) * ccb.cdm.num_patterns; patterns = (struct dev_match_pattern *)malloc(ccb.cdm.pattern_buf_len); if (patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } ccb.cdm.patterns = patterns; bzero(patterns, ccb.cdm.pattern_buf_len); patterns[0].type = DEV_MATCH_DEVICE; patterns[0].pattern.device_pattern.flags = DEV_MATCH_PATH; patterns[0].pattern.device_pattern.path_id = devlist->path_id; patterns[1].type = DEV_MATCH_PERIPH; patterns[1].pattern.periph_pattern.flags = PERIPH_MATCH_PATH; patterns[1].pattern.periph_pattern.path_id = devlist->path_id; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); retval = 1; goto bailout; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; dev_result = &ccb.cdm.matches[i].result.device_result; if (dev_result->flags & DEV_RESULT_UNCONFIGURED) { skip_device = 1; break; } else skip_device = 0; item = malloc(sizeof(*item)); if (item == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*item)); retval = 1; goto bailout; } bzero(item, sizeof(*item)); bcopy(dev_result, &item->dev_match, sizeof(*dev_result)); STAILQ_INSERT_TAIL(&devlist->dev_queue, item, links); if (getdevid(item) != 0) { retval = 1; goto bailout; } break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (skip_device != 0) break; item->num_periphs++; item->periph_matches = realloc( item->periph_matches, item->num_periphs * sizeof(struct periph_match_result)); if (item->periph_matches == NULL) { warn("%s: error allocating periph " "list", __func__); retval = 1; goto bailout; } bcopy(periph_result, &item->periph_matches[ item->num_periphs - 1], sizeof(*periph_result)); break; } default: fprintf(stderr, "%s: unexpected match " "type %d\n", __func__, ccb.cdm.matches[i].type); retval = 1; goto bailout; break; /*NOTREACHED*/ } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); free(patterns); free(ccb.cdm.matches); if (retval != 0) freebusdevlist(devlist); return (retval); } static void freebusdevlist(struct cam_devlist *devlist) { struct cam_devitem *item, *item2; STAILQ_FOREACH_SAFE(item, 
&devlist->dev_queue, links, item2) { STAILQ_REMOVE(&devlist->dev_queue, item, cam_devitem, links); free(item->device_id); free(item->periph_matches); free(item); } } static struct cam_devitem * findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr) { struct cam_devitem *item; STAILQ_FOREACH(item, &devlist->dev_queue, links) { struct scsi_vpd_id_descriptor *idd; /* * XXX KDM look for LUN IDs as well? */ idd = scsi_get_devid(item->device_id, item->device_id_len, scsi_devid_is_sas_target); if (idd == NULL) continue; if (scsi_8btou64(idd->identifier) == sasaddr) return (item); } return (NULL); } static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { struct smp_report_general_request *rgrequest = NULL; struct smp_report_general_response *rgresponse = NULL; struct smp_discover_request *disrequest = NULL; struct smp_discover_response *disresponse = NULL; struct cam_devlist devlist; union ccb *ccb; int long_response = 0; int num_phys = 0; int quiet = 0; int retval; int i, c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } STAILQ_INIT(&devlist.dev_queue); rgrequest = malloc(sizeof(*rgrequest)); if (rgrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgrequest)); retval = 1; goto bailout; } rgresponse = malloc(sizeof(*rgresponse)); if (rgresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgresponse)); retval = 1; goto bailout; } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; case 'q': quiet = 1; break; default: break; } } smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, rgrequest, /*request_len*/ sizeof(*rgrequest), (uint8_t *)rgresponse, /*response_len*/ sizeof(*rgresponse), /*long_response*/ long_response, timeout); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } num_phys = rgresponse->num_phys; if (num_phys == 0) { if (quiet == 0) fprintf(stdout, "%s: No Phys reported\n", __func__); retval = 1; goto bailout; } devlist.path_id = device->path_id; retval = buildbusdevlist(&devlist); if (retval != 0) goto bailout; if (quiet == 0) { fprintf(stdout, "%d PHYs:\n", num_phys); fprintf(stdout, "PHY Attached SAS Address\n"); } disrequest = malloc(sizeof(*disrequest)); if (disrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disrequest)); retval = 1; goto bailout; } disresponse = malloc(sizeof(*disresponse)); if (disresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disresponse)); retval = 1; goto bailout; } for (i = 0; i < num_phys; i++) { struct cam_devitem *item; struct device_match_result *dev_match; char vendor[16], product[48], revision[16]; char tmpstr[256]; int j; CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; smp_discover(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, disrequest, sizeof(*disrequest), (uint8_t *)disresponse, sizeof(*disresponse), long_response, /*ignore_zone_group*/ 0, /*phy*/ i, timeout); if 
(((retval = cam_send_ccb(device, ccb)) < 0) || (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (disresponse->function_result != SMP_FR_PHY_VACANT))) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } if (disresponse->function_result == SMP_FR_PHY_VACANT) { if (quiet == 0) fprintf(stdout, "%3d \n", i); continue; } if (disresponse->attached_device == SMP_DIS_AD_TYPE_NONE) { item = NULL; } else { item = findsasdevice(&devlist, scsi_8btou64(disresponse->attached_sas_address)); } if ((quiet == 0) || (item != NULL)) { fprintf(stdout, "%3d 0x%016jx", i, (uintmax_t)scsi_8btou64( disresponse->attached_sas_address)); if (item == NULL) { fprintf(stdout, "\n"); continue; } } else if (quiet != 0) continue; dev_match = &item->dev_match; if (dev_match->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_match->inq_data.vendor, sizeof(dev_match->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_match->inq_data.product, sizeof(dev_match->inq_data.product), sizeof(product)); cam_strvis(revision, dev_match->inq_data.revision, sizeof(dev_match->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if ((dev_match->protocol == PROTO_ATA) || (dev_match->protocol == PROTO_SATAPM)) { cam_strvis(product, dev_match->ident_data.model, sizeof(dev_match->ident_data.model), sizeof(product)); cam_strvis(revision, dev_match->ident_data.revision, sizeof(dev_match->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else { sprintf(tmpstr, "<>"); } fprintf(stdout, " %-33s ", tmpstr); /* * If we have 0 periphs, that's a bug... 
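* (every configured device should have at least one peripheral, e.g. pass(4), * attached), so just terminate the output line and move on.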
*/ if (item->num_periphs == 0) { fprintf(stdout, "\n"); continue; } fprintf(stdout, "("); for (j = 0; j < item->num_periphs; j++) { if (j > 0) fprintf(stdout, ","); fprintf(stdout, "%s%d", item->periph_matches[j].periph_name, item->periph_matches[j].unit_number); } fprintf(stdout, ")\n"); } bailout: if (ccb != NULL) cam_freeccb(ccb); free(rgrequest); free(rgresponse); free(disrequest); free(disresponse); freebusdevlist(&devlist); return (retval); } static int atapm_proc_resp(struct cam_device *device, union ccb *ccb) { uint8_t error = 0, ata_device = 0, status = 0; uint16_t count = 0; uint64_t lba = 0; int retval; retval = get_ata_status(device, ccb, &error, &count, &lba, &ata_device, &status); if (retval == 1) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } warnx("Can't get ATA command status"); return (retval); } if (status & ATA_STATUS_ERROR) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); return (1); } printf("%s%d: ", device->device_name, device->dev_unit_num); switch (count) { case ATA_PM_STANDBY: printf("Standby mode\n"); break; case ATA_PM_STANDBY_Y: printf("Standby_y mode\n"); break; case 0x40: /* obsolete since ACS-3 */ printf("NV Cache Power Mode and the spindle is spun down or spinning down\n"); break; case 0x41: /* obsolete since ACS-3 */ printf("NV Cache Power Mode and the spindle is spun up or spinning up\n"); break; case ATA_PM_IDLE: printf("Idle mode\n"); break; case ATA_PM_IDLE_A: printf("Idle_a mode\n"); break; case ATA_PM_IDLE_B: printf("Idle_b mode\n"); break; case ATA_PM_IDLE_C: printf("Idle_c mode\n"); break; case ATA_PM_ACTIVE_IDLE: printf("Active or Idle mode\n"); break; default: printf("Unknown mode 0x%02x\n", count); break; } return (0); } static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int t = -1; int c; uint8_t ata_flags = 0; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 't': t = atoi(optarg); break; default: break; } } if (strcmp(argv[1], "idle") == 0) { if (t == -1) cmd = ATA_IDLE_IMMEDIATE; else cmd = ATA_IDLE_CMD; } else if (strcmp(argv[1], "standby") == 0) { if (t == -1) cmd = ATA_STANDBY_IMMEDIATE; else cmd = ATA_STANDBY_CMD; } else if (strcmp(argv[1], "powermode") == 0) { cmd = ATA_CHECK_POWER_MODE; ata_flags = AP_FLAG_CHK_COND; t = -1; } else { cmd = ATA_SLEEP; t = -1; } if (t < 0) sc = 0; else if (t <= (240 * 5)) sc = (t + 4) / 5; else if (t <= (252 * 5)) /* special encoding for 21 minutes */ sc = 252; else if (t <= (11 * 30 * 60)) sc = (t - 1) / (30 * 60) + 241; else sc = 253; retval = ata_do_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*ata_flags*/ata_flags, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? 
timeout : 30 * 1000, /*force48bit*/0); if (retval == 0 && cmd == ATA_CHECK_POWER_MODE) retval = atapm_proc_resp(device, ccb); cam_freeccb(ccb); return (retval); } static int ataaxm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int l = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': l = atoi(optarg); break; default: break; } } sc = 0; if (strcmp(argv[1], "apm") == 0) { if (l == -1) cmd = 0x85; else { cmd = 0x05; sc = l; } } else /* aam */ { if (l == -1) cmd = 0xC2; else { cmd = 0x42; sc = l; } } retval = ata_do_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*ata_flags*/0, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SETFEATURES, /*features*/cmd, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? timeout : 30 * 1000, /*force48bit*/0); cam_freeccb(ccb); return (retval); } int scsigetopcodes(struct cam_device *device, int opcode_set, int opcode, int show_sa_errors, int sa_set, int service_action, int timeout_desc, int task_attr, int retry_count, int timeout, int verbosemode, uint32_t *fill_len, uint8_t **data_ptr) { union ccb *ccb = NULL; uint8_t *buf = NULL; uint32_t alloc_len = 0, num_opcodes; uint32_t valid_len = 0; uint32_t avail_len = 0; struct scsi_report_supported_opcodes_all *all_hdr; struct scsi_report_supported_opcodes_one *one; int options = 0; int retval = 0; /* * Make it clear that we haven't yet allocated or filled anything. */ *fill_len = 0; *data_ptr = NULL; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); retval = 1; goto bailout; } if (opcode_set != 0) { options |= RSO_OPTIONS_OC; num_opcodes = 1; alloc_len = sizeof(*one) + CAM_MAX_CDBLEN; } else { num_opcodes = 256; alloc_len = sizeof(*all_hdr) + (num_opcodes * sizeof(struct scsi_report_supported_opcodes_descr)); } if (timeout_desc != 0) { options |= RSO_RCTD; alloc_len += num_opcodes * sizeof(struct scsi_report_supported_opcodes_timeout); } if (sa_set != 0) { options |= RSO_OPTIONS_OC_SA; if (show_sa_errors != 0) options &= ~RSO_OPTIONS_OC; } retry_alloc: if (buf != NULL) { free(buf); buf = NULL; } buf = malloc(alloc_len); if (buf == NULL) { warn("Unable to allocate %u bytes", alloc_len); retval = 1; goto bailout; } bzero(buf, alloc_len); scsi_report_supported_opcodes(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ task_attr, /*options*/ options, /*req_opcode*/ opcode, /*req_service_action*/ service_action, /*data_ptr*/ buf, /*dxfer_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? 
timeout : 10000); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending REPORT SUPPORTED OPERATION CODES command"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (((options & RSO_OPTIONS_MASK) == RSO_OPTIONS_ALL) && (valid_len >= sizeof(*all_hdr))) { all_hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(all_hdr->length) + sizeof(*all_hdr); } else if (((options & RSO_OPTIONS_MASK) != RSO_OPTIONS_ALL) && (valid_len >= sizeof(*one))) { uint32_t cdb_length; one = (struct scsi_report_supported_opcodes_one *)buf; cdb_length = scsi_2btoul(one->cdb_length); avail_len = sizeof(*one) + cdb_length; if (one->support & RSO_ONE_CTDP) { struct scsi_report_supported_opcodes_timeout *td; td = (struct scsi_report_supported_opcodes_timeout *) &buf[avail_len]; if (valid_len >= (avail_len + sizeof(td->length))) { avail_len += scsi_2btoul(td->length) + sizeof(td->length); } else { avail_len += sizeof(*td); } } } /* * avail_len could be zero if we didn't get enough data back from * the target to determine how much data is actually available. */ if ((avail_len != 0) && (avail_len > valid_len)) { alloc_len = avail_len; goto retry_alloc; } *fill_len = valid_len; *data_ptr = buf; bailout: if (retval != 0) free(buf); cam_freeccb(ccb); return (retval); } static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_one *one; struct scsi_report_supported_opcodes_timeout *td; uint32_t cdb_len = 0, td_len = 0; const char *op_desc = NULL; unsigned int i; int retval = 0; one = (struct scsi_report_supported_opcodes_one *)buf; /* * If we don't have the full single opcode descriptor, no point in * continuing. */ if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one, cdb_length)) { warnx("Only %u bytes returned, not enough to verify support", valid_len); retval = 1; goto bailout; } op_desc = scsi_op_desc(req_opcode, &device->inq_data); printf("%s (0x%02x)", op_desc != NULL ? op_desc : "UNKNOWN", req_opcode); if (sa_set != 0) printf(", SA 0x%x", req_sa); printf(": "); switch (one->support & RSO_ONE_SUP_MASK) { case RSO_ONE_SUP_UNAVAIL: printf("No command support information currently available\n"); break; case RSO_ONE_SUP_NOT_SUP: printf("Command not supported\n"); retval = 1; goto bailout; break; /*NOTREACHED*/ case RSO_ONE_SUP_AVAIL: printf("Command is supported, complies with a SCSI standard\n"); break; case RSO_ONE_SUP_VENDOR: printf("Command is supported, vendor-specific " "implementation\n"); break; default: printf("Unknown command support flags %#x\n", one->support & RSO_ONE_SUP_MASK); break; } /* * If we don't have the CDB length, it isn't exactly an error, the * command probably isn't supported. */ if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one, cdb_usage)) goto bailout; cdb_len = scsi_2btoul(one->cdb_length); /* * If our valid data doesn't include the full reported length, * return. The caller should have detected this and adjusted its * allocation length to get all of the available data. */ if (valid_len < sizeof(*one) + cdb_len) { retval = 1; goto bailout; } /* * If all we have is the opcode, there is no point in printing out * the usage bitmap.
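* When the bitmap is present it is printed one byte per CDB byte; a supported * 10-byte CDB might produce a (hypothetical) line such as * "CDB usage bitmap: 28 00 ff ff ff ff 00 ff ff 07", where set bits mark the * CDB bits the device actually inspects.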
*/ if (cdb_len <= 1) { retval = 1; goto bailout; } printf("CDB usage bitmap:"); for (i = 0; i < cdb_len; i++) { printf(" %02x", one->cdb_usage[i]); } printf("\n"); /* * If we don't have a timeout descriptor, we're done. */ if ((one->support & RSO_ONE_CTDP) == 0) goto bailout; /* * If we don't have enough valid length to include the timeout * descriptor length, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + sizeof(td->length))) goto bailout; td = (struct scsi_report_supported_opcodes_timeout *) &buf[sizeof(*one) + cdb_len]; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); /* * If we don't have the full timeout descriptor, we're done. */ if (td_len < sizeof(*td)) goto bailout; /* * If we don't have enough valid length to contain the full timeout * descriptor, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + td_len)) goto bailout; printf("Timeout information:\n"); printf("Command-specific: 0x%02x\n", td->cmd_specific); printf("Nominal timeout: %u seconds\n", scsi_4btoul(td->nominal_time)); printf("Recommended timeout: %u seconds\n", scsi_4btoul(td->recommended_time)); bailout: return (retval); } static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_all *hdr; struct scsi_report_supported_opcodes_descr *desc; uint32_t avail_len = 0, used_len = 0; uint8_t *cur_ptr; int retval = 0; if (valid_len < sizeof(*hdr)) { warnx("%s: not enough returned data (%u bytes) opcode list", __func__, valid_len); retval = 1; goto bailout; } hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(hdr->length); avail_len += sizeof(hdr->length); /* * Take the lesser of the amount of data the drive claims is * available, and the amount of data the HBA says was returned. */ avail_len = MIN(avail_len, valid_len); used_len = sizeof(hdr->length); printf("%-6s %4s %8s ", "Opcode", "SA", "CDB len" ); if (td_req != 0) printf("%5s %6s %6s ", "CS", "Nom", "Rec"); printf(" Description\n"); while ((avail_len - used_len) > sizeof(*desc)) { struct scsi_report_supported_opcodes_timeout *td; uint32_t td_len; const char *op_desc = NULL; cur_ptr = &buf[used_len]; desc = (struct scsi_report_supported_opcodes_descr *)cur_ptr; op_desc = scsi_op_desc(desc->opcode, &device->inq_data); if (op_desc == NULL) op_desc = "UNKNOWN"; printf("0x%02x %#4x %8u ", desc->opcode, scsi_2btoul(desc->service_action), scsi_2btoul(desc->cdb_length)); used_len += sizeof(*desc); if ((desc->flags & RSO_CTDP) == 0) { printf(" %s\n", op_desc); continue; } /* * If we don't have enough space to fit a timeout * descriptor, then we're done. */ if (avail_len - used_len < sizeof(*td)) { used_len = avail_len; printf(" %s\n", op_desc); continue; } cur_ptr = &buf[used_len]; td = (struct scsi_report_supported_opcodes_timeout *)cur_ptr; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); used_len += td_len; /* * If the given timeout descriptor length is less than what * we understand, skip it. 
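* used_len has already been advanced past the descriptor above, so skipping * here keeps the walk aligned with the next entry in the list.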
*/ if (td_len < sizeof(*td)) { printf(" %s\n", op_desc); continue; } printf(" 0x%02x %6u %6u %s\n", td->cmd_specific, scsi_4btoul(td->nominal_time), scsi_4btoul(td->recommended_time), op_desc); } bailout: return (retval); } static int scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int task_attr, int retry_count, int timeout, int verbosemode) { int c; uint32_t opcode = 0, service_action = 0; int td_set = 0, opcode_set = 0, sa_set = 0; int show_sa_errors = 1; uint32_t valid_len = 0; uint8_t *buf = NULL; char *endptr; int retval = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'N': show_sa_errors = 0; break; case 'o': opcode = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("Invalid opcode \"%s\", must be a number", optarg); retval = 1; goto bailout; } if (opcode > 0xff) { warnx("Invalid opcode %#x, must be between " "0 and 0xff inclusive", opcode); retval = 1; goto bailout; } opcode_set = 1; break; case 's': service_action = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("Invalid service action \"%s\", must " "be a number", optarg); retval = 1; goto bailout; } if (service_action > 0xffff) { warnx("Invalid service action %#x, must " "be between 0 and 0xffff inclusive", service_action); retval = 1; goto bailout; } sa_set = 1; break; case 'T': td_set = 1; break; default: break; } } if ((sa_set != 0) && (opcode_set == 0)) { warnx("You must specify an opcode with -o if a service " "action is given"); retval = 1; goto bailout; } retval = scsigetopcodes(device, opcode_set, opcode, show_sa_errors, sa_set, service_action, td_set, task_attr, retry_count, timeout, verbosemode, &valid_len, &buf); if (retval != 0) goto bailout; if ((opcode_set != 0) || (sa_set != 0)) { retval = scsiprintoneopcode(device, opcode, sa_set, service_action, buf, valid_len); } else { retval = scsiprintopcodes(device, td_set, buf, valid_len); } bailout: free(buf); return (retval); } static int reprobe(struct cam_device *device) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } ccb->ccb_h.func_code = XPT_REPROBE_LUN; if (cam_send_ccb(device, ccb) < 0) { warn("error sending XPT_REPROBE_LUN CCB"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } bailout: cam_freeccb(ccb); return (retval); } void usage(int printlong) { fprintf(printlong ?
stdout : stderr, "usage: camcontrol <command> [device id][generic args][command args]\n" " camcontrol devlist [-b] [-v]\n" " camcontrol periphlist [dev_id][-n dev_name] [-u unit]\n" " camcontrol tur [dev_id][generic args]\n" " camcontrol sense [dev_id][generic args][-D][-x]\n" " camcontrol inquiry [dev_id][generic args] [-D] [-S] [-R]\n" " camcontrol identify [dev_id][generic args] [-v]\n" " camcontrol reportluns [dev_id][generic args] [-c] [-l] [-r report]\n" " camcontrol readcap [dev_id][generic args] [-b] [-h] [-H] [-N]\n" " [-q] [-s] [-l]\n" " camcontrol start [dev_id][generic args]\n" " camcontrol stop [dev_id][generic args]\n" " camcontrol load [dev_id][generic args]\n" " camcontrol eject [dev_id][generic args]\n" " camcontrol reprobe [dev_id][generic args]\n" " camcontrol rescan <all | bus[:target:lun] | dev_id>\n" " camcontrol reset <all | bus[:target:lun] | dev_id>\n" " camcontrol defects [dev_id][generic args] <-f format> [-P][-G]\n" " [-q][-s][-S offset][-X]\n" " camcontrol modepage [dev_id][generic args] <-m page | -l>\n" " [-P pagectl][-e | -b][-d]\n" " camcontrol cmd [dev_id][generic args]\n" " <-a cmd [args] | -c cmd [args]>\n" " [-d] [-f] [-i len fmt|-o len fmt [args]] [-r fmt]\n" " camcontrol smpcmd [dev_id][generic args]\n" " <-r len fmt [args]> <-R len fmt [args]>\n" " camcontrol smprg [dev_id][generic args][-l]\n" " camcontrol smppc [dev_id][generic args] <-p phy> [-l]\n" " [-o operation][-d name][-m rate][-M rate]\n" " [-T pp_timeout][-a enable|disable]\n" " [-A enable|disable][-s enable|disable]\n" " [-S enable|disable]\n" " camcontrol smpphylist [dev_id][generic args][-l][-q]\n" " camcontrol smpmaninfo [dev_id][generic args][-l]\n" " camcontrol debug [-I][-P][-T][-S][-X][-c]\n" " <all|dev_id|bus[:target[:lun]]|off>\n" " camcontrol tags [dev_id][generic args] [-N tags] [-q] [-v]\n" " camcontrol negotiate [dev_id][generic args] [-a][-c]\n" " [-D <enable|disable>][-M mode][-O offset]\n" " [-q][-R syncrate][-v][-T <enable|disable>]\n" " [-U][-W bus_width]\n" " camcontrol format [dev_id][generic args][-q][-r][-w][-y]\n" " camcontrol sanitize [dev_id][generic args]\n" " [-a overwrite|block|crypto|exitfailure]\n" " [-c passes][-I][-P pattern][-q][-U][-r][-w]\n" " [-y]\n" " camcontrol idle [dev_id][generic args][-t time]\n" " camcontrol standby [dev_id][generic args][-t time]\n" " camcontrol sleep [dev_id][generic args]\n" " camcontrol powermode [dev_id][generic args]\n" " camcontrol apm [dev_id][generic args][-l level]\n" " camcontrol aam [dev_id][generic args][-l level]\n" " camcontrol fwdownload [dev_id][generic args] <-f fw_image> [-q]\n" " [-s][-y]\n" " camcontrol security [dev_id][generic args]\n" " <-d pwd | -e pwd | -f | -h pwd | -k pwd>\n" " [-l <high|maximum>] [-q] [-s pwd] [-T timeout]\n" " [-U <user|master>] [-y]\n" " camcontrol hpa [dev_id][generic args] [-f] [-l] [-P] [-p pwd]\n" " [-q] [-s max_sectors] [-U pwd] [-y]\n" " camcontrol ama [dev_id][generic args] [-f] [-q] [-s max_sectors]\n" " camcontrol persist [dev_id][generic args] <-i action|-o action>\n" " [-a][-I tid][-k key][-K sa_key][-p][-R rtp]\n" " [-s scope][-S][-T type][-U]\n" " camcontrol attrib [dev_id][generic args] <-r action|-w attr>\n" " [-a attr_num][-c][-e elem][-F form1,form1]\n" " [-p part][-s start][-T type][-V vol]\n" " camcontrol opcodes [dev_id][generic args][-o opcode][-s SA]\n" " [-N][-T]\n" " camcontrol zone [dev_id][generic args]<-c cmd> [-a] [-l LBA]\n" " [-o rep_opts] [-P print_opts]\n" " camcontrol epc [dev_id][generic_args]<-c cmd> [-d] [-D] [-e]\n" " [-H] [-p power_cond] [-P] [-r rst_src] [-s]\n" " [-S power_src] [-T timer]\n" " camcontrol timestamp [dev_id][generic_args] <-r [-f format|-m|-U]>|\n" " <-s <-f format -T time | -U >>\n" "
camcontrol devtype [dev_id]\n" " camcontrol depop [dev_id] [-d | -l | -r] [-e element] [-c capacity]\n" " camcontrol mmcsdcmd [dev_id] [[-c mmc_opcode] [-a mmc_arg]\n" " [-f mmc_flags] [-l data_len]\n" " [-W [-b data_byte]]] |\n" " [-F frequency] |\n" " [-I]\n" " [-1 | -4]\n" " [-S high|normal]\n" " \n" " camcontrol help\n"); if (!printlong) return; fprintf(stdout, "Specify one of the following options:\n" "devlist list all CAM devices\n" "periphlist list all CAM peripheral drivers attached to a device\n" "sense send a request sense command to the named device\n" "tur send a test unit ready to the named device\n" "inquiry send a SCSI inquiry command to the named device\n" "identify send a ATA identify command to the named device\n" "reportluns send a SCSI report luns command to the device\n" "readcap send a SCSI read capacity command to the device\n" "start send a Start Unit command to the device\n" "stop send a Stop Unit command to the device\n" "load send a Start Unit command to the device with the load bit set\n" "eject send a Stop Unit command to the device with the eject bit set\n" "reprobe update capacity information of the given device\n" "rescan rescan all buses, the given bus, bus:target:lun or device\n" "reset reset all buses, the given bus, bus:target:lun or device\n" "defects read the defect list of the specified device\n" "modepage display or edit (-e) the given mode page\n" "cmd send the given SCSI command, may need -i or -o as well\n" "smpcmd send the given SMP command, requires -o and -i\n" "smprg send the SMP Report General command\n" "smppc send the SMP PHY Control command, requires -p\n" "smpphylist display phys attached to a SAS expander\n" "smpmaninfo send the SMP Report Manufacturer Info command\n" "debug turn debugging on/off for a bus, target, or lun, or all devices\n" "tags report or set the number of transaction slots for a device\n" "negotiate report or set device negotiation parameters\n" "format send the SCSI FORMAT UNIT command to the named device\n" "sanitize send the SCSI SANITIZE command to the named device\n" "idle send the ATA IDLE command to the named device\n" "standby send the ATA STANDBY command to the named device\n" "sleep send the ATA SLEEP command to the named device\n" "powermode send the ATA CHECK POWER MODE command to the named device\n" "fwdownload program firmware of the named device with the given image\n" "security report or send ATA security commands to the named device\n" "persist send the SCSI PERSISTENT RESERVE IN or OUT commands\n" "attrib send the SCSI READ or WRITE ATTRIBUTE commands\n" "opcodes send the SCSI REPORT SUPPORTED OPCODES command\n" "zone manage Zoned Block (Shingled) devices\n" "epc send ATA Extended Power Conditions commands\n" "timestamp report or set the device's timestamp\n" "devtype report the type of device\n" "depop manage drive storage elements\n" "mmcsdcmd send the given MMC command, needs -c and -a as well\n" "help this message\n" "Device Identifiers:\n" "bus:target specify the bus and target, lun defaults to 0\n" "bus:target:lun specify the bus, target and lun\n" "deviceUNIT specify the device name, like \"da4\" or \"cd2\"\n" "Generic arguments:\n" "-v be verbose, print out sense information\n" "-t timeout command timeout in seconds, overrides default timeout\n" "-n dev_name specify device name, e.g. \"da\", \"cd\"\n" "-u unit specify unit number, e.g. 
\"0\", \"5\"\n" "-E have the kernel attempt to perform SCSI error recovery\n" "-C count specify the SCSI command retry count (needs -E to work)\n" "-Q task_attr specify ordered, simple or head tag type for SCSI cmds\n" "modepage arguments:\n" "-l list all available mode pages\n" "-m page specify the mode page to view or edit\n" "-e edit the specified mode page\n" "-b force view to binary mode\n" "-d disable block descriptors for mode sense\n" "-P pgctl page control field 0-3\n" "defects arguments:\n" "-f format specify defect list format (block, bfi or phys)\n" "-G get the grown defect list\n" "-P get the permanent defect list\n" "sense arguments:\n" "-D request descriptor sense data\n" "-x do a hexdump of the sense data\n" "inquiry arguments:\n" "-D get the standard inquiry data\n" "-S get the serial number\n" "-R get the transfer rate, etc.\n" "reportluns arguments:\n" "-c only report a count of available LUNs\n" "-l only print out luns, and not a count\n" "-r specify \"default\", \"wellknown\" or \"all\"\n" "readcap arguments\n" "-b only report the blocksize\n" "-h human readable device size, base 2\n" "-H human readable device size, base 10\n" "-N print the number of blocks instead of last block\n" "-q quiet, print numbers only\n" "-s only report the last block/device size\n" "cmd arguments:\n" "-c cdb [args] specify the SCSI CDB\n" "-i len fmt specify input data and input data format\n" "-o len fmt [args] specify output data and output data fmt\n" "smpcmd arguments:\n" "-r len fmt [args] specify the SMP command to be sent\n" "-R len fmt [args] specify SMP response format\n" "smprg arguments:\n" "-l specify the long response format\n" "smppc arguments:\n" "-p phy specify the PHY to operate on\n" "-l specify the long request/response format\n" "-o operation specify the phy control operation\n" "-d name set the attached device name\n" "-m rate set the minimum physical link rate\n" "-M rate set the maximum physical link rate\n" "-T pp_timeout set the partial pathway timeout value\n" "-a enable|disable enable or disable SATA slumber\n" "-A enable|disable enable or disable SATA partial phy power\n" "-s enable|disable enable or disable SAS slumber\n" "-S enable|disable enable or disable SAS partial phy power\n" "smpphylist arguments:\n" "-l specify the long response format\n" "-q only print phys with attached devices\n" "smpmaninfo arguments:\n" "-l specify the long response format\n" "debug arguments:\n" "-I CAM_DEBUG_INFO -- scsi commands, errors, data\n" "-T CAM_DEBUG_TRACE -- routine flow tracking\n" "-S CAM_DEBUG_SUBTRACE -- internal routine command flow\n" "-c CAM_DEBUG_CDB -- print out SCSI CDBs only\n" "tags arguments:\n" "-N tags specify the number of tags to use for this device\n" "-q be quiet, don't report the number of tags\n" "-v report a number of tag-related parameters\n" "negotiate arguments:\n" "-a send a test unit ready after negotiation\n" "-c report/set current negotiation settings\n" "-D \"enable\" or \"disable\" disconnection\n" "-M mode set ATA mode\n" "-O offset set command delay offset\n" "-q be quiet, don't report anything\n" "-R syncrate synchronization rate in MHz\n" "-T \"enable\" or \"disable\" tagged queueing\n" "-U report/set user negotiation settings\n" "-W bus_width set the bus width in bits (8, 16 or 32)\n" "-v also print a Path Inquiry CCB for the controller\n" "format arguments:\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-w don't send immediate format command\n" "-y don't ask any questions\n" "sanitize arguments:\n" 
"-a operation operation mode: overwrite, block, crypto or exitfailure\n" "-c passes overwrite passes to perform (1 to 31)\n" "-I invert overwrite pattern after each pass\n" "-P pattern path to overwrite pattern file\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-U run operation in unrestricted completion exit mode\n" "-w don't send immediate sanitize command\n" "-y don't ask any questions\n" "idle/standby arguments:\n" "-t number of seconds before respective state.\n" "fwdownload arguments:\n" "-f fw_image path to firmware image file\n" "-q don't print informational messages, only errors\n" "-s run in simulation mode\n" "-v print info for every firmware segment sent to device\n" "-y don't ask any questions\n" "security arguments:\n" "-d pwd disable security using the given password for the selected\n" " user\n" "-e pwd erase the device using the given pwd for the selected user\n" "-f freeze the security configuration of the specified device\n" "-h pwd enhanced erase the device using the given pwd for the\n" " selected user\n" "-k pwd unlock the device using the given pwd for the selected\n" " user\n" "-l specifies which security level to set: high or maximum\n" "-q be quiet, do not print any status messages\n" "-s pwd password the device (enable security) using the given\n" " pwd for the selected user\n" "-T timeout overrides the timeout (seconds) used for erase operation\n" "-U specifies which user to set: user or master\n" "-y don't ask any questions\n" "hpa arguments:\n" "-f freeze the HPA configuration of the device\n" "-l lock the HPA configuration of the device\n" "-P make the HPA max sectors persist\n" "-p pwd Set the HPA configuration password required for unlock\n" " calls\n" "-q be quiet, do not print any status messages\n" "-s sectors configures the maximum user accessible sectors of the\n" " device\n" "-U pwd unlock the HPA configuration of the device\n" "-y don't ask any questions\n" "ama arguments:\n" "-f freeze the AMA configuration of the device\n" "-q be quiet, do not print any status messages\n" "-s sectors configures the maximum user accessible sectors of the\n" " device\n" "persist arguments:\n" "-i action specify read_keys, read_reservation, report_cap, or\n" " read_full_status\n" "-o action specify register, register_ignore, reserve, release,\n" " clear, preempt, preempt_abort, register_move, replace_lost\n" "-a set the All Target Ports (ALL_TG_PT) bit\n" "-I tid specify a Transport ID, e.g.: sas,0x1234567812345678\n" "-k key specify the Reservation Key\n" "-K sa_key specify the Service Action Reservation Key\n" "-p set the Activate Persist Through Power Loss bit\n" "-R rtp specify the Relative Target Port\n" "-s scope specify the scope: lun, extent, element or a number\n" "-S specify Transport ID for register, requires -I\n" "-T res_type specify the reservation type: read_shared, wr_ex, rd_ex,\n" " ex_ac, wr_ex_ro, ex_ac_ro, wr_ex_ar, ex_ac_ar\n" "-U unregister the current initiator for register_move\n" "attrib arguments:\n" "-r action specify attr_values, attr_list, lv_list, part_list, or\n" " supp_attr\n" "-w attr specify an attribute to write, one -w argument per attr\n" "-a attr_num only display this attribute number\n" "-c get cached attributes\n" "-e elem_addr request attributes for the given element in a changer\n" "-F form1,form2 output format, comma separated list: text_esc, text_raw,\n" " nonascii_esc, nonascii_trim, nonascii_raw, field_all,\n" " field_none, field_desc, field_num, field_size, field_rw\n" "-p partition 
request attributes for the given partition\n" "-s start_attr request attributes starting at the given number\n" "-T elem_type specify the element type (used with -e)\n" "-V logical_vol specify the logical volume ID\n" "opcodes arguments:\n" "-o opcode specify the individual opcode to list\n" "-s service_action specify the service action for the opcode\n" "-N do not return SCSI error for unsupported SA\n" "-T request nominal and recommended timeout values\n" "zone arguments:\n" "-c cmd required: rz, open, close, finish, or rwp\n" "-a apply the action to all zones\n" "-l LBA specify the zone starting LBA\n" "-o rep_opts report zones options: all, empty, imp_open, exp_open,\n" " closed, full, ro, offline, reset, nonseq, nonwp\n" "-P print_opt report zones printing: normal, summary, script\n" "epc arguments:\n" "-c cmd required: restore, goto, timer, state, enable, disable,\n" " source, status, list\n" "-d disable power mode (timer, state)\n" "-D delayed entry (goto)\n" "-e enable power mode (timer, state)\n" "-H hold power mode (goto)\n" "-p power_cond Idle_a, Idle_b, Idle_c, Standby_y, Standby_z (timer,\n" " state, goto)\n" "-P only display power mode (status)\n" "-r rst_src restore settings from: default, saved (restore)\n" "-s save mode (timer, state, restore)\n" "-S power_src set power source: battery, nonbattery (source)\n" "-T timer set timer, seconds, .1 sec resolution (timer)\n" "timestamp arguments:\n" "-r report the timestamp of the device\n" "-f format report the timestamp of the device with the given\n" " strftime(3) format string\n" "-m report the timestamp of the device as milliseconds since\n" " January 1st, 1970\n" "-U report the time with UTC instead of the local time zone\n" "-s set the timestamp of the device\n" "-f format the format of the time string passed into strptime(3)\n" "-T time the time value passed into strptime(3)\n" "-U set the timestamp of the device to UTC time\n" "depop arguments:\n" "-d remove an element from service\n" "-l list status of all elements of drive\n" "-r restore all elements to service\n" "-e elm element to remove\n" "-c capacity requested new capacity\n" "mmcsdcmd arguments:\n" "-c mmc_cmd MMC command to send to the card\n" "-a mmc_arg Argument for the MMC command\n" "-f mmc_flag Flags to set for the MMC command\n" "-l data_len Expect data_len bytes of data in reply and display them\n" "-W Fill the data buffer before invoking the MMC command\n" "-b data_byte One byte of data to fill the data buffer with\n" "-F frequency Operating frequency to set on the controller\n" "-4 Set bus width to 4 bit\n" "-1 Set bus width to 8 bit\n" "-S high | std Set high-speed or standard timing\n" "-I Display various card and host controller information\n" ); } int main(int argc, char **argv) { int c; char *device = NULL; int unit = 0; struct cam_device *cam_dev = NULL; int timeout = 0, retry_count = 1; camcontrol_optret optreturn; char *tstr; const char *mainopt = "C:En:Q:t:u:v"; const char *subopt = NULL; char combinedopt[256]; int error = 0, optstart = 2; int task_attr = MSG_SIMPLE_Q_TAG; int devopen = 1; cam_cmd cmdlist; path_id_t bus; target_id_t target; lun_id_t lun; cmdlist = CAM_CMD_NONE; arglist = CAM_ARG_NONE; if (argc < 2) { usage(0); exit(1); } /* * Get the base option. 
*/ optreturn = getoption(option_table,argv[1], &cmdlist, &arglist,&subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("ambiguous option %s", argv[1]); usage(0); exit(1); } else if (optreturn == CC_OR_NOT_FOUND) { warnx("option %s not found", argv[1]); usage(0); exit(1); } /* * Ahh, getopt(3) is a pain. * * This is a gross hack. There really aren't many other good * options (excuse the pun) for parsing options in a situation like * this. getopt is kinda braindead, so you end up having to run * through the options twice, and give each invocation of getopt * the option string for the other invocation. * * You would think that you could just have two groups of options. * The first group would get parsed by the first invocation of * getopt, and the second group would get parsed by the second * invocation of getopt. It doesn't quite work out that way. When * the first invocation of getopt finishes, it leaves optind pointing * to the argument _after_ the first argument in the second group. * So when the second invocation of getopt comes around, it doesn't * recognize the first argument it gets and then bails out. * * A nice alternative would be to have a flag for getopt that says * "just keep parsing arguments even when you encounter an unknown * argument", but there isn't one. So there's no real clean way to * easily parse two sets of arguments without having one invocation * of getopt know about the other. * * Without this hack, the first invocation of getopt would work as * long as the generic arguments are first, but the second invocation * (in the subfunction) would fail in one of two ways. In the case * where you don't set optreset, it would fail because optind may be * pointing to the argument after the one it should be pointing at. * In the case where you do set optreset, and reset optind, it would * fail because getopt would run into the first set of options, which * it doesn't understand. * * All of this would "sort of" work if you could somehow figure out * whether optind had been incremented one option too far. The * mechanics of that, however, are more daunting than just giving * both invocations all of the expect options for either invocation. * * Needless to say, I wouldn't mind if someone invented a better * (non-GPL!) command line parsing interface than getopt. I * wouldn't mind if someone added more knobs to getopt to make it * work better. Who knows, I may talk myself into doing it someday, * if the standards weenies let me. As it is, it just leads to * hackery like this and causes people to avoid it in some cases. * * KDM, September 8th, 1998 */ if (subopt != NULL) sprintf(combinedopt, "%s%s", mainopt, subopt); else sprintf(combinedopt, "%s", mainopt); /* * For these options we do not parse optional device arguments and * we do not open a passthrough device. 
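 *
 * (Editor's worked example of the sprintf() merge above, not in the
 * original: with mainopt "C:En:Q:t:u:v", a subcommand whose table entry
 * supplied, say, "N:q" -- the tags command takes -N and -q per the usage
 * text -- would hand both getopt(3) passes the combined string
 * "C:En:Q:t:u:vN:q".)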
*/ if ((cmdlist == CAM_CMD_RESCAN) || (cmdlist == CAM_CMD_RESET) || (cmdlist == CAM_CMD_DEVTREE) || (cmdlist == CAM_CMD_USAGE) || (cmdlist == CAM_CMD_DEBUG)) devopen = 0; if ((devopen == 1) && (argc > 2 && argv[2][0] != '-')) { char name[30]; int rv; if (isdigit(argv[2][0])) { /* device specified as bus:target[:lun] */ rv = parse_btl(argv[2], &bus, &target, &lun, &arglist); if (rv < 2) errx(1, "numeric device specification must " "be either bus:target, or " "bus:target:lun"); /* default to 0 if lun was not specified */ if ((arglist & CAM_ARG_LUN) == 0) { lun = 0; arglist |= CAM_ARG_LUN; } optstart++; } else { if (cam_get_device(argv[2], name, sizeof name, &unit) == -1) errx(1, "%s", cam_errbuf); device = strdup(name); arglist |= CAM_ARG_DEVICE | CAM_ARG_UNIT; optstart++; } } /* * Start getopt processing at argv[2/3], since we've already * accepted argv[1..2] as the command name, and as a possible * device name. */ optind = optstart; /* * Now we run through the argument list looking for generic * options, and ignoring options that possibly belong to * subfunctions. */ while ((c = getopt(argc, argv, combinedopt))!= -1){ switch(c) { case 'C': retry_count = strtol(optarg, NULL, 0); if (retry_count < 0) errx(1, "retry count %d is < 0", retry_count); arglist |= CAM_ARG_RETRIES; break; case 'E': arglist |= CAM_ARG_ERR_RECOVER; break; case 'n': arglist |= CAM_ARG_DEVICE; tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; device = (char *)strdup(tstr); break; case 'Q': { char *endptr; int table_entry = 0; tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (isdigit(*tstr)) { task_attr = strtol(tstr, &endptr, 0); if (*endptr != '\0') { errx(1, "Invalid queue option " "%s", tstr); } } else { size_t table_size; scsi_nv_status status; table_size = sizeof(task_attrs) / sizeof(task_attrs[0]); status = scsi_get_nv(task_attrs, table_size, tstr, &table_entry, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) task_attr = task_attrs[ table_entry].value; else { errx(1, "%s option %s", (status == SCSI_NV_AMBIGUOUS)? "ambiguous" : "invalid", tstr); } } break; } case 't': timeout = strtol(optarg, NULL, 0); if (timeout < 0) errx(1, "invalid timeout %d", timeout); /* Convert the timeout from seconds to ms */ timeout *= 1000; arglist |= CAM_ARG_TIMEOUT; break; case 'u': arglist |= CAM_ARG_UNIT; unit = strtol(optarg, NULL, 0); break; case 'v': arglist |= CAM_ARG_VERBOSE; break; default: break; } } /* * For most commands we'll want to open the passthrough device * associated with the specified device. In the case of the rescan * commands, we don't use a passthrough device at all, just the * transport layer device. */ if (devopen == 1) { if (((arglist & (CAM_ARG_BUS|CAM_ARG_TARGET)) == 0) && (((arglist & CAM_ARG_DEVICE) == 0) || ((arglist & CAM_ARG_UNIT) == 0))) { errx(1, "subcommand \"%s\" requires a valid device " "identifier", argv[1]); } if ((cam_dev = ((arglist & (CAM_ARG_BUS | CAM_ARG_TARGET))? cam_open_btl(bus, target, lun, O_RDWR, NULL) : cam_open_spec_device(device,unit,O_RDWR,NULL))) == NULL) errx(1,"%s", cam_errbuf); } /* * Reset optind to 2, and reset getopt, so these routines can parse * the arguments again. 
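 *
 * (Editor's note: each subfunction then repeats the pattern used by
 * scsiopcodes() above,
 *
 *	while ((c = getopt(argc, argv, combinedopt)) != -1)
 *		switch (c) { ... }
 *
 * falling through its "default" case for the generic options that this
 * loop in main() already consumed.)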
*/ optind = optstart; optreset = 1; switch(cmdlist) { case CAM_CMD_DEVLIST: error = getdevlist(cam_dev); break; case CAM_CMD_HPA: error = atahpa(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_AMA: error = ataama(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_DEVTREE: error = getdevtree(argc, argv, combinedopt); break; case CAM_CMD_DEVTYPE: error = getdevtype(cam_dev); break; case CAM_CMD_REQSENSE: error = requestsense(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_TUR: error = testunitready(cam_dev, task_attr, retry_count, timeout, 0); break; case CAM_CMD_INQUIRY: error = scsidoinquiry(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_IDENTIFY: error = identify(cam_dev, retry_count, timeout); break; case CAM_CMD_STARTSTOP: error = scsistart(cam_dev, arglist & CAM_ARG_START_UNIT, arglist & CAM_ARG_EJECT, task_attr, retry_count, timeout); break; case CAM_CMD_RESCAN: error = dorescan_or_reset(argc, argv, 1); break; case CAM_CMD_RESET: error = dorescan_or_reset(argc, argv, 0); break; case CAM_CMD_READ_DEFECTS: error = readdefects(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_MODE_PAGE: modepage(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_SCSI_CMD: error = scsicmd(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_MMCSD_CMD: error = mmcsdcmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_CMD: error = smpcmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_RG: error = smpreportgeneral(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PC: error = smpphycontrol(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PHYLIST: error = smpphylist(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_MANINFO: error = smpmaninfo(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_DEBUG: error = camdebug(argc, argv, combinedopt); break; case CAM_CMD_TAG: error = tagcontrol(cam_dev, argc, argv, combinedopt); break; case CAM_CMD_RATE: error = ratecontrol(cam_dev, task_attr, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_FORMAT: error = scsiformat(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_REPORTLUNS: error = scsireportluns(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_READCAP: error = scsireadcapacity(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_IDLE: case CAM_CMD_STANDBY: case CAM_CMD_SLEEP: case CAM_CMD_POWER_MODE: error = atapm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_APM: case CAM_CMD_AAM: error = ataaxm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SECURITY: error = atasecurity(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_DOWNLOAD_FW: error = fwdownload(cam_dev, argc, argv, combinedopt, arglist & CAM_ARG_VERBOSE, task_attr, retry_count, timeout); break; case CAM_CMD_SANITIZE: error = sanitize(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout); break; case CAM_CMD_PERSIST: error = scsipersist(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & 
CAM_ARG_ERR_RECOVER); break; case CAM_CMD_ATTRIB: error = scsiattrib(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_OPCODES: error = scsiopcodes(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_REPROBE: error = reprobe(cam_dev); break; case CAM_CMD_ZONE: error = zone(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_EPC: error = epc(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_TIMESTAMP: error = timestamp(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_DEPOP: error = depop(cam_dev, argc, argv, combinedopt, task_attr, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_USAGE: usage(1); break; default: usage(0); error = 1; break; } if (cam_dev != NULL) cam_close_device(cam_dev); exit(error); } diff --git a/sys/cam/cam_ccb.h b/sys/cam/cam_ccb.h index 15e136e8a072..da98b98ba7d1 100644 --- a/sys/cam/cam_ccb.h +++ b/sys/cam/cam_ccb.h @@ -1,1572 +1,1592 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #ifndef _KERNEL #include #endif #include #include #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CCB memory allocation flags */ typedef enum { CAM_CCB_FROM_UMA = 0x00000001,/* CCB from a periph UMA zone */ } ccb_alloc_flags; /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_unused1 = 0x00000002, CAM_unused2 = 0x00000004, CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. */ CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */ CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */ CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */ CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */ CAM_DATA_MASK = 0x00240010,/* Data type mask */ CAM_unused3 = 0x00000100, CAM_unused4 = 0x00000200, CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_unused5 = 0x00080000, CAM_unused6 = 0x00100000, CAM_CDB_PHYS = 0x00400000,/* CDB pointer is physical */ CAM_unused7 = 0x00800000, /* Phase cognizant mode flags */ CAM_unused8 = 0x01000000, CAM_unused9 = 0x02000000, CAM_unused10 = 0x04000000, CAM_unused11 = 0x08000000, CAM_unused12 = 0x10000000, CAM_unused13 = 0x20000000, CAM_unused14 = 0x40000000, /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_unused15 = 0x10000000, CAM_unused16 = 0x20000000, CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ CAM_UNLOCKED = 0x80000000 /* Call callback without lock.
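 *
 * (Editor's note: the CAM_DIR_* and CAM_DATA_* values above are multi-bit
 * encodings rather than independent flags, so they must be masked before
 * comparison, e.g.
 *
 *	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
 *
 * a bare (flags & CAM_DIR_IN) test would also match CAM_DIR_NONE.)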
*/ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values. 
*/ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe I/O operation */ XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED, /* Placeholder for MMC / SD / SDIO I/O stuff */ XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED, /* Execute the requested NVMe Admin operation */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Query device capacity and notify GEOM */ XPT_MMC_SET_TRAN_SETTINGS = 0x40 | XPT_FC_DEV_QUEUED, XPT_MMC_GET_TRAN_SETTINGS = 0x41 | XPT_FC_DEV_QUEUED, /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ PROTO_NVME, /* NVME */ PROTO_MMCSD, /* MMC, SD, SDIO */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ XPORT_NVME, /* NVMe over PCIe */ XPORT_MMCSD, /* MMC, SD, SDIO card */ XPORT_NVMF, /* NVMe over Fabrics */ + XPORT_UFSHCI, /* Universal Flash Storage Host Interface */ } cam_xport; #define XPORT_IS_NVME(t) ((t) == XPORT_NVME || (t) == XPORT_NVMF) #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \ XPORT_IS_NVME(t) ? 
DEVSTAT_TYPE_IF_NVME : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; uint8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; uint8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; uint8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ #if BYTE_ORDER == LITTLE_ENDIAN uint16_t retry_count; uint16_t alloc_flags; /* ccb_alloc_flags */ #else uint16_t alloc_flags; /* ccb_alloc_flags */ uint16_t retry_count; #endif void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ uint32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ uint32_t flags; /* ccb_flags */ uint32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; uint32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; uint8_t serial_num[252]; uint8_t inq_flags; uint8_t serial_num_len; void *padding[2]; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; uint32_t unit_number; unsigned int generation; uint32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_ANY = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; uint32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_ANY = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, } dev_pattern_flags; struct 
device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_ANY = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; uint32_t unit_number; uint32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; uint32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; uint32_t unit_number; uint32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; uint32_t num_patterns; uint32_t pattern_buf_len; struct dev_match_pattern *patterns; uint32_t num_matches; uint32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x1a /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { uint8_t ppr_options; }; struct ccb_pathinq_settings_fc { uint64_t wwnn; /* world wide node name */ uint64_t wwpn; /* world wide port name */ uint32_t port; /* 24 bit port id, if known */ uint32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { uint32_t bitrate; /* Mbps */ }; #define NVME_DEV_NAME_LEN 52 struct ccb_pathinq_settings_nvme { uint32_t nsid; /* Namespace ID for this path */ uint32_t domain; uint8_t bus; uint8_t slot; uint8_t function; uint8_t extra; char dev_name[NVME_DEV_NAME_LEN]; /* nvme controller dev name for this device */ }; _Static_assert(sizeof(struct ccb_pathinq_settings_nvme) == 64, "ccb_pathinq_settings_nvme too big"); struct ccb_pathinq_settings_nvmf { uint32_t nsid; /* Namespace ID for this path */ uint8_t trtype; char dev_name[NVME_DEV_NAME_LEN]; /* nvme controller dev name for this device */ }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; uint8_t version_num; /* Version number for the SIM/HBA */ uint8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ uint16_t target_sprt; /* Flags for target mode support */ uint32_t hba_misc; /* Misc HBA features */ uint16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ uint8_t vuhba_flags[VUHBALEN]; uint32_t max_target; /* Maximum supported Target */ uint32_t max_lun; /* Maximum supported Lun */ uint32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ uint32_t unit_number; /* Unit number for SIM */ uint32_t bus_id; /* Bus ID for SIM */ uint32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; struct ccb_pathinq_settings_nvme nvme; struct ccb_pathinq_settings_nvmf nvmf; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. 
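 *
 * (Editor's note: peripheral drivers conventionally treat a maxio of 0
 * as "SIM predates this field" and substitute a legacy default such as
 * DFLTPHYS.)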
*/ uint16_t hba_vendor; /* HBA vendor ID */ uint16_t hba_device; /* HBA device ID */ uint16_t hba_subvendor; /* HBA subvendor ID */ uint16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. */ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { uint8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { uint8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ uint8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ uint8_t *req_map; /* Ptr to mapping info */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; uint8_t sense_len; /* Number of bytes to autosense */ uint8_t cdb_len; /* Number of bytes for the CDB */ uint16_t sglist_cnt; /* Number of SG list entries */ uint8_t scsi_status; /* Returned SCSI status */ uint8_t sense_resid; /* Autosense resid length: 2's comp */ uint32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ uint8_t *msg_ptr; /* Pointer to the message buffer */ uint16_t msg_len; /* Number of bytes for the Message */ uint8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 uint8_t priority; /* Command priority for SIMPLE tag */ u_int tag_id; /* tag id from initiator (target mode) */ u_int init_id; /* initiator id of who selected */ #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) struct bio *bio; /* Associated bio */ #endif }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code.
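 */

/*
 * Editor's sketch, not part of the original header: scsiio_cdb_ptr()
 * above hides the CAM_CDB_POINTER distinction, so a consumer can read
 * the CDB the same way for both storage modes:
 */
static __inline uint8_t
scsiio_example_opcode(struct ccb_scsiio *csio)
{
	/* byte 0 of every SCSI CDB is the operation code */
	return (scsiio_cdb_ptr(csio)[0]);
}

/*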
*/ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint32_t resid; /* Transfer residual length: 2's comp */ uint8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 #define ATA_FLAG_ICC 0x2 uint8_t icc; /* Isochronous Command Completion */ uint32_t aux; uint32_t unused; }; /* * MMC I/O Request CCB used for the XPT_MMC_IO function code. */ struct ccb_mmcio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct mmc_command cmd; struct mmc_command stop; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ uint8_t cdb_len; /* Number of bytes for the CDB */ uint8_t tag_action; /* What to do for tag queueing */ uint8_t sense_len; /* Number of bytes of Sense Data */ uint8_t priority; /* Command priority for SIMPLE tag */ u_int tag_id; /* tag id from initiator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; static __inline uint8_t * atio_cdb_ptr(struct ccb_accept_tio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; uint32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 uint32_t openings; uint32_t release_timeout; /* Abstract argument. */ uint32_t qfrozen_cnt; }; /* * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes. */ struct ccb_nvmeio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct nvme_command cmd; /* NVME command, per NVME standard */ struct nvme_completion cpl; /* NVME completion, per NVME standard */ uint8_t *data_ptr; /* Ptr to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint16_t sglist_cnt; /* Number of SG list entries */ uint16_t unused; /* padding for removed uint32_t */ }; /* * Definitions for the asynchronous callback CCB fields. */ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advanced info might have changed */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, uint32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed back which are then interpreted based on a per-system * contract number.
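 */

/*
 * Editor's sketch, not part of the original header: an async handler
 * matching the ac_callback_t typedef above might look like this
 * (example_async_cb is an illustrative name):
 */
static void
example_async_cb(void *softc, uint32_t code, struct cam_path *path,
    void *args)
{
	if (code == AC_LOST_DEVICE) {
		/* device went away; release state hung off softc */
	}
}

/*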
*/ #define AC_CONTRACT_DATA_MAX (128 - sizeof (uint64_t)) struct ac_contract { uint64_t contract_number; uint8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { uint64_t wwpn; uint32_t port; target_id_t target; uint8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; uint32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; uint8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 uint64_t wwnn; /* world wide node name */ uint64_t wwpn; /* world wide port name */ uint32_t port; /* 24 bit port id, if known */ uint32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 uint32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. 
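 *
 * (Editor's note: the low 16 bits carry the host-side capabilities and
 * the high 16 bits the device side; see the CTS_SATA_CAPS_H and
 * CTS_SATA_CAPS_D masks immediately below.)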
*/ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; struct ccb_trans_settings_nvme { u_int valid; /* Which fields to honor */ #define CTS_NVME_VALID_SPEC 0x01 #define CTS_NVME_VALID_CAPS 0x02 #define CTS_NVME_VALID_LINK 0x04 uint32_t spec; /* NVMe spec implemented -- same as vs register */ uint32_t max_xfer; /* Max transfer size (0 -> unlimited) */ uint32_t caps; uint8_t lanes; /* Number of PCIe lanes */ uint8_t speed; /* PCIe generation for each lane */ uint8_t max_lanes; /* Number of PCIe lanes */ uint8_t max_speed; /* PCIe generation for each lane */ }; struct ccb_trans_settings_nvmf { u_int valid; /* Which fields to honor */ #define CTS_NVMF_VALID_TRTYPE 0x01 uint8_t trtype; }; +struct ccb_trans_settings_ufshci +{ + u_int valid; /* Which fields to honor */ + /* + * Ensure the validity of the information for the Unipro link + * (GEAR, SPEED, LANE) + */ +#define CTS_UFSHCI_VALID_LINK 0x01 + uint32_t speed; + uint8_t hs_gear; /* High Speed Gear (G1, G2, G3...) */ + uint8_t tx_lanes; + uint8_t rx_lanes; + uint8_t max_hs_gear; /* Maximum HS Gear */ + uint8_t max_tx_lanes; + uint8_t max_rx_lanes; +}; + + #include struct ccb_trans_settings_mmc { struct mmc_ios ios; #define MMC_CLK (1 << 1) #define MMC_VDD (1 << 2) #define MMC_CS (1 << 3) #define MMC_BW (1 << 4) #define MMC_PM (1 << 5) #define MMC_BT (1 << 6) #define MMC_BM (1 << 7) #define MMC_VCCQ (1 << 8) uint32_t ios_valid; /* The following is used only for GET_TRAN_SETTINGS */ uint32_t host_ocr; int host_f_min; int host_f_max; /* Copied from sys/dev/mmc/bridge.h */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */ #define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */ #define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */ #define MMC_CAP_BOOT_NOACC (1 << 4) /* Cannot access boot partitions */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 5) /* Host waits for busy responses */ #define MMC_CAP_UHS_SDR12 (1 << 6) /* Can do UHS SDR12 */ #define MMC_CAP_UHS_SDR25 (1 << 7) /* Can do UHS SDR25 */ #define MMC_CAP_UHS_SDR50 (1 << 8) /* Can do UHS SDR50 */ #define MMC_CAP_UHS_SDR104 (1 << 9) /* Can do UHS SDR104 */ #define MMC_CAP_UHS_DDR50 (1 << 10) /* Can do UHS DDR50 */ #define MMC_CAP_MMC_DDR52_120 (1 << 11) /* Can do eMMC DDR52 at 1.2 V */ #define MMC_CAP_MMC_DDR52_180 (1 << 12) /* Can do eMMC DDR52 at 1.8 V */ #define MMC_CAP_MMC_DDR52 (MMC_CAP_MMC_DDR52_120 | MMC_CAP_MMC_DDR52_180) #define MMC_CAP_MMC_HS200_120 (1 << 13) /* Can do eMMC HS200 at 1.2 V */ #define MMC_CAP_MMC_HS200_180 (1 << 14) /* Can do eMMC HS200 at 1.8 V */ #define MMC_CAP_MMC_HS200 (MMC_CAP_MMC_HS200_120| MMC_CAP_MMC_HS200_180) #define MMC_CAP_MMC_HS400_120 (1 << 15) /* Can do eMMC HS400 at 1.2 V */ #define MMC_CAP_MMC_HS400_180 (1 << 16) /* Can do eMMC HS400 at 1.8 V */ #define MMC_CAP_MMC_HS400 (MMC_CAP_MMC_HS400_120 | MMC_CAP_MMC_HS400_180) #define MMC_CAP_MMC_HSX00_120 (MMC_CAP_MMC_HS200_120 | MMC_CAP_MMC_HS400_120) #define MMC_CAP_MMC_ENH_STROBE (1 << 17) /* Can do eMMC Enhanced Strobe */ #define MMC_CAP_SIGNALING_120 (1 << 18) /* Can do signaling at 1.2 V */ #define MMC_CAP_SIGNALING_180 (1 << 19) /* Can do signaling at 1.8 V */ #define MMC_CAP_SIGNALING_330 (1 << 20) /* Can do signaling at 3.3 V */ #define
MMC_CAP_DRIVER_TYPE_A (1 << 21) /* Can do Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 22) /* Can do Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 23) /* Can do Driver Type D */ uint32_t host_caps; uint32_t host_max_data; }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; struct ccb_trans_settings_nvme nvme; struct ccb_trans_settings_mmc mmc; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; struct ccb_trans_settings_nvme nvme; struct ccb_trans_settings_nvmf nvmf; + struct ccb_trans_settings_ufshci ufshci; } xport_specific; }; /* * Calculate the geometry parameters for a device * given the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; uint32_t block_size; uint64_t volume_size; uint32_t cylinders; uint8_t heads; uint8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; uint64_t wwnn; /* world wide node name */ uint64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; uint64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. 
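Review note: the new xport_specific.ufshci member above follows the pattern of the other transports. A minimal sketch of how a UFSHCI SIM's XPT_GET_TRAN_SETTINGS handler might fill it in; struct ufshci_softc and its fields are hypothetical placeholder names for illustration, not part of this change:

/* Hypothetical sketch only; ufshci_softc and its members are illustrative. */
static void
ufshci_get_tran_settings(struct ufshci_softc *sc, struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_ufshci *ufshci = &cts->xport_specific.ufshci;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_UFSHCI;

	ufshci->valid = CTS_UFSHCI_VALID_LINK;
	ufshci->hs_gear = sc->hs_gear;		/* e.g. 4 for HS-G4 */
	ufshci->tx_lanes = sc->tx_lanes;
	ufshci->rx_lanes = sc->rx_lanes;
	ufshci->max_hs_gear = sc->max_hs_gear;
	ufshci->max_tx_lanes = sc->max_tx_lanes;
	ufshci->max_rx_lanes = sc->max_rx_lanes;
	ufshci->speed = sc->link_speed;		/* per-lane link speed */

	cts->ccb_h.status = CAM_REQ_CMP;
}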
*/ struct ccb_en_lun { struct ccb_hdr ccb_h; uint16_t grp6_len; /* Group 6 VU CDB length */ uint16_t grp7_len; /* Group 7 VU CDB length */ uint8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; uint8_t sense_len; /* Number of bytes in sense buffer */ uint8_t initiator_id; /* Id of initiator that selected */ uint8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; uint16_t seq_id; /* Sequence identifier */ uint8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Response information */ /* * Lower byte of arg is one of RESPONSE CODE values defined below * (subset of response codes from SPL-4 and FCP-4 specifications), * upper 3 bytes are code-specific ADDITIONAL RESPONSE INFORMATION. */ #define CAM_RSP_TMF_COMPLETE 0x00 #define CAM_RSP_TMF_REJECTED 0x04 #define CAM_RSP_TMF_FAILED 0x05 #define CAM_RSP_TMF_SUCCEEDED 0x08 #define CAM_RSP_TMF_INCORRECT_LUN 0x09 }; /* HBA engine structures. */ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; uint16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ uint32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; uint8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ uint8_t *req_map; /* Ptr for mapping info on the req. */ uint8_t *data_ptr; /* Pointer to the data buf/SG list */ uint32_t dxfer_len; /* Data transfer length */ uint8_t *engdata_ptr; /* Pointer to the engine buffer data */ uint16_t sglist_cnt; /* Num of scatter gather list entries */ uint32_t dmax_len; /* Destination data maximum length */ uint32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ uint32_t timeout; /* Timeout value */ uint16_t eng_num; /* Engine number for this request */ uint16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz. 
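Review note: the sizing contract described above (the struct definition follows) supports a probe-then-fetch pattern. A hedged sketch of a kernel consumer using it; this assumes a held 'path', omits error handling, and real callers often just allocate a worst-case buffer up front instead:

	/* Sketch: probe required size, then fetch. Illustrative only. */
	struct ccb_dev_advinfo cdai;

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_NONE;
	cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	cdai.bufsiz = 0;		/* too small on purpose... */
	cdai.buf = NULL;
	xpt_action((union ccb *)&cdai);
	/* ...so provsiz now reports the size needed; allocate and repeat. */
	if (cam_ccb_success((union ccb *)&cdai) && cdai.provsiz > 0) {
		cdai.buf = malloc(cdai.provsiz, M_TEMP, M_WAITOK | M_ZERO);
		cdai.bufsiz = cdai.provsiz;
		xpt_action((union ccb *)&cdai);
		/* use cdai.buf, then free(cdai.buf, M_TEMP) */
	}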
*/ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 #define CDAI_TYPE_NVME_CNTRL 6 /* NVMe Identify Controller data */ #define CDAI_TYPE_NVME_NS 7 /* NVMe Identify Namespace data */ #define CDAI_TYPE_MMC_PARAMS 8 /* MMC/SD ident */ off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is a uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; struct ccb_nvmeio nvmeio; struct ccb_mmcio mmcio; }; #define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \ bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \ sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h)) __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t tag_action, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint8_t cdb_len, uint32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; csio->priority = 0; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) csio->bio = NULL; #endif } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; 
csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->priority = 0; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, u_int tag_action __unused, uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags, struct mmc_data *mmc_d, uint32_t timeout) { mmcio->ccb_h.func_code = XPT_MMC_IO; mmcio->ccb_h.flags = flags; mmcio->ccb_h.retry_count = retries; mmcio->ccb_h.cbfcnp = cbfcnp; mmcio->ccb_h.timeout = timeout; mmcio->cmd.opcode = mmc_opcode; mmcio->cmd.arg = mmc_arg; mmcio->cmd.flags = mmc_flags; mmcio->stop.opcode = 0; mmcio->stop.arg = 0; mmcio->stop.flags = 0; if (mmc_d != NULL) { mmcio->cmd.data = mmc_d; } else mmcio->cmd.data = NULL; mmcio->cmd.resp[0] = 0; mmcio->cmd.resp[1] = 0; mmcio->cmd.resp[2] = 0; mmcio->cmd.resp[3] = 0; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } static inline bool cam_ccb_success(union ccb *ccb) { return (cam_ccb_status(ccb) == CAM_REQ_CMP); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); static __inline void cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_IO; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len = dxfer_len; } static __inline void cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout) { nvmeio->ccb_h.func_code = XPT_NVME_ADMIN; nvmeio->ccb_h.flags = flags; nvmeio->ccb_h.retry_count = retries; nvmeio->ccb_h.cbfcnp = cbfcnp; nvmeio->ccb_h.timeout = timeout; nvmeio->data_ptr = data_ptr; nvmeio->dxfer_len 
= dxfer_len; } __END_DECLS #endif /* _CAM_CAM_CCB_H */ diff --git a/sys/cam/scsi/scsi_xpt.c b/sys/cam/scsi/scsi_xpt.c index 2bb59cb2d92b..439dd2050a95 100644 --- a/sys/cam/scsi/scsi_xpt.c +++ b/sys/cam/scsi/scsi_xpt.c @@ -1,3168 +1,3169 @@ /*- * Implementation of the SCSI Transport * * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct scsi_quirk_entry { struct scsi_inquiry_pattern inq_pat; uint8_t quirks; #define CAM_QUIRK_NOLUNS 0x01 #define CAM_QUIRK_NOVPDS 0x02 #define CAM_QUIRK_HILUNS 0x04 #define CAM_QUIRK_NOHILUNS 0x08 #define CAM_QUIRK_NORPTLUNS 0x10 u_int mintags; u_int maxtags; }; #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk)) static int cam_srch_hi = 0; SYSCTL_INT(_kern_cam, OID_AUTO, cam_srch_hi, CTLFLAG_RWTUN, &cam_srch_hi, 0, "Search above LUN 7 for SCSI3 and greater devices"); #define CAM_SCSI2_MAXLUN 8 #define CAM_CAN_GET_SIMPLE_LUN(x, i) \ ((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) || \ (((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_FLAT)) #define CAM_GET_SIMPLE_LUN(lp, i, lval) \ if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) == \ RPL_LUNDATA_ATYP_PERIPH) { \ (lval) = (lp)->luns[(i)].lundata[1]; \ } else { \ (lval) = (lp)->luns[(i)].lundata[0]; \ (lval) &= RPL_LUNDATA_FLAT_LUN_MASK; \ (lval) <<= 8; \ (lval) |= (lp)->luns[(i)].lundata[1]; \ } #define CAM_GET_LUN(lp, i, lval) \ (lval) = scsi_8btou64((lp)->luns[(i)].lundata); \ (lval) = CAM_EXTLUN_BYTE_SWIZZLE(lval); /* * If we're not quirked to search <= the first 8 luns * and we are either quirked to search above lun 8, * or we're > SCSI-2 and we've enabled hilun searching, * or we're > SCSI-2 and the last lun was a success, * we can look for luns above lun 8. 
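Review note on the two macros that follow: CAM_QUIRK_NOHILUNS unconditionally forbids probing past LUN 7, and CAM_QUIRK_HILUNS unconditionally allows it; otherwise a device must report SCSI-3 or newer, with the sparse case additionally gated by the kern.cam.cam_srch_hi tunable above, while the dense case is applied by the scan logic only while consecutive LUNs keep responding.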
*/ #define CAN_SRCH_HI_SPARSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi))) #define CAN_SRCH_HI_DENSE(dv) \ (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0) \ && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS) \ || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))) static periph_init_t probe_periph_init; static struct periph_driver probe_driver = { probe_periph_init, "probe", TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(probe, probe_driver); typedef enum { PROBE_TUR, PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */ PROBE_FULL_INQUIRY, PROBE_REPORT_LUNS, PROBE_MODE_SENSE, PROBE_SUPPORTED_VPD_LIST, PROBE_DEVICE_ID, PROBE_EXTENDED_INQUIRY, PROBE_SERIAL_NUM, PROBE_TUR_FOR_NEGOTIATION, PROBE_INQUIRY_BASIC_DV1, PROBE_INQUIRY_BASIC_DV2, PROBE_DV_EXIT, PROBE_DONE, PROBE_INVALID } probe_action; static char *probe_action_text[] = { "PROBE_TUR", "PROBE_INQUIRY", "PROBE_FULL_INQUIRY", "PROBE_REPORT_LUNS", "PROBE_MODE_SENSE", "PROBE_SUPPORTED_VPD_LIST", "PROBE_DEVICE_ID", "PROBE_EXTENDED_INQUIRY", "PROBE_SERIAL_NUM", "PROBE_TUR_FOR_NEGOTIATION", "PROBE_INQUIRY_BASIC_DV1", "PROBE_INQUIRY_BASIC_DV2", "PROBE_DV_EXIT", "PROBE_DONE", "PROBE_INVALID" }; #define PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { PROBE_INQUIRY_CKSUM = 0x01, PROBE_NO_ANNOUNCE = 0x04, PROBE_EXTLUN = 0x08 } probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; probe_action action; probe_flags flags; MD5_CTX context; uint8_t digest[16]; struct cam_periph *periph; } probe_softc; static const char quantum[] = "QUANTUM"; static const char sony[] = "SONY"; static const char west_digital[] = "WDIGTL"; static const char samsung[] = "SAMSUNG"; static const char seagate[] = "SEAGATE"; static const char microp[] = "MICROP"; static struct scsi_quirk_entry scsi_quirk_table[] = { { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Unfortunately, the Quantum Atlas III has the same * problem as the Atlas II drives above. * Reported by: "Johan Granlund" * * For future reference, the drive with the problem was: * QUANTUM QM39100TD-SW N1B0 * * It's possible that Quantum will fix the problem in later * firmware revisions. If that happens, the quirk entry * will need to be made specific to the firmware revisions * with the problem. 
* */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * 18 Gig Atlas III, same problem as the 9G version. * Reported by: Andre Albsmeier * * * For future reference, the drive with the problem was: * QUANTUM QM318000TD-S N491 */ /* Reports QUEUE FULL for temporary resource shortages */ { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" }, /*quirks*/0, /*mintags*/24, /*maxtags*/32 }, { /* * Broken tagged queuing drive * Reported by: Bret Ford * and: Martin Renters */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, /* * The Seagate Medalist Pro drives have very poor write * performance with anything more than 2 tags. * * Reported by: Paul van der Zwan * Drive: * * Reported by: Jeremy Lea * Drive: * * No one has actually reported that the 9G version * (ST39140*) of the Medalist Pro has the same problem, but * we're assuming that it does because the 4G and 6.5G * versions of the drive are broken. */ { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/2 }, { /* * Experiences command timeouts under load with a * tag count higher than 55. */ { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST3146855LW", "*"}, /*quirks*/0, /*mintags*/2, /*maxtags*/55 }, { /* * Slow when tagged queueing is enabled. Write performance * steadily drops off with more and more concurrent * transactions. Best sequential write performance with * tagged queueing turned off and write caching turned on. * * PR: kern/10398 * Submitted by: Hideaki Okada * Drive: DCAS-34330 w/ "S65A" firmware. * * The drive with the problem had the "S65A" firmware * revision, and has also been reported (by Stephen J. * Roznowski ) for a drive with the "S61A" * firmware revision. * * Although no one has reported problems with the 2 gig * version of the DCAS drive, the assumption is that it * has the same problems as the 4 gig version. Therefore * this quirk entry disables tagged queueing for all * DCAS drives. */ { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* Broken tagged queuing drive */ { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* This does not support other than LUN 0 */ { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * Broken tagged queuing drive. * Submitted by: * NAKAJI Hiroyuki * in PR kern/9535 */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.) * Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Slow when tagged queueing is enabled. (1.5MB/sec versus * 8MB/sec.) 
* Submitted by: Andrew Gallatin * Best performance with these drives is achieved with * tagged queueing turned off, and write caching turned on. */ { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" }, /*quirks*/0, /*mintags*/0, /*maxtags*/0 }, { /* * Doesn't handle queue full condition correctly, * so we need to limit maxtags to what the device * can handle instead of determining this automatically. */ { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/32 }, { /* Really only one LUN */ { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* I can't believe we need a quirk for DPT volumes. */ { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/255 }, { /* * Many Sony CDROM drives don't like multi-LUN probing. */ { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * This drive doesn't like multiple LUN probing. * Submitted by: Parag Patel */ { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * The 8200 doesn't like multi-lun probing, and probably * doesn't like serial number requests either. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "EXB-8200*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Let's try the same as above, but for a drive that says * it's an IPL-6860 but is actually an EXB 8200. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE", "IPL-6860*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These Hitachi drives don't like multi-lun probing. * The PR submitter has a DK319H, but says that the Linux * kernel has a similar work-around for the DK312 and DK314, * so all DK31* drives are quirked here. * PR: misc/18793 * Submitted by: Paul Haddad */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255 }, { /* * The Hitachi CJ series with J8A8 firmware apparently has * problems with tagged commands. * PR: 23536 * Reported by: amagai@nue.org */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * These are the large storage arrays. * Submitted by: William Carrel */ { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" }, CAM_QUIRK_HILUNS, 2, 1024 }, { /* * This old revision of the TDC3600 is also SCSI-1, and * hangs upon serial number probing. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:" }, CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for. */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER", "CP150", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* * Would respond to all LUNs if asked for. 
*/ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "96X2*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* Submitted by: Matthew Dodd */ { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* TeraSolutions special settings for TRC-22 RAID */ { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" }, /*quirks*/0, /*mintags*/55, /*maxtags*/255 }, { /* Veritas Storage Appliance */ { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" }, CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024 }, { /* * Would respond to all LUNs. Device type and removable * flag are jumper-selectable. */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix", "Tahiti 1", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { /* EasyRAID E5A aka. areca ARC-6010 */ { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" }, CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255 }, { { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" }, CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Garmin", "*", "*" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "STORAGE DEVICE*", "120?" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "MassStorageClass", "1533" }, CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255 }, { /* Default tagged queuing parameters for all devices */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0, /*mintags*/2, /*maxtags*/255 }, }; static cam_status proberegister(struct cam_periph *periph, void *arg); static void probeschedule(struct cam_periph *probe_periph); static void probestart(struct cam_periph *periph, union ccb *start_ccb); static void proberequestdefaultnegotiation(struct cam_periph *periph); static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device); static void probedone(struct cam_periph *periph, union ccb *done_ccb); static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags); static void probecleanup(struct cam_periph *periph); static void scsi_find_quirk(struct cam_ed *device); static void scsi_scan_bus(struct cam_periph *periph, union ccb *ccb); static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void scsi_devise_transport(struct cam_path *path); static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update); static void scsi_toggle_tags(struct cam_path *path); static void scsi_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void scsi_action(union ccb *start_ccb); static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb); static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb); static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb); static void scsi_proto_debug_out(union ccb *ccb); static void _scsi_announce_periph(struct cam_periph *, u_int *, u_int *, struct ccb_trans_settings *); static struct xpt_xport_ops 
scsi_xport_ops = { .alloc_device = scsi_alloc_device, .action = scsi_action, .async = scsi_dev_async, .announce_sbuf = scsi_announce_periph_sbuf, }; #define SCSI_XPT_XPORT(x, X) \ static struct xpt_xport scsi_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &scsi_xport_ops, \ }; \ CAM_XPT_XPORT(scsi_xport_ ## x); SCSI_XPT_XPORT(spi, SPI); SCSI_XPT_XPORT(sas, SAS); SCSI_XPT_XPORT(fc, FC); SCSI_XPT_XPORT(usb, USB); SCSI_XPT_XPORT(iscsi, ISCSI); SCSI_XPT_XPORT(srp, SRP); SCSI_XPT_XPORT(ppb, PPB); +SCSI_XPT_XPORT(ufshci, UFSHCI); #undef SCSI_XPT_XPORT static struct xpt_proto_ops scsi_proto_ops = { .announce_sbuf = scsi_proto_announce_sbuf, .denounce_sbuf = scsi_proto_denounce_sbuf, .debug_out = scsi_proto_debug_out, }; static struct xpt_proto scsi_proto = { .proto = PROTO_SCSI, .name = "scsi", .ops = &scsi_proto_ops, }; CAM_XPT_PROTO(scsi_proto); static void probe_periph_init(void) { } static cam_status proberegister(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("proberegister: no probe CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT); if (softc == NULL) { printf("proberegister: Unable to probe new device. Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = PROBE_INVALID; if (cam_periph_acquire(periph) != 0) return (CAM_REQ_CMP_ERR); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); scsi_devise_transport(periph->path); /* * Ensure we've waited at least a bus settle * delay before attempting to probe the device. * For HBAs that don't do bus resets, this won't make a difference. */ cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, scsi_delay); probeschedule(periph); return(CAM_REQ_CMP); } static void probeschedule(struct cam_periph *periph) { struct ccb_pathinq cpi; union ccb *ccb; probe_softc *softc; softc = (probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); xpt_path_inq(&cpi, periph->path); /* * If a device has gone away and another device, or the same one, * is back in the same place, it should have a unit attention * condition pending. It will not report the unit attention in * response to an inquiry, which may leave invalid transfer * negotiations in effect. The TUR will reveal the unit attention * condition. Only send the TUR for lun 0, since some devices * will get confused by commands other than inquiry to non-existent * luns. If you think a device has gone away start your scan from * lun 0. This will ensure that any bogus transfer settings are * invalidated. * * If we haven't seen the device before and the controller supports * some kind of transfer negotiation, negotiate with the first * sent command if no bus reset was performed at startup. This * ensures that the device is not confused by transfer negotiation * settings left over by loader or BIOS action. 
*/ if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) && (ccb->ccb_h.target_lun == 0)) { PROBE_SET_ACTION(softc, PROBE_TUR); } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { proberequestdefaultnegotiation(periph); PROBE_SET_ACTION(softc, PROBE_INQUIRY); } else { PROBE_SET_ACTION(softc, PROBE_INQUIRY); } if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= PROBE_NO_ANNOUNCE; else softc->flags &= ~PROBE_NO_ANNOUNCE; if (cpi.hba_misc & PIM_EXTLUNS) softc->flags |= PROBE_EXTLUN; else softc->flags &= ~PROBE_EXTLUN; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void probestart(struct cam_periph *periph, union ccb *start_ccb) { /* Probe the device that our peripheral driver points to */ struct ccb_scsiio *csio; probe_softc *softc; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); softc = (probe_softc *)periph->softc; csio = &start_ccb->csio; again: switch (softc->action) { case PROBE_TUR: case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: { scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; /* * If the device is currently configured, we calculate an * MD5 checksum of the inquiry data, and if the serial number * length is greater than 0, add the serial number data * into the checksum as well. Once the inquiry and the * serial number check finish, we attempt to figure out * whether we still have the same device. */ if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { softc->flags &= ~PROBE_INQUIRY_CKSUM; } else if ((softc->flags & PROBE_INQUIRY_CKSUM) == 0) { MD5Init(&softc->context); MD5Update(&softc->context, (unsigned char *)inq_buf, sizeof(struct scsi_inquiry_data)); if (periph->path->device->serial_num_len > 0) { MD5Update(&softc->context, periph->path->device->serial_num, periph->path->device->serial_num_len); } MD5Final(softc->digest, &softc->context); softc->flags |= PROBE_INQUIRY_CKSUM; } if (softc->action == PROBE_INQUIRY) inquiry_len = SHORT_INQUIRY_LENGTH; else inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); /* * Some parallel SCSI devices fail to send an * ignore wide residue message when dealing with * odd length inquiry requests. Round up to be * safe. 
*/ inquiry_len = roundup2(inquiry_len, 2); scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } case PROBE_REPORT_LUNS: { void *rp; rp = malloc(periph->path->target->rpl_size, M_CAMXPT, M_NOWAIT | M_ZERO); if (rp == NULL) { struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; xpt_print(periph->path, "Unable to alloc report luns storage\n"); if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); goto again; } scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG, RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size, SSD_FULL_SIZE, 60000); break; } case PROBE_MODE_SENSE: { void *mode_buf; int mode_buf_len; mode_buf_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct scsi_control_page); mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT); if (mode_buf != NULL) { scsi_mode_sense(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SMS_CONTROL_MODE_PAGE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60000); break; } xpt_print(periph->path, "Unable to mode sense control page - malloc failure\n"); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); } /* FALLTHROUGH */ case PROBE_SUPPORTED_VPD_LIST: { struct scsi_vpd_supported_page_list *vpd_list; struct cam_ed *device; vpd_list = NULL; device = periph->path->device; if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0) vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT, M_NOWAIT | M_ZERO); if (vpd_list != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)vpd_list, sizeof(*vpd_list), /*evpd*/TRUE, SVPD_SUPPORTED_PAGE_LIST, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } done: /* * We'll have to do without, let our probedone * routine finish up for us. */ start_ccb->csio.data_ptr = NULL; cam_freeze_devq(periph->path); cam_periph_doacquire(periph); probedone(periph, start_ccb); return; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; devid = NULL; if (scsi_vpd_supported_page(periph, SVPD_DEVICE_ID)) devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT, M_NOWAIT | M_ZERO); if (devid != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)devid, SVPD_DEVICE_ID_MAX_SIZE, /*evpd*/TRUE, SVPD_DEVICE_ID, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; ext_inq = NULL; if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA)) ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT, M_NOWAIT | M_ZERO); if (ext_inq != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)ext_inq, sizeof(*ext_inq), /*evpd*/TRUE, SVPD_EXTENDED_INQUIRY_DATA, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } /* * We'll have to do without, let our probedone * routine finish up for us. 
*/ goto done; } case PROBE_SERIAL_NUM: { struct scsi_vpd_unit_serial_number *serial_buf; struct cam_ed* device; serial_buf = NULL; device = periph->path->device; if (device->serial_num != NULL) { free(device->serial_num, M_CAMXPT); device->serial_num = NULL; device->serial_num_len = 0; } if (scsi_vpd_supported_page(periph, SVPD_UNIT_SERIAL_NUMBER)) serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO); if (serial_buf != NULL) { scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)serial_buf, sizeof(*serial_buf), /*evpd*/TRUE, SVPD_UNIT_SERIAL_NUMBER, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } goto done; } case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { u_int inquiry_len; struct scsi_inquiry_data *inq_buf; inq_buf = &periph->path->device->inq_data; inquiry_len = roundup2(SID_ADDITIONAL_LENGTH(inq_buf), 2); inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT); if (inq_buf == NULL) { xpt_print(periph->path, "malloc failure- skipping Basic Domain Validation\n"); PROBE_SET_ACTION(softc, PROBE_DV_EXIT); scsi_test_unit_ready(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, /*timeout*/60000); break; } scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG, (uint8_t *)inq_buf, inquiry_len, /*evpd*/FALSE, /*page_code*/0, SSD_MIN_SIZE, /*timeout*/60 * 1000); break; } default: panic("probestart: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; cam_periph_doacquire(periph); xpt_action(start_ccb); } static void proberequestdefaultnegotiation(struct cam_periph *periph) { struct ccb_trans_settings cts; memset(&cts, 0, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_USER_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { return; } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); } /* * Backoff Negotiation Code- only pertinent for SPI devices. */ static int proberequestbackoff(struct cam_periph *periph, struct cam_ed *device) { struct ccb_trans_settings cts; struct ccb_trans_settings_spi *spi; memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) { if (bootverbose) { xpt_print(periph->path, "failed to get current device settings\n"); } return (0); } if (cts.transport != XPORT_SPI) { if (bootverbose) { xpt_print(periph->path, "not SPI transport\n"); } return (0); } spi = &cts.xport_specific.spi; /* * We cannot renegotiate sync rate if we don't have one. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate known\n"); } return (0); } /* * We'll assert that we don't have to touch PPR options- the * SIM will see what we do with period and offset and adjust * the PPR options as appropriate. */ /* * A sync rate with unknown or zero offset is nonsensical. * A sync period of zero means Async. 
*/ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0 || spi->sync_offset == 0 || spi->sync_period == 0) { if (bootverbose) { xpt_print(periph->path, "no sync rate available\n"); } return (0); } if (device->flags & CAM_DEV_DV_HIT_BOTTOM) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("hit async: giving up on DV\n")); return (0); } /* * Jump sync_period up by one, but stop at 5MHz and fall back to Async. * We don't try to remember 'last' settings to see if the SIM actually * gets into the speed we want to set. We check on the SIM telling * us that a requested speed is bad, but otherwise don't try to * check the speed due to the asynchronous and handshake nature * of speed setting. */ spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; for (;;) { spi->sync_period++; if (spi->sync_period >= 0xf) { spi->sync_period = 0; spi->sync_offset = 0; CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("setting to async for DV\n")); /* * Once we hit async, we don't want to try * any more settings. */ device->flags |= CAM_DEV_DV_HIT_BOTTOM; } else if (bootverbose) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: period 0x%x\n", spi->sync_period)); printf("setting period to 0x%x\n", spi->sync_period); } cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb *)&cts); if (cam_ccb_status((union ccb *)&cts) == CAM_REQ_CMP) { break; } CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("DV: failed to set period 0x%x\n", spi->sync_period)); if (spi->sync_period == 0) { return (0); } } return (1); } #define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP) static void probedone(struct cam_periph *periph, union ccb *done_ccb) { probe_softc *softc; struct cam_path *path; struct scsi_inquiry_data *inq_buf; uint32_t priority; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); softc = (probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; cam_periph_assert(periph, MA_OWNED); switch (softc->action) { case PROBE_TUR: { if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, SF_NO_PRINT) == ERESTART) { outr: /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } PROBE_SET_ACTION(softc, PROBE_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); out: /* Drop freeze taken due to CAM_DEV_QFREEZE and release. */ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); return; } case PROBE_INQUIRY: case PROBE_FULL_INQUIRY: { if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { uint8_t periph_qual; path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; scsi_find_quirk(path->device); inq_buf = &path->device->inq_data; periph_qual = SID_QUAL(inq_buf); if (periph_qual == SID_QUAL_LU_CONNECTED || periph_qual == SID_QUAL_LU_OFFLINE) { /* * We conservatively request only * SHORT_INQUIRY_LENGTH bytes of inquiry * information during our first try * at sending an INQUIRY. If the device * has more information to give, * perform a second request specifying * the amount of information the device * is willing to give. 
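Worked example of the two-step INQUIRY sizing just described: SHORT_INQUIRY_LENGTH is 36 bytes, and SID_ADDITIONAL_LENGTH() — by my reading of scsi_all.h, an assumption worth checking — evaluates to additional_length + 5, so a device reporting additional_length 0x9b advertises 160 valid bytes; 160 > 36, so a second, full-length INQUIRY is issued, with the length rounded up to an even count in probestart per the wide-residue workaround noted earlier.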
*/ if (softc->action == PROBE_INQUIRY && SID_ADDITIONAL_LENGTH(inq_buf) > SHORT_INQUIRY_LENGTH) { PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } scsi_devise_transport(path); if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); /* * Start with room for *one* lun. */ periph->path->target->rpl_size = 16; } else if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (path->device->lun_id == 0 && SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 && (SCSI_QUIRK(path->device)->quirks & CAM_QUIRK_NORPTLUNS) == 0) { PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); periph->path->target->rpl_size = 16; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } } else if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA) == ERESTART) { goto outr; } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID; } /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) /* Send the async notification. */ xpt_async(AC_LOST_DEVICE, path, NULL); PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_REPORT_LUNS: { struct ccb_scsiio *csio; struct scsi_report_luns_data *lp; u_int nlun, maxlun; csio = &done_ccb->csio; lp = (struct scsi_report_luns_data *)csio->data_ptr; nlun = scsi_4btoul(lp->length) / 8; maxlun = (csio->dxfer_len / 8) - 1; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, done_ccb->ccb_h.target_lun > 0 ? SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA) == ERESTART) { goto outr; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { xpt_release_devq(done_ccb->ccb_h.path, 1, TRUE); } free(lp, M_CAMXPT); lp = NULL; } else if (nlun > maxlun) { /* * Reallocate and retry to cover all luns */ CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: reallocating REPORT_LUNS for %u luns\n", nlun)); free(lp, M_CAMXPT); path->target->rpl_size = (nlun << 3) + 8; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } else if (nlun == 0) { /* * If there don't appear to be any luns, bail. */ free(lp, M_CAMXPT); lp = NULL; } else { lun_id_t lun; int idx; CAM_DEBUG(path, CAM_DEBUG_PROBE, ("Probe: %u lun(s) reported\n", nlun)); CAM_GET_LUN(lp, 0, lun); /* * If the first lun is not lun 0, then either there * is no lun 0 in the list, or the list is unsorted. 
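Worked example of the REPORT LUNS sizing above: rpl_size starts at 16 — an 8-byte header plus room for exactly one 8-byte LUN entry — so maxlun = 16/8 - 1 = 1. A target reporting four LUNs returns a length field of 32, giving nlun = 4 > maxlun, so the buffer is reallocated at rpl_size = (4 << 3) + 8 = 40 bytes and the REPORT LUNS command is reissued.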
*/ if (lun != 0) { for (idx = 0; idx < nlun; idx++) { CAM_GET_LUN(lp, idx, lun); if (lun == 0) { break; } } if (idx != nlun) { uint8_t tlun[8]; memcpy(tlun, lp->luns[0].lundata, 8); memcpy(lp->luns[0].lundata, lp->luns[idx].lundata, 8); memcpy(lp->luns[idx].lundata, tlun, 8); CAM_DEBUG(path, CAM_DEBUG_PROBE, ("lun 0 in position %u\n", idx)); } } /* * If we have an old lun list, we can either * retest luns that appear to have been dropped, * or just nuke them. We'll opt for the latter. * This function will also install the new list * in the target structure. */ probe_purge_old(path, lp, softc->flags); lp = NULL; /* The processing above should either exit via a `goto * out` or leave the `lp` variable `NULL` and (if * applicable) `free()` the storage to which it had * pointed. Assert here that this is the case. */ KASSERT(lp == NULL, ("%s: lp is not NULL", __func__)); inq_buf = &path->device->inq_data; if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID && (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED || SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) { if (INQ_DATA_TQ_ENABLED(inq_buf)) PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); else PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } PROBE_SET_ACTION(softc, PROBE_INVALID); xpt_release_ccb(done_ccb); break; } case PROBE_MODE_SENSE: { struct ccb_scsiio *csio; struct scsi_mode_header_6 *mode_hdr; csio = &done_ccb->csio; mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { struct scsi_control_page *page; uint8_t *offset; offset = ((uint8_t *)&mode_hdr[1]) + mode_hdr->blk_desc_len; page = (struct scsi_control_page *)offset; path->device->queue_flags = page->queue_flags; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } xpt_release_ccb(done_ccb); free(mode_hdr, M_CAMXPT); PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST); xpt_schedule(periph, priority); goto out; } case PROBE_SUPPORTED_VPD_LIST: { struct ccb_scsiio *csio; struct scsi_vpd_supported_page_list *page_list; csio = &done_ccb->csio; page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr; if (path->device->supported_vpds != NULL) { free(path->device->supported_vpds, M_CAMXPT); path->device->supported_vpds = NULL; path->device->supported_vpds_len = 0; } if (page_list == NULL) { /* * Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { /* Got vpd list */ path->device->supported_vpds_len = page_list->length + SVPD_SUPPORTED_PAGES_HDR_LEN; path->device->supported_vpds = (uint8_t *)page_list; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_DEVICE_ID); xpt_schedule(periph, priority); goto out; } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } if (page_list) free(page_list, M_CAMXPT); /* No VPDs available, skip to device check. 
*/ csio->data_ptr = NULL; goto probe_device_check; } case PROBE_DEVICE_ID: { struct scsi_vpd_device_id *devid; struct ccb_scsiio *csio; uint32_t length = 0; csio = &done_ccb->csio; devid = (struct scsi_vpd_device_id *)csio->data_ptr; /* Clean up from previous instance of this device */ if (path->device->device_id != NULL) { path->device->device_id_len = 0; free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; } if (devid == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(devid->length); if (length != 0) { /* * NB: device_id_len is actual response * size, not buffer size. */ path->device->device_id_len = length + SVPD_DEVICE_ID_HDR_LEN; path->device->device_id = (uint8_t *)devid; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the device id space if we don't use it */ if (devid && length == 0) free(devid, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY); xpt_schedule(periph, priority); goto out; } case PROBE_EXTENDED_INQUIRY: { struct scsi_vpd_extended_inquiry_data *ext_inq; struct ccb_scsiio *csio; int32_t length = 0; csio = &done_ccb->csio; ext_inq = (struct scsi_vpd_extended_inquiry_data *) csio->data_ptr; if (path->device->ext_inq != NULL) { path->device->ext_inq_len = 0; free(path->device->ext_inq, M_CAMXPT); path->device->ext_inq = NULL; } if (ext_inq == NULL) { /* Don't process the command as it was never sent */ } else if (CCB_COMPLETED_OK(csio->ccb_h)) { length = scsi_2btoul(ext_inq->page_length) + __offsetof(struct scsi_vpd_extended_inquiry_data, flags1); length = min(length, sizeof(*ext_inq)); length -= csio->resid; if (length > 0) { path->device->ext_inq_len = length; path->device->ext_inq = (uint8_t *)ext_inq; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* Free the extended inquiry space if we don't use it */ if (ext_inq && length <= 0) free(ext_inq, M_CAMXPT); xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM); xpt_schedule(periph, priority); goto out; } probe_device_check: case PROBE_SERIAL_NUM: { struct ccb_scsiio *csio; struct scsi_vpd_unit_serial_number *serial_buf; uint32_t priority; int changed; int have_serialnum; changed = 1; have_serialnum = 0; csio = &done_ccb->csio; priority = done_ccb->ccb_h.pinfo.priority; serial_buf = (struct scsi_vpd_unit_serial_number *)csio->data_ptr; if (serial_buf == NULL) { /* * Don't process the command as it was never sent */ } else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP && (serial_buf->length > 0)) { have_serialnum = 1; path->device->serial_num = (uint8_t *)malloc((serial_buf->length + 1), M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { int start, slen; start = strspn(serial_buf->serial_num, " "); slen = serial_buf->length - start; if (slen <= 0) { /* * SPC5r05 says that an all-space serial * number means no product serial number * is available */ slen = 0; } /* * In apparent violation of the spec, some * devices pad their serial numbers with * trailing spaces. Remove them. 
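Concrete example of the trimming that follows: for a 10-byte serial field containing "  ABC123  ", strspn() gives start = 2, so slen = 8; the loop below then strips the two trailing blanks, leaving the 6-byte, NUL-terminated string "ABC123".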
*/ while (slen > 0 && serial_buf->serial_num[start + slen - 1] == ' ') slen--; memcpy(path->device->serial_num, &serial_buf->serial_num[start], slen); path->device->serial_num_len = slen; path->device->serial_num[slen] = '\0'; } } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT) == ERESTART) { goto outr; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Let's see if we have seen this device before. */ if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { MD5_CTX context; uint8_t digest[16]; MD5Init(&context); MD5Update(&context, (unsigned char *)&path->device->inq_data, sizeof(struct scsi_inquiry_data)); if (have_serialnum) MD5Update(&context, path->device->serial_num, path->device->serial_num_len); MD5Final(digest, &context); if (bcmp(softc->digest, digest, 16) == 0) changed = 0; /* * XXX Do we need to do a TUR in order to ensure * that the device really hasn't changed??? */ if ((changed != 0) && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) xpt_async(AC_LOST_DEVICE, path, NULL); } if (serial_buf != NULL) free(serial_buf, M_CAMXPT); if (changed != 0) { /* * Now that we have all the necessary * information to safely perform transfer * negotiations... Controllers don't perform * any negotiation or tagged queuing until * after the first XPT_SET_TRAN_SETTINGS ccb is * received. So, on a new device, just retrieve * the user settings, and set them as the current * settings to set the device up. */ proberequestdefaultnegotiation(periph); xpt_release_ccb(done_ccb); /* * Perform a TUR to allow the controller to * perform any necessary transfer negotiation. */ PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); xpt_schedule(periph, priority); goto out; } xpt_release_ccb(done_ccb); break; } case PROBE_TUR_FOR_NEGOTIATION: case PROBE_DV_EXIT: if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY) == ERESTART) goto outr; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } /* * Do Domain Validation for lun 0 on devices that claim * to support Synchronous Transfer modes. 
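For orientation, the Basic Domain Validation flow driven below is: PROBE_TUR_FOR_NEGOTIATION -> PROBE_INQUIRY_BASIC_DV1 -> PROBE_INQUIRY_BASIC_DV2 -> announce; a miscompare at either DV step calls proberequestbackoff() to drop the sync rate and loops back through PROBE_TUR_FOR_NEGOTIATION, until the inquiry data compares equal or the rate bottoms out at async and the probe exits via PROBE_DV_EXIT.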
*/ if (softc->action == PROBE_TUR_FOR_NEGOTIATION && done_ccb->ccb_h.target_lun == 0 && (path->device->inq_data.flags & SID_Sync) != 0 && (path->device->flags & CAM_DEV_IN_DV) == 0) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Begin Domain Validation\n")); path->device->flags |= CAM_DEV_IN_DV; xpt_release_ccb(done_ccb); PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_DV_EXIT) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; case PROBE_INQUIRY_BASIC_DV1: case PROBE_INQUIRY_BASIC_DV2: { struct scsi_inquiry_data *nbuf; struct ccb_scsiio *csio; if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, SF_NO_PRINT | SF_NO_RECOVERY | SF_NO_RETRY) == ERESTART) goto outr; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE); } csio = &done_ccb->csio; nbuf = (struct scsi_inquiry_data *)csio->data_ptr; if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) { xpt_print(path, "inquiry data fails comparison at DV%d step\n", softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2); if (proberequestbackoff(periph, path->device)) { path->device->flags &= ~CAM_DEV_IN_DV; PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); } else { /* give up */ PROBE_SET_ACTION(softc, PROBE_DV_EXIT); } free(nbuf, M_CAMXPT); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } free(nbuf, M_CAMXPT); if (softc->action == PROBE_INQUIRY_BASIC_DV1) { PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; } if (softc->action == PROBE_INQUIRY_BASIC_DV2) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Leave Domain Validation Successfully\n")); } if (path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); } path->device->flags &= ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { /* Inform the XPT that a new device has been found */ done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb); } PROBE_SET_ACTION(softc, PROBE_DONE); xpt_release_ccb(done_ccb); break; } default: panic("probedone: invalid action state 0x%x\n", softc->action); } done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(done_ccb); if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. 
*/ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_release_locked(periph); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } else { probeschedule(periph); goto out; } } static void probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new, probe_flags flags) { struct cam_path *tp; struct scsi_report_luns_data *old; u_int idx1, idx2, nlun_old, nlun_new; lun_id_t this_lun; uint8_t *ol, *nl; if (path->target == NULL) { return; } mtx_lock(&path->target->luns_mtx); old = path->target->luns; path->target->luns = new; mtx_unlock(&path->target->luns_mtx); if (old == NULL) return; nlun_old = scsi_4btoul(old->length) / 8; nlun_new = scsi_4btoul(new->length) / 8; /* * We are not going to assume sorted lists. Deal. */ for (idx1 = 0; idx1 < nlun_old; idx1++) { ol = old->luns[idx1].lundata; for (idx2 = 0; idx2 < nlun_new; idx2++) { nl = new->luns[idx2].lundata; if (memcmp(nl, ol, 8) == 0) { break; } } if (idx2 < nlun_new) { continue; } /* * An 'old' item not in the 'new' list. * Nuke it. Except that if it is lun 0, * that would be what the probe state * machine is currently working on, * so we won't do that. */ CAM_GET_LUN(old, idx1, this_lun); if (this_lun == 0) { continue; } /* * We also cannot nuke it if it is * not in a lun format we understand * and replace the LUN with a "simple" LUN * if that is all the HBA supports. */ if (!(flags & PROBE_EXTLUN)) { if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1)) continue; CAM_GET_SIMPLE_LUN(old, idx1, this_lun); } if (xpt_create_path(&tp, NULL, xpt_path_path_id(path), xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } free(old, M_CAMXPT); } static void probecleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } static void scsi_find_quirk(struct cam_ed *device) { struct scsi_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)scsi_quirk_table, nitems(scsi_quirk_table), sizeof(*scsi_quirk_table), scsi_inquiry_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct scsi_quirk_entry *)match; device->quirk = quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } typedef struct { union ccb *request_ccb; struct ccb_pathinq *cpi; int counter; int lunindex[0]; } scsi_scan_bus_info; /* * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. * As the scan progresses, scsi_scan_bus is used as the * callback on completion function. */ static void scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) { struct mtx *mtx; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("scsi_scan_bus\n")); switch (request_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: { scsi_scan_bus_info *scan_info; union ccb *work_ccb, *reset_ccb; struct cam_path *path; u_int i; u_int low_target, max_target; u_int initiator_id; /* Find out the characteristics of the bus */ work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); return; } xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_PATH_INQ; xpt_action(work_ccb); if (work_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = work_ccb->ccb_h.status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. 
*/ request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } /* We may need to reset bus first, if we haven't done it yet. */ if ((work_ccb->cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) && (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) { xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, CAM_PRIORITY_NONE); reset_ccb->ccb_h.func_code = XPT_RESET_BUS; xpt_action(reset_ccb); if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { request_ccb->ccb_h.status = reset_ccb->ccb_h.status; xpt_free_ccb(reset_ccb); xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } xpt_free_ccb(reset_ccb); } /* Save some state for use while we probe for devices */ scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) + (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT); if (scan_info == NULL) { request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_free_ccb(work_ccb); xpt_done(request_ccb); return; } CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN start for %p\n", scan_info)); scan_info->request_ccb = request_ccb; scan_info->cpi = &work_ccb->cpi; /* Cache on our stack so we can work asynchronously */ max_target = scan_info->cpi->max_target; low_target = 0; initiator_id = scan_info->cpi->initiator_id; /* * We can scan all targets in parallel, or do it sequentially. */ if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { max_target = low_target = request_ccb->ccb_h.target_id; scan_info->counter = 0; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { max_target = 0; scan_info->counter = 0; } else { scan_info->counter = scan_info->cpi->max_target + 1; if (scan_info->cpi->initiator_id < scan_info->counter) { scan_info->counter--; } } mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_unlock(mtx); for (i = low_target; i <= max_target; i++) { cam_status status; if (i == initiator_id) continue; status = xpt_create_path(&path, NULL, request_ccb->ccb_h.path_id, i, 0); if (status != CAM_REQ_CMP) { printf( "scsi_scan_bus: xpt_create_path failed with status %#x, bus scan halted\n", status); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_free_ccb(work_ccb); xpt_done(request_ccb); break; } work_ccb = xpt_alloc_ccb_nowait(); if (work_ccb == NULL) { xpt_free_ccb((union ccb *)scan_info->cpi); free(scan_info, M_CAMXPT); xpt_free_path(path); request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(request_ccb); break; } xpt_setup_ccb(&work_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); work_ccb->ccb_h.func_code = XPT_SCAN_LUN; work_ccb->ccb_h.cbfcnp = scsi_scan_bus; work_ccb->ccb_h.flags |= CAM_UNLOCKED; work_ccb->ccb_h.ppriv_ptr0 = scan_info; work_ccb->crcn.flags = request_ccb->crcn.flags; xpt_action(work_ccb); } mtx_lock(mtx); break; } case XPT_SCAN_LUN: { cam_status status; struct cam_path *path, *oldpath; scsi_scan_bus_info *scan_info; struct cam_et *target; struct cam_ed *device, *nextdev; int next_target; path_id_t path_id; target_id_t target_id; lun_id_t lun_id; oldpath = request_ccb->ccb_h.path; status = cam_ccb_status(request_ccb); scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; path_id = request_ccb->ccb_h.path_id; target_id = request_ccb->ccb_h.target_id; lun_id = request_ccb->ccb_h.target_lun; target = request_ccb->ccb_h.path->target; next_target = 1; mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path); mtx_lock(mtx); mtx_lock(&target->luns_mtx); if (target->luns) { 
lun_id_t first; u_int nluns = scsi_4btoul(target->luns->length) / 8; /* * Make sure we skip over lun 0 if it's the first member * of the list as we've actually just finished probing * it. */ CAM_GET_LUN(target->luns, 0, first); if (first == 0 && scan_info->lunindex[target_id] == 0) { scan_info->lunindex[target_id]++; } /* * Skip any LUNs that the HBA can't deal with. */ while (scan_info->lunindex[target_id] < nluns) { if (scan_info->cpi->hba_misc & PIM_EXTLUNS) { CAM_GET_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } if (CAM_CAN_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id])) { CAM_GET_SIMPLE_LUN(target->luns, scan_info->lunindex[target_id], lun_id); break; } scan_info->lunindex[target_id]++; } if (scan_info->lunindex[target_id] < nluns) { mtx_unlock(&target->luns_mtx); next_target = 0; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_PROBE, ("next lun to try at index %u is %jx\n", scan_info->lunindex[target_id], (uintmax_t)lun_id)); scan_info->lunindex[target_id]++; } else { mtx_unlock(&target->luns_mtx); /* We're done with scanning all luns. */ } } else { mtx_unlock(&target->luns_mtx); device = request_ccb->ccb_h.path->device; /* Continue sequential LUN scan if: */ /* -- we have more LUNs that need recheck */ mtx_lock(&target->bus->eb_mtx); nextdev = device; while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL) if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0) break; mtx_unlock(&target->bus->eb_mtx); if (nextdev != NULL) { next_target = 0; /* -- stop if CAM_QUIRK_NOLUNS is set. */ } else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) { next_target = 1; /* -- this LUN is connected and its SCSI version * allows more LUNs. */ } else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_DENSE(device)) next_target = 0; /* -- this LUN is disconnected, its SCSI version * allows more LUNs and we guess they may be. */ } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) { if (lun_id < (CAM_SCSI2_MAXLUN-1) || CAN_SRCH_HI_SPARSE(device)) next_target = 0; } if (next_target == 0) { lun_id++; if (lun_id > scan_info->cpi->max_lun) next_target = 1; } } /* * Check to see if we scan any further luns. */ if (next_target) { int done; /* * Free the current request path- we're done with it. 
*/ xpt_free_path(oldpath); hop_again: done = 0; if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { done = 1; } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { scan_info->counter++; if (scan_info->counter == scan_info->cpi->initiator_id) { scan_info->counter++; } if (scan_info->counter >= scan_info->cpi->max_target+1) { done = 1; } } else { scan_info->counter--; if (scan_info->counter == 0) { done = 1; } } if (done) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("SCAN done for %p\n", scan_info)); free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); break; } if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) { mtx_unlock(mtx); xpt_free_ccb(request_ccb); break; } status = xpt_create_path(&path, NULL, scan_info->request_ccb->ccb_h.path_id, scan_info->counter, 0); if (status != CAM_REQ_CMP) { mtx_unlock(mtx); printf( "scsi_scan_bus: xpt_create_path failed with status %#x, bus scan halted\n", status); xpt_free_ccb(request_ccb); xpt_free_ccb((union ccb *)scan_info->cpi); request_ccb = scan_info->request_ccb; free(scan_info, M_CAMXPT); request_ccb->ccb_h.status = status; xpt_done(request_ccb); break; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } else { status = xpt_create_path(&path, NULL, path_id, target_id, lun_id); /* * Free the old request path- we're done with it. We * do this *after* creating the new path so that * we don't remove a target that has our lun list * in the case that lun 0 is not present. */ xpt_free_path(oldpath); if (status != CAM_REQ_CMP) { printf( "scsi_scan_bus: xpt_create_path failed with status %#x, halting LUN scan\n", status); goto hop_again; } xpt_setup_ccb(&request_ccb->ccb_h, path, request_ccb->ccb_h.pinfo.priority); request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.cbfcnp = scsi_scan_bus; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->ccb_h.ppriv_ptr0 = scan_info; request_ccb->crcn.flags = scan_info->request_ccb->crcn.flags; } mtx_unlock(mtx); xpt_action(request_ccb); break; } default: break; } } static void scsi_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_path *new_path; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n")); memset(&cpi, 0, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { /* * Can't scan the bus on an adapter that * cannot perform the initiator role. 
*/ if (request_ccb != NULL) { request_ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(request_ccb); } return; } if (request_ccb == NULL) { request_ccb = xpt_alloc_ccb_nowait(); if (request_ccb == NULL) { xpt_print(path, "scsi_scan_lun: can't allocate CCB, can't continue\n"); return; } status = xpt_create_path(&new_path, NULL, path->bus->path_id, path->target->target_id, path->device->lun_id); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: can't create path, can't continue\n"); xpt_free_ccb(request_ccb); return; } xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT); request_ccb->ccb_h.cbfcnp = xptscandone; request_ccb->ccb_h.func_code = XPT_SCAN_LUN; request_ccb->ccb_h.flags |= CAM_UNLOCKED; request_ccb->crcn.flags = flags; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { probe_softc *softc; softc = (probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(request_ccb); } } else { status = cam_periph_alloc(proberegister, NULL, probecleanup, probestart, "probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "scsi_scan_lun: cam_alloc_periph returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static void xptscandone(struct cam_periph *periph, union ccb *done_ccb) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } static struct cam_ed * scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct scsi_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data and can determine a better quirk to use. */ quirk = &scsi_quirk_table[nitems(scsi_quirk_table) - 1]; device->quirk = (void *)quirk; device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; bzero(&device->inq_data, sizeof(device->inq_data)); device->inq_flags = 0; device->queue_flags = 0; device->serial_num = NULL; device->serial_num_len = 0; device->device_id = NULL; device->device_id_len = 0; device->supported_vpds = NULL; device->supported_vpds_len = 0; return (device); } static void scsi_devise_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct scsi_inquiry_data *inq_buf; /* Get transport information from the SIM */ memset(&cpi, 0, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); inq_buf = NULL; if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) inq_buf = &path->device->inq_data; path->device->protocol = PROTO_SCSI; path->device->protocol_version = inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; /* * Any device not using SPI3 features should * be considered SPI2 or lower. 
*/ if (inq_buf != NULL) { if (path->device->transport == XPORT_SPI && (inq_buf->spi3data & SID_SPI_MASK) == 0 && path->device->transport_version > 2) path->device->transport_version = 2; } else { struct cam_ed* otherdev; for (otherdev = TAILQ_FIRST(&path->target->ed_entries); otherdev != NULL; otherdev = TAILQ_NEXT(otherdev, links)) { if (otherdev != path->device) break; } if (otherdev != NULL) { /* * Initially assume the same versioning as * prior luns for this target. */ path->device->protocol_version = otherdev->protocol_version; path->device->transport_version = otherdev->transport_version; } else { /* Until we know better, opt for safety */ path->device->protocol_version = 2; if (path->device->transport == XPORT_SPI) path->device->transport_version = 2; else path->device->transport_version = 0; } } /* * XXX * For a device compliant with SPC-2 we should be able * to determine the transport version supported by * scrutinizing the version descriptors in the * inquiry buffer. */ /* Tell the controller what we think */ memset(&cts, 0, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void scsi_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED); start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) { free(device->physpath, M_CAMXPT); device->physpath = NULL; device->physpath_len = 0; } /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } device->physpath_len = cdai->bufsiz; memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_RCAPLONG: if (cdai->flags & CDAI_FLAG_STORE) { if (device->rcap_buf != NULL) { free(device->rcap_buf, M_CAMXPT); device->rcap_buf = NULL; } device->rcap_len = cdai->bufsiz; /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->rcap_buf == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = 
device->rcap_len; if (device->rcap_len == 0) break; amt = device->rcap_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->rcap_buf, amt); } break; case CDAI_TYPE_EXT_INQ: /* * We fetch extended inquiry data during probe, if * available. We don't allow changing it. */ if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->ext_inq_len; if (device->ext_inq_len == 0) break; amt = device->ext_inq_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->ext_inq, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void scsi_action(union ccb *start_ccb) { if (start_ccb->ccb_h.func_code != XPT_SCSI_IO) { KASSERT((start_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) == 0, ("%s: ccb %p, func_code %#x should not be allocated from UMA zone\n", __func__, start_ccb, start_ccb->ccb_h.func_code)); } switch (start_ccb->ccb_h.func_code) { case XPT_SET_TRAN_SETTINGS: { scsi_set_transfer_settings(&start_ccb->cts, start_ccb->ccb_h.path, /*async_update*/FALSE); break; } case XPT_SCAN_BUS: case XPT_SCAN_TGT: scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); break; case XPT_SCAN_LUN: scsi_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: { scsi_dev_advinfo(start_ccb); break; } default: xpt_action_default(start_ccb); break; } } static void scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path, int async_update) { struct ccb_pathinq cpi; struct ccb_trans_settings cur_cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_scsi *cur_scsi; struct scsi_inquiry_data *inq_data; struct cam_ed *device; if (path == NULL || (device = path->device) == NULL) { cts->ccb_h.status = CAM_PATH_INVALID; xpt_done((union ccb *)cts); return; } if (cts->protocol == PROTO_UNKNOWN || cts->protocol == PROTO_UNSPECIFIED) { cts->protocol = device->protocol; cts->protocol_version = device->protocol_version; } if (cts->protocol_version == PROTO_VERSION_UNKNOWN || cts->protocol_version == PROTO_VERSION_UNSPECIFIED) cts->protocol_version = device->protocol_version; if (cts->protocol != device->protocol) { xpt_print(path, "Uninitialized Protocol %x:%x?\n", cts->protocol, device->protocol); cts->protocol = device->protocol; } if (cts->protocol_version > device->protocol_version) { if (bootverbose) { xpt_print(path, "Down reving Protocol Version from %d to %d?\n", cts->protocol_version, device->protocol_version); } cts->protocol_version = device->protocol_version; } if (cts->transport == XPORT_UNKNOWN || cts->transport == XPORT_UNSPECIFIED) { cts->transport = device->transport; cts->transport_version = device->transport_version; } if (cts->transport_version == XPORT_VERSION_UNKNOWN || cts->transport_version == XPORT_VERSION_UNSPECIFIED) cts->transport_version = device->transport_version; if (cts->transport != device->transport) { xpt_print(path, "Uninitialized Transport %x:%x?\n", cts->transport, device->transport); cts->transport = device->transport; } if (cts->transport_version > device->transport_version) { if (bootverbose) { xpt_print(path, "Down reving Transport Version from %d to %d?\n", cts->transport_version, device->transport_version); } cts->transport_version = device->transport_version; } /* * Nothing more of interest to do unless * this is a device connected via the * SCSI protocol. 
*/ if (cts->protocol != PROTO_SCSI) { if (async_update == FALSE) xpt_action_default((union ccb *)cts); return; } inq_data = &device->inq_data; scsi = &cts->proto_specific.scsi; memset(&cpi, 0, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* SCSI specific sanity checking */ if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 || (device->mintags == 0)) { /* * Can't tag on hardware that doesn't support tags, * doesn't have it enabled, or has broken tag support. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } if (async_update == FALSE) { /* * Perform sanity checking against what the * controller and device can do. */ memset(&cur_cts, 0, sizeof(cur_cts)); xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE); cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cur_cts.type = cts->type; xpt_action((union ccb *)&cur_cts); if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) { return; } cur_scsi = &cur_cts.proto_specific.scsi; if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) { scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB; } if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } /* SPI specific sanity checking */ if (cts->transport == XPORT_SPI && async_update == FALSE) { u_int spi3caps; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_spi *cur_spi; spi = &cts->xport_specific.spi; cur_spi = &cur_cts.xport_specific.spi; /* Fill in any gaps in what the user gave us */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = cur_spi->sync_period; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = 0; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = cur_spi->sync_offset; if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = 0; if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = cur_spi->ppr_options; if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = cur_spi->bus_width; if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0) spi->bus_width = 0; if ((spi->valid & CTS_SPI_VALID_DISC) == 0) { spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB; } if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && (inq_data->flags & SID_Sync) == 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) { /* Force async */ spi->sync_period = 0; spi->sync_offset = 0; } switch (spi->bus_width) { case MSG_EXT_WDTR_BUS_32_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus32) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_32) != 0) break; /* Fall Through to 16-bit */ case MSG_EXT_WDTR_BUS_16_BIT: if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 || (inq_data->flags & SID_WBus16) != 0 || cts->type == CTS_TYPE_USER_SETTINGS) && (cpi.hba_inquiry & PI_WIDE_16) != 0) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* Fall Through to 8-bit */ default: /* New bus width?? 
*/ case MSG_EXT_WDTR_BUS_8_BIT: /* All targets can do this */ spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } spi3caps = cpi.xport_specific.spi.ppr_options; if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 && cts->type == CTS_TYPE_CURRENT_SETTINGS) spi3caps &= inq_data->spi3data; if ((spi3caps & SID_SPI_CLOCK_DT) == 0) spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; if ((spi3caps & SID_SPI_IUS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ; if ((spi3caps & SID_SPI_QAS) == 0) spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ; /* No SPI Transfer settings are allowed unless we are wide */ if (spi->bus_width == 0) spi->ppr_options = 0; if ((spi->valid & CTS_SPI_VALID_DISC) && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) { /* * Can't tag queue without disconnection. */ scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; } /* * If we are currently performing tagged transactions to * this device and want to change its negotiation parameters, * go non-tagged for a bit to give the controller a chance to * negotiate unhampered by tag messages. */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (device->inq_flags & SID_CmdQue) != 0 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE| CTS_SPI_VALID_SYNC_OFFSET| CTS_SPI_VALID_BUS_WIDTH)) != 0) scsi_toggle_tags(path); } if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) { int device_tagenb; /* * If we are transitioning from tags to no-tags or * vice-versa, we need to carefully freeze and restart * the queue so that we don't overlap tagged and non-tagged * commands. We also temporarily stop tags if there is * a change in transfer negotiation settings to allow * "tag-less" negotiation. */ if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (device->inq_flags & SID_CmdQue) != 0) device_tagenb = TRUE; else device_tagenb = FALSE; if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0 && device_tagenb == FALSE) || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0 && device_tagenb == TRUE)) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) { /* * Delay change to use tags until after a * few commands have gone to this device so * the controller has time to perform transfer * negotiations without tagged messages getting * in the way. */ device->tag_delay_count = CAM_TAG_DELAY_COUNT; device->flags |= CAM_DEV_TAG_AFTER_COUNT; } else { xpt_stop_tags(path); } } } if (async_update == FALSE) xpt_action_default((union ccb *)cts); } static void scsi_toggle_tags(struct cam_path *path) { struct cam_ed *dev; /* * Give controllers a chance to renegotiate * before starting tag operations. We * "toggle" tagged queuing off then on * which causes the tag enable command delay * counter to come into effect. */ dev = path->device; if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || ((dev->inq_flags & SID_CmdQue) != 0 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { struct ccb_trans_settings cts; memset(&cts, 0, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.protocol = PROTO_SCSI; cts.protocol_version = PROTO_VERSION_UNSPECIFIED; cts.transport = XPORT_UNSPECIFIED; cts.transport_version = XPORT_VERSION_UNSPECIFIED; cts.proto_specific.scsi.flags = 0; cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; scsi_set_transfer_settings(&cts, path, /*async_update*/TRUE); } } /* * Handle any per-device event notifications that require action by the XPT. 
*/ static void scsi_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { cam_status status; struct cam_path newpath; /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; /* * We need our own path with wildcards expanded to * handle certain types of events. */ if ((async_code == AC_SENT_BDR) || (async_code == AC_BUS_RESET) || (async_code == AC_INQ_CHANGED)) status = xpt_compile_path(&newpath, NULL, bus->path_id, target->target_id, device->lun_id); else status = CAM_REQ_CMP_ERR; if (status == CAM_REQ_CMP) { /* * Allow transfer negotiation to occur in a * tag free environment and after settle delay. */ if (async_code == AC_SENT_BDR || async_code == AC_BUS_RESET) { cam_freeze_devq(&newpath); cam_release_devq(&newpath, RELSIM_RELEASE_AFTER_TIMEOUT, /*reduction*/0, /*timeout*/scsi_delay, /*getcount_only*/0); scsi_toggle_tags(&newpath); } if (async_code == AC_INQ_CHANGED) { /* * We've sent a start unit command, or * something similar to a device that * may have caused its inquiry data to * change. So we re-scan the device to * refresh the inquiry data for it. */ scsi_scan_lun(newpath.periph, &newpath, CAM_EXPECT_INQ_CHANGE, NULL); } xpt_release_path(&newpath); } else if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } else if (async_code == AC_TRANSFER_NEG) { struct ccb_trans_settings *settings; struct cam_path path; settings = (struct ccb_trans_settings *)async_arg; xpt_compile_path(&path, NULL, bus->path_id, target->target_id, device->lun_id); scsi_set_transfer_settings(settings, &path, /*async_update*/TRUE); xpt_release_path(&path); } } static void _scsi_announce_periph(struct cam_periph *periph, u_int *speed, u_int *freq, struct ccb_trans_settings *cts) { struct ccb_pathinq cpi; struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL); cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts->type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)cts); if (cam_ccb_status((union ccb *)cts) != CAM_REQ_CMP) return; /* Ask the SIM for its base transfer speed */ memset(&cpi, 0, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* Report connection speed */ *speed = cpi.base_transfer_speed; *freq = 0; if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0 && spi->sync_offset != 0) { *freq = scsi_calc_syncsrate(spi->sync_period); *speed = *freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) *speed *= (0x01 << spi->bus_width); } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) *speed = fc->bitrate; } if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) *speed = sas->bitrate; } } static void scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) { struct ccb_trans_settings cts; u_int speed, freq, mb; memset(&cts, 0, sizeof(cts)); _scsi_announce_periph(periph, &speed, &freq, &cts); if (cam_ccb_status((union ccb *)&cts) != 
CAM_REQ_CMP) return; mb = speed / 1000; if (mb > 0) sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers", periph->periph_name, periph->unit_number, mb, speed % 1000); else sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name, periph->unit_number, speed); /* Report additional information about SPI connections */ if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi; spi = &cts.xport_specific.spi; if (freq != 0) { sbuf_printf(sb, " (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", spi->sync_offset); } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 && spi->bus_width > 0) { if (freq != 0) { sbuf_cat(sb, ", "); } else { sbuf_cat(sb, " ("); } sbuf_printf(sb, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (freq != 0) { sbuf_putc(sb, ')'); } } if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc; fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) sbuf_printf(sb, " WWNN 0x%llx", (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) sbuf_printf(sb, " WWPN 0x%llx", (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) sbuf_printf(sb, " PortID 0x%x", fc->port); } sbuf_putc(sb, '\n'); } static void scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_sbuf(sb, &device->inq_data); } static void scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb) { scsi_print_inquiry_short_sbuf(sb, &device->inq_data); } static void scsi_proto_debug_out(union ccb *ccb) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; struct cam_ed *device; if (ccb->ccb_h.func_code != XPT_SCSI_IO) return; device = ccb->ccb_h.path->device; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. CDB: %s\n", scsi_op_desc(scsiio_cdb_ptr(&ccb->csio)[0], &device->inq_data), scsi_cdb_string(scsiio_cdb_ptr(&ccb->csio), cdb_str, sizeof(cdb_str)))); } diff --git a/sys/dev/ufshci/ufshci.c b/sys/dev/ufshci/ufshci.c new file mode 100644 index 000000000000..84a9629e74b0 --- /dev/null +++ b/sys/dev/ufshci/ufshci.c @@ -0,0 +1,76 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. + * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include +#include + +#include "ufshci_private.h" + +MALLOC_DEFINE(M_UFSHCI, "ufshci", "ufshci(4) memory allocations"); + +int +ufshci_attach(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + int status; + + status = ufshci_ctrlr_construct(ctrlr, dev); + if (status != 0) { + ufshci_ctrlr_destruct(ctrlr, dev); + return (status); + } + + ctrlr->config_hook.ich_func = ufshci_ctrlr_start_config_hook; + ctrlr->config_hook.ich_arg = ctrlr; + + if (config_intrhook_establish(&ctrlr->config_hook) != 0) + return (ENOMEM); + + return (0); +} + +int +ufshci_detach(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + + config_intrhook_drain(&ctrlr->config_hook); + + ufshci_ctrlr_destruct(ctrlr, dev); + + return (0); +} + +void +ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl, + bool error) +{ + struct ufshci_completion_poll_status *status = arg; + + /* + * Copy status into the argument passed by the caller, so that the + * caller can check the status to determine if the request passed + * or failed.
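+ *
+ * A caller-side sketch of the polling pattern (the submission
+ * helper named here is hypothetical; this file only defines the
+ * callback contract):
+ *
+ *	struct ufshci_completion_poll_status status;
+ *
+ *	status.done = 0;
+ *	ufshci_ctrlr_submit_request(ctrlr, req,
+ *	    ufshci_completion_poll_cb, &status);
+ *	while (!atomic_load_acq_int(&status.done))
+ *		pause("ufshci", 1);
+ *	if (status.error)
+ *		... handle the failed request ...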
+ */ + memcpy(&status->cpl.response_upiu, &cpl->response_upiu, cpl->size); + status->error = error; + atomic_store_rel_int(&status->done, 1); +} + +static int +ufshci_modevent(module_t mod __unused, int type __unused, void *argp __unused) +{ + return (0); +} + +static moduledata_t ufshci_mod = { "ufshci", ufshci_modevent, 0 }; + +DECLARE_MODULE(ufshci, ufshci_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); +MODULE_VERSION(ufshci, 1); +MODULE_DEPEND(ufshci, cam, 1, 1, 1); diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h new file mode 100644 index 000000000000..9f0faaadeb57 --- /dev/null +++ b/sys/dev/ufshci/ufshci.h @@ -0,0 +1,939 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. + * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#ifndef __UFSHCI_H__ +#define __UFSHCI_H__ + +#include +#include + +/* + * Note: This driver currently assumes a little-endian architecture. + * Big-endian support is not yet implemented. + */ + +/* MIPI UniPro spec 2.0, section 5.8.1 "PHY Adapter Common Attributes" */ +#define PA_AvailTxDataLanes 0x1520 +#define PA_AvailRxDataLanes 0x1540 + +/* + * MIPI UniPro spec 2.0, section 5.8.2 "PHY Adapter M-PHY-Specific + * Attributes" + */ +#define PA_ConnectedTxDataLanes 0x1561 +#define PA_ConnectedRxDataLanes 0x1581 +#define PA_MaxRxHSGear 0x1587 +#define PA_Granularity 0x15AA +#define PA_TActivate 0x15A8 + +#define PA_RemoteVerInfo 0x15A0 +#define PA_LocalVerInfo 0x15A9 + +/* UFSHCI spec 4.1, section 7.4 "UIC Power Mode Change" */ +#define PA_ActiveTxDataLanes 0x1560 +#define PA_ActiveRxDataLanes 0x1580 +#define PA_TxGear 0x1568 +#define PA_RxGear 0x1583 +#define PA_TxTermination 0x1569 +#define PA_RxTermination 0x1584 +#define PA_HSSeries 0x156A +#define PA_PWRModeUserData0 0x15B0 +#define PA_PWRModeUserData1 0x15B1 +#define PA_PWRModeUserData2 0x15B2 +#define PA_PWRModeUserData3 0x15B3 +#define PA_PWRModeUserData4 0x15B4 +#define PA_PWRModeUserData5 0x15B5 + +#define PA_TxHsAdaptType 0x15D4 +#define PA_PWRMode 0x1571 + +#define DME_LocalFC0ProtectionTimeOutVal 0xD041 +#define DME_LocalTC0ReplayTimeOutVal 0xD042 +#define DME_LocalAFC0ReqTimeOutVal 0xD043 + +/* Currently, UFS uses TC0 only. 
*/ +#define DL_FC0ProtectionTimeOutVal_Default 8191 +#define DL_TC0ReplayTimeOutVal_Default 65535 +#define DL_AFC0ReqTimeOutVal_Default 32767 + +/* UFS Spec 4.1, section 6.4 "Reference Clock" */ +enum ufshci_attribute_reference_clock { + UFSHCI_REF_CLK_19_2MHz = 0x0, + UFSHCI_REF_CLK_26MHz = 0x1, + UFSHCI_REF_CLK_38_4MHz = 0x2, + UFSHCI_REF_CLK_OBSOLETE = 0x3, +}; + +/* UFS spec 4.1, section 9 "UFS UIC Layer: MIPI Unipro" */ +enum ufshci_uic_cmd_opcode { + /* Configuration */ + UFSHCI_DME_GET = 0x01, + UFSHCI_DME_SET = 0x02, + UFSHCI_DME_PEER_GET = 0x03, + UFSHCI_DME_PEER_SET = 0x04, + /* Control */ + UFSHCI_DME_POWER_ON = 0x10, + UFSHCI_DME_POWER_OFF = 0x11, + UFSHCI_DME_ENABLE = 0x12, + UFSHCI_DME_RESET = 0x14, + UFSHCI_DME_ENDPOINT_RESET = 0x15, + UFSHCI_DME_LINK_STARTUP = 0x16, + UFSHCI_DME_HIBERNATE_ENTER = 0x17, + UFSHCI_DME_HIBERNATE_EXIT = 0x18, + UFSHCI_DME_TEST_MODE = 0x1a, +}; + +/* UFSHCI spec 4.1, section 5.6.3 "Offset 98h: UICCMDARG2 – UIC Command + * Argument" */ +enum ufshci_uic_cmd_attr_set_type { + UFSHCI_ATTR_SET_TYPE_NORMAL = 0, /* volatile value */ + UFSHCI_ATTR_SET_TYPE_STATIC = 1, /* non-volatile reset value */ +}; + +struct ufshci_uic_cmd { + uint8_t opcode; + uint32_t argument1; + uint32_t argument2; + uint32_t argument3; +}; + +/* UFS spec 4.1, section 10.5 "UPIU Transactions" */ +enum transaction_code { + UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT = 0x00, + UFSHCI_UPIU_TRANSACTION_CODE_COMMAND = 0x01, + UFSHCI_UPIU_TRANSACTION_CODE_DATA_OUT = 0x02, + UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST = 0x04, + UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST = 0x16, + UFSHCI_UPIU_TRANSACTION_CODE_NOP_IN = 0x20, + UFSHCI_UPIU_TRANSACTION_CODE_RESPONSE = 0x21, + UFSHCI_UPIU_TRANSACTION_CODE_DATA_IN = 0x22, + UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_RESPONSE = 0x24, + UFSHCI_UPIU_TRANSACTION_CODE_READY_TO_TRANSFER = 0x31, + UFSHCI_UPIU_TRANSACTION_CODE_QUERY_RESPONSE = 0x36, + UFSHCI_UPIU_TRANSACTION_CODE_REJECT_UPIU = 0x3f, +}; + +enum overall_command_status { + UFSHCI_DESC_SUCCESS = 0x0, + UFSHCI_DESC_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01, + UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES = 0x02, + UFSHCI_DESC_MISMATCH_DATA_BUFFER_SIZE = 0x03, + UFSHCI_DESC_MISMATCH_RESPONSE_UPIU_SIZE = 0x04, + UFSHCI_DESC_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05, + UFSHCI_DESC_ABORTED = 0x06, + UFSHCI_DESC_HOST_CONTROLLER_FATAL_ERROR = 0x07, + UFSHCI_DESC_DEVICEFATALERROR = 0x08, + UFSHCI_DESC_INVALID_CRYPTO_CONFIGURATION = 0x09, + UFSHCI_DESC_GENERAL_CRYPTO_ERROR = 0x0A, + UFSHCI_DESC_INVALID = 0x0F, +}; + +enum response_code { + UFSHCI_RESPONSE_CODE_TARGET_SUCCESS = 0x00, + UFSHCI_RESPONSE_CODE_TARGET_FAILURE = 0x01, + UFSHCI_RESPONSE_CODE_PARAMETER_NOTREADABLE = 0xF6, + UFSHCI_RESPONSE_CODE_PARAMETER_NOTWRITEABLE = 0xF7, + UFSHCI_RESPONSE_CODE_PARAMETER_ALREADYWRITTEN = 0xF8, + UFSHCI_RESPONSE_CODE_INVALID_LENGTH = 0xF9, + UFSHCI_RESPONSE_CODE_INVALID_VALUE = 0xFA, + UFSHCI_RESPONSE_CODE_INVALID_SELECTOR = 0xFB, + UFSHCI_RESPONSE_CODE_INVALID_INDEX = 0xFC, + UFSHCI_RESPONSE_CODE_INVALID_IDN = 0xFD, + UFSHCI_RESPONSE_CODE_INVALID_OPCODE = 0xFE, + UFSHCI_RESPONSE_CODE_GENERAL_FAILURE = 0xFF, +}; + +/* UFSHCI spec 4.1, section 6.1.1 "UTP Transfer Request Descriptor" */ +enum ufshci_command_type { + UFSHCI_COMMAND_TYPE_UFS_STORAGE = 0x01, + UFSHCI_COMMAND_TYPE_NULLIFIED_UTRD = 0x0F, +}; + +enum ufshci_data_direction { + UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER = 0x00, + UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT = 0x01, + UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS = 0b10, + 
UFSHCI_DATA_DIRECTION_RESERVED = 0b11, +}; + +enum ufshci_overall_command_status { + UFSHCI_OCS_SUCCESS = 0x0, + UFSHCI_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01, + UFSHCI_OCS_INVALID_PRDT_ATTRIBUTES = 0x02, + UFSHCI_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03, + UFSHCI_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04, + UFSHCI_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05, + UFSHCI_OCS_ABORTED = 0x06, + UFSHCI_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07, + UFSHCI_OCS_DEVICE_FATAL_ERROR = 0x08, + UFSHCI_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09, + UFSHCI_OCS_GENERAL_CRYPTO_ERROR = 0x0A, + UFSHCI_OCS_INVALID = 0xF, +}; + +struct ufshci_utp_xfer_req_desc { + /* dword 0 */ + uint32_t cci : 8; /* [7:0] */ + uint32_t total_ehs_length : 8; /* [15:8] */ + uint32_t reserved0 : 7; /* [22:16] */ + uint32_t ce : 1; /* [23] */ + uint32_t interrupt : 1; /* [24] */ + uint32_t data_direction : 2; /* [26:25] */ + uint32_t reserved1 : 1; /* [27] */ + uint32_t command_type : 4; /* [31:28] */ + + /* dword 1 */ + uint32_t data_unit_number_lower; /* [31:0] */ + + /* dword 2 */ + uint8_t overall_command_status; /* [7:0] */ + uint8_t common_data_size; /* [15:8] */ + uint16_t last_data_byte_count; /* [31:16] */ + + /* dword 3 */ + uint32_t data_unit_number_upper; /* [31:0] */ + + /* dword 4 */ + uint32_t utp_command_descriptor_base_address; /* [31:0] */ + + /* dword 5 */ + uint32_t utp_command_descriptor_base_address_upper; /* [31:0] */ + + /* dword 6 */ + uint16_t response_upiu_length; /* [15:0] */ + uint16_t response_upiu_offset; /* [31:16] */ + + /* dword 7 */ + uint16_t prdt_length; /* [15:0] */ + uint16_t prdt_offset; /* [31:16] */ +} __packed __aligned(8); + +_Static_assert(sizeof(struct ufshci_utp_xfer_req_desc) == 32, + "ufshci_utp_xfer_req_desc must be 32 bytes"); + +/* + * According to the UFSHCI specification, the size of the UTP command + * descriptor is as follows. The size of the transfer request is not limited, + * a transfer response can be as long as 65535 * dwords, and a PRDT can be as + * long as 65535 * PRDT entry size (16 bytes). However, for ease of use, this + * UFSHCI driver imposes the following limits. The size of the transfer + * request and the transfer response is 1024 bytes or less. The PRDT region + * limits the number of scatter/gather entries to 256 + 1, using a total of 4096 + + * 16 bytes. Therefore, only 8KB is allocated for the UTP command + * descriptor. + */ +#define UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE 8192 +#define UFSHCI_UTP_XFER_REQ_SIZE 512 +#define UFSHCI_UTP_XFER_RESP_SIZE 512 + +/* + * To reduce the size of the UTP Command Descriptor (8KB), we must use only + * 256 + 1 PRDT entries. The reason for adding the 1 is that if the data is + * not aligned, one additional PRDT_ENTRY is used.
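+ *
+ * As a cross-check against struct ufshci_utp_cmd_desc below:
+ * 512 (command UPIU) + 512 (response UPIU) + 257 * 16 (PRDT, 4112
+ * bytes) + 3056 (padding) = 8192 bytes.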
+ */ +#define UFSHCI_MAX_PRDT_ENTRY_COUNT (256 + 1) + +/* UFSHCI spec 4.1, section 6.1.2 "UTP Command Descriptor" */ +struct ufshci_prdt_entry { + /* dword 0 */ + uint32_t data_base_address; /* [31:0] */ + + /* dword 1 */ + uint32_t data_base_address_upper; /* [31:0] */ + + /* dword 2 */ + uint32_t reserved; /* [31:0] */ + + /* dword 3 */ + uint32_t data_byte_count; /* [17:0] Maximum byte + * count is 256KB */ +} __packed __aligned(8); + +_Static_assert(sizeof(struct ufshci_prdt_entry) == 16, + "ufshci_prdt_entry must be 16 bytes"); + +struct ufshci_utp_cmd_desc { + uint8_t command_upiu[UFSHCI_UTP_XFER_REQ_SIZE]; + uint8_t response_upiu[UFSHCI_UTP_XFER_RESP_SIZE]; + uint8_t prd_table[sizeof(struct ufshci_prdt_entry) * + UFSHCI_MAX_PRDT_ENTRY_COUNT]; + uint8_t padding[3072 - sizeof(struct ufshci_prdt_entry)]; +} __packed __aligned(128); + +_Static_assert(sizeof(struct ufshci_utp_cmd_desc) == + UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE, + "ufshci_utp_cmd_desc must be 8192 bytes"); + +#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32 +#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32 + +/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */ +struct ufshci_utp_task_mgmt_req_desc { + /* dword 0 */ + uint32_t reserved0 : 24; /* [23:0] */ + uint32_t interrupt : 1; /* [24] */ + uint32_t reserved1 : 7; /* [31:25] */ + + /* dword 1 */ + uint32_t reserved2; /* [31:0] */ + + /* dword 2 */ + uint8_t overall_command_status; /* [7:0] */ + uint8_t reserved3; /* [15:8] */ + uint16_t reserved4; /* [31:16] */ + + /* dword 3 */ + uint32_t reserved5; /* [31:0] */ + + /* dword 4-11 */ + uint8_t request_upiu[UFSHCI_UTP_TASK_MGMT_REQ_SIZE]; + + /* dword 12-19 */ + uint8_t response_upiu[UFSHCI_UTP_TASK_MGMT_RESP_SIZE]; + +} __packed __aligned(8); + +_Static_assert(sizeof(struct ufshci_utp_task_mgmt_req_desc) == 80, + "ufshci_utp_task_mgmt_req_desc must be 80 bytes"); + +/* UFS spec 4.1, section 10.6.2 "Basic Header Format" */ +struct ufshci_upiu_header { + /* dword 0 */ + union { + struct { + uint8_t trans_code : 6; /* [5:0] */ + uint8_t dd : 1; /* [6] */ + uint8_t hd : 1; /* [7] */ + }; + uint8_t trans_type; + }; + union { + struct { + uint8_t task_attribute : 2; /* [1:0] */ + uint8_t cp : 1; /* [2] */ + uint8_t retransmit_indicator : 1; /* [3] */ +#define UFSHCI_OPERATIONAL_FLAG_W 0x2 +#define UFSHCI_OPERATIONAL_FLAG_R 0x4 + uint8_t operational_flags : 4; /* [7:4] */ + }; + uint8_t flags; + }; + uint8_t lun; + uint8_t task_tag; + + /* dword 1 */ +#define UFSHCI_COMMAND_SET_TYPE_SCSI 0 + uint8_t cmd_set_type : 4; /* [3:0] */ + uint8_t iid : 4; /* [7:4] */ + uint8_t ext_iid_or_function; + uint8_t response; + uint8_t ext_iid_or_status; + + /* dword 2 */ + uint8_t ehs_length; + uint8_t device_infomation; + uint16_t data_segment_length; /* (Big-endian) */ +} __packed __aligned(4); + +_Static_assert(sizeof(struct ufshci_upiu_header) == 12, + "ufshci_upiu_header must be 12 bytes"); + +#define UFSHCI_MAX_UPIU_SIZE 512 +#define UFSHCI_UPIU_ALIGNMENT 8 /* UPIU requires 64-bit alignment. 
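+ * For example, struct ufshci_cmd_response_upiu below ends with a
+ * 4-byte padding field so that its 56-byte size stays a multiple
+ * of 8.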
*/ + +struct ufshci_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3-127 */ + uint8_t + reserved[UFSHCI_MAX_UPIU_SIZE - sizeof(struct ufshci_upiu_header)]; +} __packed __aligned(8); + +_Static_assert(sizeof(struct ufshci_upiu) == 512, + "ufshci_upiu must be 512 bytes"); + +struct ufshci_cmd_command_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3 */ + uint32_t expected_data_transfer_length; /* (Big-endian) */ + + /* dword 4-7 */ + uint8_t cdb[16]; + +} __packed __aligned(4); + +_Static_assert(sizeof(struct ufshci_cmd_command_upiu) == 32, + "bad size for ufshci_cmd_command_upiu"); +_Static_assert(sizeof(struct ufshci_cmd_command_upiu) <= + UFSHCI_UTP_XFER_REQ_SIZE, + "bad size for ufshci_cmd_command_upiu"); +_Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT == + 0, + "UPIU requires 64-bit alignment"); + +struct ufshci_cmd_response_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3 */ + uint32_t residual_transfer_count; /* (Big-endian) */ + + /* dword 4-7 */ + uint8_t reserved[16]; + + /* Sense Data */ + uint16_t sense_data_len; /* (Big-endian) */ + uint8_t sense_data[18]; + + /* Pad to keep the UPIU 64-bit aligned. */ + uint8_t padding[4]; +} __packed __aligned(4); + +_Static_assert(sizeof(struct ufshci_cmd_response_upiu) == 56, + "bad size for ufshci_cmd_response_upiu"); +_Static_assert(sizeof(struct ufshci_cmd_response_upiu) <= + UFSHCI_UTP_XFER_RESP_SIZE, + "bad size for ufshci_cmd_response_upiu"); +_Static_assert(sizeof(struct ufshci_cmd_response_upiu) % + UFSHCI_UPIU_ALIGNMENT == + 0, + "UPIU requires 64-bit alignment"); + +/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */ +enum ufshci_query_function { + UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, + UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, +}; + +enum ufshci_query_opcode { + UFSHCI_QUERY_OPCODE_NOP = 0, + UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR, + UFSHCI_QUERY_OPCODE_WRITE_DESCRIPTOR, + UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE, + UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE, + UFSHCI_QUERY_OPCODE_READ_FLAG, + UFSHCI_QUERY_OPCODE_SET_FLAG, + UFSHCI_QUERY_OPCODE_CLEAR_FLAG, + UFSHCI_QUERY_OPCODE_TOGGLE_FLAG, +}; + +struct ufshci_query_param { + enum ufshci_query_function function; + enum ufshci_query_opcode opcode; + uint8_t type; + uint8_t index; + uint8_t selector; + uint64_t value; + size_t desc_size; +}; + +struct ufshci_query_request_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3 */ + uint8_t opcode; + uint8_t idn; + uint8_t index; + uint8_t selector; + + /* dword 4-5 */ + union { + /* The Write Attribute opcode uses a 64-bit value.
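+ * For example, a Write Attribute request would set value_64 =
+ * htobe64(param->value), while a Read Descriptor request would
+ * set length = htobe16(param->desc_size) (field names taken from
+ * struct ufshci_query_param above; the wire format is big-endian).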
*/ + uint64_t value_64; /* (Big-endian) */ + struct { + uint8_t reserved1[2]; + uint16_t length; /* (Big-endian) */ + uint32_t value_32; /* (Big-endian) */ + }; + } __packed __aligned(4); + + /* dword 6 */ + uint32_t reserved2; + + /* dword 7 */ + uint32_t reserved3; + + uint8_t command_data[256]; +} __packed __aligned(4); + +_Static_assert(sizeof(struct ufshci_query_request_upiu) == 288, + "bad size for ufshci_query_request_upiu"); +_Static_assert(sizeof(struct ufshci_query_request_upiu) <= + UFSHCI_UTP_XFER_REQ_SIZE, + "bad size for ufshci_query_request_upiu"); +_Static_assert(sizeof(struct ufshci_query_request_upiu) % + UFSHCI_UPIU_ALIGNMENT == + 0, + "UPIU requires 64-bit alignment"); + +/* UFS Spec 4.1, section 10.7.9 "QUERY RESPONSE UPIU" */ +enum ufshci_query_response_code { + UFSHCI_QUERY_RESP_CODE_SUCCESS = 0x00, + UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_READABLE = 0xf6, + UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_WRITEABLE = 0xf7, + UFSHCI_QUERY_RESP_CODE_PARAMETER_ALREADY_WRITTEN = 0xf8, + UFSHCI_QUERY_RESP_CODE_INVALID_LENGTH = 0xf9, + UFSHCI_QUERY_RESP_CODE_INVALID_VALUE = 0xfa, + UFSHCI_QUERY_RESP_CODE_INVALID_SELECTOR = 0xfb, + UFSHCI_QUERY_RESP_CODE_INVALID_INDEX = 0xfc, + UFSHCI_QUERY_RESP_CODE_INVALID_IDN = 0xfd, + UFSHCI_QUERY_RESP_CODE_INVALID_OPCODE = 0xfe, + UFSHCI_QUERY_RESP_CODE_GENERAL_FAILURE = 0xff, +}; + +struct ufshci_query_response_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3 */ + uint8_t opcode; + uint8_t idn; + uint8_t index; + uint8_t selector; + + /* dword 4-5 */ + union { + /* The Read / Write Attribute opcodes use a 64-bit value. */ + uint64_t value_64; /* (Big-endian) */ + struct { + uint8_t reserved1[2]; + uint16_t length; /* (Big-endian) */ + union { + uint32_t value_32; /* (Big-endian) */ + struct { + uint8_t reserved2[3]; + uint8_t flag_value; + }; + }; + }; + } __packed __aligned(4); + + /* dword 6 */ + uint8_t reserved3[4]; + + /* dword 7 */ + uint8_t reserved4[4]; + + uint8_t command_data[256]; +} __packed __aligned(4); + +_Static_assert(sizeof(struct ufshci_query_response_upiu) == 288, + "bad size for ufshci_query_response_upiu"); +_Static_assert(sizeof(struct ufshci_query_response_upiu) <= + UFSHCI_UTP_XFER_RESP_SIZE, + "bad size for ufshci_query_response_upiu"); +_Static_assert(sizeof(struct ufshci_query_response_upiu) % + UFSHCI_UPIU_ALIGNMENT == + 0, + "UPIU requires 64-bit alignment"); + +/* UFS 4.1, section 10.7.11 "NOP OUT UPIU" */ +struct ufshci_nop_out_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3-7 */ + uint8_t reserved[20]; +} __packed __aligned(8); +_Static_assert(sizeof(struct ufshci_nop_out_upiu) == 32, + "ufshci_upiu_nop_out must be 32 bytes"); + +/* UFS 4.1, section 10.7.12 "NOP IN UPIU" */ +struct ufshci_nop_in_upiu { + /* dword 0-2 */ + struct ufshci_upiu_header header; + /* dword 3-7 */ + uint8_t reserved[20]; +} __packed __aligned(8); +_Static_assert(sizeof(struct ufshci_nop_in_upiu) == 32, + "ufshci_upiu_nop_in must be 32 bytes"); + +union ufshci_reponse_upiu { + struct ufshci_upiu_header header; + struct ufshci_cmd_response_upiu cmd_response_upiu; + struct ufshci_query_response_upiu query_response_upiu; + struct ufshci_nop_in_upiu nop_in_upiu; +}; + +struct ufshci_completion { + union ufshci_reponse_upiu response_upiu; + size_t size; +}; + +typedef void (*ufshci_cb_fn_t)(void *, const struct ufshci_completion *, bool); + +/* + * UFS Spec 4.1, section 14.1 "UFS Descriptors" + * All descriptors use big-endian byte ordering.
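+ * On little-endian hosts multi-byte fields therefore need
+ * conversion on access, e.g. be16toh(desc->wSpecVersion) for the
+ * Device Descriptor's wSpecVersion field below.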
+ */ +enum ufshci_descriptor_type { + UFSHCI_DESC_TYPE_DEVICE = 0x00, + UFSHCI_DESC_TYPE_CONFIGURATION = 0x01, + UFSHCI_DESC_TYPE_UNIT = 0x02, + UFSHCI_DESC_TYPE_INTERCONNECT = 0x04, + UFSHCI_DESC_TYPE_STRING = 0x05, + UFSHCI_DESC_TYPE_GEOMETRY = 0x07, + UFSHCI_DESC_TYPE_POWER = 0x08, + UFSHCI_DESC_TYPE_DEVICE_HEALTH = 0x09, + UFSHCI_DESC_TYPE_FBO_EXTENSION_SPECIFICATION = 0x0a, +}; + +/* + * UFS Spec 4.1, section 14.1.5.2 "Device Descriptor" + * DeviceDescriptor uses big-endian byte ordering. + */ +struct ufshci_device_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t bDevice; + uint8_t bDeviceClass; + uint8_t bDeviceSubClass; + uint8_t bProtocol; + uint8_t bNumberLU; + uint8_t bNumberWLU; + uint8_t bBootEnable; + uint8_t bDescrAccessEn; + uint8_t bInitPowerMode; + uint8_t bHighPriorityLUN; + uint8_t bSecureRemovalType; + uint8_t bSecurityLU; + uint8_t bBackgroundOpsTermLat; + uint8_t bInitActiveICCLevel; + /* 0x10 */ + uint16_t wSpecVersion; + uint16_t wManufactureDate; + uint8_t iManufacturerName; + uint8_t iProductName; + uint8_t iSerialNumber; + uint8_t iOemID; + uint16_t wManufacturerID; + uint8_t bUD0BaseOffset; + uint8_t bUDConfigPLength; + uint8_t bDeviceRTTCap; + uint16_t wPeriodicRTCUpdate; + uint8_t bUfsFeaturesSupport; + /* 0x20 */ + uint8_t bFFUTimeout; + uint8_t bQueueDepth; + uint16_t wDeviceVersion; + uint8_t bNumSecureWPArea; + uint32_t dPSAMaxDataSize; + uint8_t bPSAStateTimeout; + uint8_t iProductRevisionLevel; + uint8_t Reserved[5]; + /* 0x2a */ + /* 0x30 */ + uint8_t ReservedUME[16]; + /* 0x40 */ + uint8_t ReservedHpb[3]; + uint8_t Reserved2[12]; + uint32_t dExtendedUfsFeaturesSupport; + uint8_t bWriteBoosterBufferPreserveUserSpaceEn; + uint8_t bWriteBoosterBufferType; + uint32_t dNumSharedWriteBoosterBufferAllocUnits; +} __packed; + +_Static_assert(sizeof(struct ufshci_device_descriptor) == 89, + "bad size for ufshci_device_descriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor" + * ConfigurationDescriptor uses big-endian byte ordering.
+ */ +struct ufshci_unit_descriptor_configurable_parameters { + uint8_t bLUEnable; + uint8_t bBootLunID; + uint8_t bLUWriteProtect; + uint8_t bMemoryType; + uint32_t dNumAllocUnits; + uint8_t bDataReliability; + uint8_t bLogicalBlockSize; + uint8_t bProvisioningType; + uint16_t wContextCapabilities; + union { + struct { + uint8_t Reserved[3]; + uint8_t ReservedHpb[6]; + } __packed; + uint16_t wZoneBufferAllocUnits; + }; + uint32_t dLUNumWriteBoosterBufferAllocUnits; +} __packed; + +_Static_assert(sizeof(struct ufshci_unit_descriptor_configurable_parameters) == + 27, + "bad size for ufshci_unit_descriptor_configurable_parameters"); + +#define UFSHCI_CONFIGURATION_DESCRIPTOR_LU_NUM 8 + +struct ufshci_configuration_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t bConfDescContinue; + uint8_t bBootEnable; + uint8_t bDescrAccessEn; + uint8_t bInitPowerMode; + uint8_t bHighPriorityLUN; + uint8_t bSecureRemovalType; + uint8_t bInitActiveICCLevel; + uint16_t wPeriodicRTCUpdate; + uint8_t Reserved; + uint8_t bRPMBRegionEnable; + uint8_t bRPMBRegion1Size; + uint8_t bRPMBRegion2Size; + uint8_t bRPMBRegion3Size; + uint8_t bWriteBoosterBufferPreserveUserSpaceEn; + uint8_t bWriteBoosterBufferType; + uint32_t dNumSharedWriteBoosterBufferAllocUnits; + /* 0x16 */ + struct ufshci_unit_descriptor_configurable_parameters + unit_config_params[UFSHCI_CONFIGURATION_DESCRIPTOR_LU_NUM]; +} __packed; + +_Static_assert(sizeof(struct ufshci_configuration_descriptor) == (22 + 27 * 8), + "bad size for ufshci_configuration_descriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.4 "Geometry Descriptor" + * GeometryDescriptor uses big-endian byte ordering. + */ +struct ufshci_geometry_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t bMediaTechnology; + uint8_t Reserved; + uint64_t qTotalRawDeviceCapacity; + uint8_t bMaxNumberLU; + uint32_t dSegmentSize; + /* 0x11 */ + uint8_t bAllocationUnitSize; + uint8_t bMinAddrBlockSize; + uint8_t bOptimalReadBlockSize; + uint8_t bOptimalWriteBlockSize; + uint8_t bMaxInBufferSize; + uint8_t bMaxOutBufferSize; + uint8_t bRPMB_ReadWriteSize; + uint8_t bDynamicCapacityResourcePolicy; + uint8_t bDataOrdering; + uint8_t bMaxContexIDNumber; + uint8_t bSysDataTagUnitSize; + uint8_t bSysDataTagResSize; + uint8_t bSupportedSecRTypes; + uint16_t wSupportedMemoryTypes; + /* 0x20 */ + uint32_t dSystemCodeMaxNAllocU; + uint16_t wSystemCodeCapAdjFac; + uint32_t dNonPersistMaxNAllocU; + uint16_t wNonPersistCapAdjFac; + uint32_t dEnhanced1MaxNAllocU; + /* 0x30 */ + uint16_t wEnhanced1CapAdjFac; + uint32_t dEnhanced2MaxNAllocU; + uint16_t wEnhanced2CapAdjFac; + uint32_t dEnhanced3MaxNAllocU; + uint16_t wEnhanced3CapAdjFac; + uint32_t dEnhanced4MaxNAllocU; + /* 0x42 */ + uint16_t wEnhanced4CapAdjFac; + uint32_t dOptimalLogicalBlockSize; + uint8_t ReservedHpb[5]; + uint8_t Reserved2[2]; + uint32_t dWriteBoosterBufferMaxNAllocUnits; + uint8_t bDeviceMaxWriteBoosterLUs; + uint8_t bWriteBoosterBufferCapAdjFac; + uint8_t bSupportedWriteBoosterBufferUserSpaceReductionTypes; + uint8_t bSupportedWriteBoosterBufferTypes; +} __packed; + +_Static_assert(sizeof(struct ufshci_geometry_descriptor) == 87, + "bad size for ufshci_geometry_descriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.5 "Unit Descriptor" + * UnitDescriptor uses big-endian byte ordering.
+ */ +struct ufshci_unit_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t bUnitIndex; + uint8_t bLUEnable; + uint8_t bBootLunID; + uint8_t bLUWriteProtect; + uint8_t bLUQueueDepth; + uint8_t bPSASensitive; + uint8_t bMemoryType; + uint8_t bDataReliability; + uint8_t bLogicalBlockSize; + uint64_t qLogicalBlockCount; + /* 0x13 */ + uint32_t dEraseBlockSize; + uint8_t bProvisioningType; + uint64_t qPhyMemResourceCount; + /* 0x20 */ + uint16_t wContextCapabilities; + uint8_t bLargeUnitGranularity_M1; + uint8_t ReservedHpb[6]; + uint32_t dLUNumWriteBoosterBufferAllocUnits; +} __packed; +_Static_assert(sizeof(struct ufshci_unit_descriptor) == 45, + "bad size for ufshci_unit_descriptor"); + +enum LUWriteProtect { + kNoWriteProtect = 0x00, + kPowerOnWriteProtect = 0x01, + kPermanentWriteProtect = 0x02, +}; + +/* + * UFS Spec 4.1, section 14.1.5.6 "RPMB Unit Descriptor" + * RpmbUnitDescriptor uses big-endian byte ordering. + */ +struct ufshci_rpmb_unit_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t bUnitIndex; + uint8_t bLUEnable; + uint8_t bBootLunID; + uint8_t bLUWriteProtect; + uint8_t bLUQueueDepth; + uint8_t bPSASensitive; + uint8_t bMemoryType; + uint8_t Reserved; + uint8_t bLogicalBlockSize; + uint64_t qLogicalBlockCount; + /* 0x13 */ + uint32_t dEraseBlockSize; + uint8_t bProvisioningType; + uint64_t qPhyMemResourceCount; + /* 0x20 */ + uint8_t Reserved1[3]; +} __packed; +_Static_assert(sizeof(struct ufshci_rpmb_unit_descriptor) == 35, + "bad size for RpmbUnitDescriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.7 "Power Parameters Descriptor" + * PowerParametersDescriptor uses big-endian byte ordering. + */ +struct ufshci_power_parameters_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint16_t wActiveICCLevelsVCC[16]; + uint16_t wActiveICCLevelsVCCQ[16]; + uint16_t wActiveICCLevelsVCCQ2[16]; +} __packed; +_Static_assert(sizeof(struct ufshci_power_parameters_descriptor) == 98, + "bad size for PowerParametersDescriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.8 "Interconnect Descriptor" + * InterconnectDescriptor uses big-endian byte ordering. + */ +struct ufshci_interconnect_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint16_t bcdUniproVersion; + uint16_t bcdMphyVersion; +} __packed; +_Static_assert(sizeof(struct ufshci_interconnect_descriptor) == 6, + "bad size for InterconnectDescriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.9-13 "String Descriptor" + * StringDescriptor uses big-endian byte ordering. + */ +struct ufshci_string_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint16_t UC[126]; +} __packed; +_Static_assert(sizeof(struct ufshci_string_descriptor) == 254, + "bad size for StringDescriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.14 "Device Health Descriptor" + * DeviceHealthDescriptor uses big-endian byte ordering. + */ +struct ufshci_device_healthd_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t bPreEOLInfo; + uint8_t bDeviceLifeTimeEstA; + uint8_t bDeviceLifeTimeEstB; + uint8_t VendorPropInfo[32]; + uint32_t dRefreshTotalCount; + uint32_t dRefreshProgress; +} __packed; +_Static_assert(sizeof(struct ufshci_device_healthd_descriptor) == 45, + "bad size for DeviceHealthDescriptor"); + +/* + * UFS Spec 4.1, section 14.1.5.15 "Vendor Specific Descriptor" + * VendorSpecificDescriptor uses big-endian byte ordering.
+ */ +struct ufshci_vendor_specific_descriptor { + uint8_t bLength; + uint8_t bDescriptorIDN; + uint8_t DATA[254]; +} __packed; +_Static_assert(sizeof(struct ufshci_vendor_specific_descriptor) == 256, + "bad size for VendorSpecificDescriptor"); + +/* UFS Spec 4.1, section 14.2 "Flags" */ +enum ufshci_flags { + UFSHCI_FLAG_F_RESERVED = 0x00, + UFSHCI_FLAG_F_DEVICE_INIT = 0x01, + UFSHCI_FLAG_F_PERMANENT_WP_EN = 0x02, + UFSHCI_FLAG_F_POWER_ON_WP_EN = 0x03, + UFSHCI_FLAG_F_BACKGROUND_OPS_EN = 0x04, + UFSHCI_FLAG_F_DEVICE_LIFE_SPAN_MODE_EN = 0x05, + UFSHCI_FLAG_F_PURGE_ENABLE = 0x06, + UFSHCI_FLAG_F_REFRESH_ENABLE = 0x07, + UFSHCI_FLAG_F_PHY_RESOURCE_REMOVAL = 0x08, + UFSHCI_FLAG_F_BUSY_RTC = 0x09, + UFSHCI_FLAG_F_PERMANENTLY_DISABLE_FW_UPDATE = 0x0b, + UFSHCI_FLAG_F_WRITE_BOOSTER_EN = 0x0e, + UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN = 0x0f, + UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE = 0x10, + UFSHCI_FLAG_F_UNPIN_EN = 0x13, +}; + +/* UFS Spec 4.1, section 14.3 "Attributes" */ +enum ufshci_attributes { + UFSHCI_ATTR_B_BOOT_LUN_EN = 0x00, + UFSHCI_ATTR_B_CURRENT_POWER_MODE = 0x02, + UFSHCI_ATTR_B_ACTIVE_ICC_LEVEL = 0x03, + UFSHCI_ATTR_B_OUT_OF_ORDER_DATA_EN = 0x04, + UFSHCI_ATTR_B_BACKGROUND_OP_STATUS = 0x05, + UFSHCI_ATTR_B_PURGE_STATUS = 0x06, + UFSHCI_ATTR_B_MAX_DATA_IN_SIZE = 0x07, + UFSHCI_ATTR_B_MAX_DATA_OUT_SIZE = 0x08, + UFSHCI_ATTR_D_DYN_CAP_NEEDED = 0x09, + UFSHCI_ATTR_B_REF_CLK_FREQ = 0x0a, + UFSHCI_ATTR_B_CONFIG_DESCR_LOCK = 0x0b, + UFSHCI_ATTR_B_MAX_NUM_OF_RTT = 0x0c, + UFSHCI_ATTR_W_EXCEPTION_EVENT_CONTROL = 0x0d, + UFSHCI_ATTR_W_EXCEPTION_EVENT_STATUS = 0x0e, + UFSHCI_ATTR_D_SECONDS_PASSED = 0x0f, + UFSHCI_ATTR_W_CONTEXT_CONF = 0x10, + UFSHCI_ATTR_B_DEVICE_FFU_STATUS = 0x14, + UFSHCI_ATTR_B_PSA_STATE = 0x15, + UFSHCI_ATTR_D_PSA_DATA_SIZE = 0x16, + UFSHCI_ATTR_B_REF_CLK_GATING_WAIT_TIME = 0x17, + UFSHCI_ATTR_B_DEVICE_CASE_ROUGH_TEMPERATURE = 0x18, + UFSHCI_ATTR_B_DEVICE_TOO_HIGH_TEMP_BOUNDARY = 0x19, + UFSHCI_ATTR_B_DEVICE_TOO_LOW_TEMP_BOUNDARY = 0x1a, + UFSHCI_ATTR_B_THROTTLING_STATUS = 0x1b, + UFSHCI_ATTR_B_WB_BUFFER_FLUSH_STATUS = 0x1c, + UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE = 0x1d, + UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST = 0x1e, + UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE = 0x1f, + UFSHCI_ATTR_B_REFRESH_STATUS = 0x2c, + UFSHCI_ATTR_B_REFRESH_FREQ = 0x2d, + UFSHCI_ATTR_B_REFRESH_UNIT = 0x2e, + UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f, +}; + +#endif /* __UFSHCI_H__ */ diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c new file mode 100644 index 000000000000..55d8363d3287 --- /dev/null +++ b/sys/dev/ufshci/ufshci_ctrlr.c @@ -0,0 +1,503 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. + * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include + +#include "ufshci_private.h" +#include "ufshci_reg.h" + +static int +ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr) +{ + int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); + sbintime_t delta_t = SBT_1US; + uint32_t hce; + + hce = ufshci_mmio_read_4(ctrlr, hce); + + /* If the UFS host controller is already enabled, disable it.
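+ * Clearing HCE resets the host controller, so the re-enable below always + * starts from a clean state.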
*/ + if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) { + hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE); + ufshci_mmio_write_4(ctrlr, hce, hce); + } + + /* Enable UFS host controller */ + hce |= UFSHCIM(UFSHCI_HCE_REG_HCE); + ufshci_mmio_write_4(ctrlr, hce, hce); + + /* + * During controller initialization the value of the HCE bit is + * unstable, so we need to wait a short time before reading the HCE + * value back. + */ + pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1)); + + /* Wait for the HCE flag to change */ + while (1) { + hce = ufshci_mmio_read_4(ctrlr, hce); + if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) + break; + if (timeout - ticks < 0) { + ufshci_printf(ctrlr, + "host controller failed to enable " + "within %d ms\n", + ctrlr->device_init_timeout_in_ms); + return (ENXIO); + } + + pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1)); + delta_t = min(SBT_1MS, delta_t * 3 / 2); + } + + return (0); +} + +int +ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev) +{ + uint32_t ver, cap, hcs, ie; + uint32_t timeout_period, retry_count; + int error; + + ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS; + ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS; + ctrlr->dev = dev; + ctrlr->sc_unit = device_get_unit(dev); + + snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s", + device_get_nameunit(dev)); + + mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL, + MTX_DEF | MTX_RECURSE); + + mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL, + MTX_DEF); + + ver = ufshci_mmio_read_4(ctrlr, ver); + ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver); + ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver); + ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version, + ctrlr->minor_version); + + /* Read Device Capabilities */ + ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap); + ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap); + /* + * TODO: This driver does not yet support multi-queue. + * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if + * multi-queue support is available. + */ + ctrlr->is_mcq_supported = false; + if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported)) + return (ENXIO); + /* + * The maximum transfer size supported by the UFSHCI spec is + * 65535 * 256 KiB. However, we limit the maximum transfer size to + * 1 MiB (256 * 4 KiB) for performance reasons. + */ + ctrlr->page_size = PAGE_SIZE; + ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT; + + timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD; + TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period); + timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD); + timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD); + ctrlr->timeout_period = timeout_period; + + retry_count = UFSHCI_DEFAULT_RETRY_COUNT; + TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count); + ctrlr->retry_count = retry_count; + + /* Disable all interrupts */ + ufshci_mmio_write_4(ctrlr, ie, 0); + + /* Enable Host Controller */ + error = ufshci_ctrlr_enable_host_ctrlr(ctrlr); + if (error) + return (error); + + /* Send DME_LINKSTARTUP command to start the link startup procedure */ + error = ufshci_uic_send_dme_link_startup(ctrlr); + if (error) + return (error); + + /* + * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host + * controller has successfully received a Link Startup UIC command + * response and the UFS device has found a physical link to the + * controller.
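+ * If it is still clear at this point there is no usable link, and the + * attach fails with ENXIO below.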
+ */ + hcs = ufshci_mmio_read_4(ctrlr, hcs); + if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) { + ufshci_printf(ctrlr, "UFS device not found\n"); + return (ENXIO); + } + + /* Enable additional interrupts by programming the IE register. */ + ie = ufshci_mmio_read_4(ctrlr, ie); + ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */ + ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */ + ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */ + ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */ + ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */ + ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */ + ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */ + ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */ + ufshci_mmio_write_4(ctrlr, ie, ie); + + /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */ + + /* Allocate and initialize UTP Task Management Request List. */ + error = ufshci_utm_req_queue_construct(ctrlr); + if (error) + return (error); + + /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */ + error = ufshci_ut_req_queue_construct(ctrlr); + if (error) + return (error); + + /* TODO: Separate IO and Admin slot */ + /* max_hw_pend_io is the number of slots in the transfer_req_queue */ + ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries; + + return (0); +} + +void +ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev) +{ + if (ctrlr->resource == NULL) + goto nores; + + /* TODO: Flush In-flight IOs */ + + /* Release resources */ + ufshci_utm_req_queue_destroy(ctrlr); + ufshci_ut_req_queue_destroy(ctrlr); + + if (ctrlr->tag) + bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); + + if (ctrlr->res) + bus_release_resource(ctrlr->dev, SYS_RES_IRQ, + rman_get_rid(ctrlr->res), ctrlr->res); + + mtx_lock(&ctrlr->sc_mtx); + + ufshci_sim_detach(ctrlr); + + mtx_unlock(&ctrlr->sc_mtx); + + bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, + ctrlr->resource); +nores: + mtx_destroy(&ctrlr->uic_cmd_lock); + mtx_destroy(&ctrlr->sc_mtx); + + return; +} + +int +ufshci_ctrlr_reset(struct ufshci_controller *ctrlr) +{ + uint32_t ie; + int error; + + /* Backup and disable all interrupts */ + ie = ufshci_mmio_read_4(ctrlr, ie); + ufshci_mmio_write_4(ctrlr, ie, 0); + + /* Release resources */ + ufshci_utm_req_queue_destroy(ctrlr); + ufshci_ut_req_queue_destroy(ctrlr); + + /* Reset Host Controller */ + error = ufshci_ctrlr_enable_host_ctrlr(ctrlr); + if (error) + return (error); + + /* Send DME_LINKSTARTUP command to start the link startup procedure */ + error = ufshci_uic_send_dme_link_startup(ctrlr); + if (error) + return (error); + + /* Enable interrupts */ + ufshci_mmio_write_4(ctrlr, ie, ie); + + /* Allocate and initialize UTP Task Management Request List. */ + error = ufshci_utm_req_queue_construct(ctrlr); + if (error) + return (error); + + /* Allocate and initialize UTP Transfer Request List or SQ/CQ. 
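+ * (The reset path reuses the same queue construction helpers as the + * initial attach.)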
*/ + error = ufshci_ut_req_queue_construct(ctrlr); + if (error) + return (error); + + return (0); +} + +int +ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, + struct ufshci_request *req) +{ + return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req, + /*is_admin*/ true)); +} + +int +ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, + struct ufshci_request *req) +{ + return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req, + /*is_admin*/ false)); +} + +int +ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr) +{ + struct ufshci_completion_poll_status status; + + status.done = 0; + ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n"); + return (ENXIO); + } + + return (0); +} + +static void +ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also) +{ + printf("ufshci(4): ufshci_ctrlr_fail\n"); + + ctrlr->is_failed = true; + + /* TODO: task_mgmt_req_queue should be handled as fail */ + + ufshci_req_queue_fail(ctrlr, + &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]); +} + +static void +ufshci_ctrlr_start(struct ufshci_controller *ctrlr) +{ + TSENTER(); + + if (ufshci_ctrlr_send_nop(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* Initialize UFS target device */ + if (ufshci_dev_init(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* Initialize Reference Clock */ + if (ufshci_dev_init_reference_clock(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* Initialize UniPro */ + if (ufshci_dev_init_unipro(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* + * Initialize UIC Power Mode + * QEMU UFS devices do not support UniPro and power mode. + */ + if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) && + ufshci_dev_init_uic_power_mode(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* Initialize UFS Power Mode */ + if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* Read Controller Descriptor (Device, Geometry) */ + if (ufshci_dev_get_descriptor(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + /* TODO: Configure Write Protect */ + + /* TODO: Configure Background Operations */ + + /* TODO: Configure Write Booster */ + + if (ufshci_sim_attach(ctrlr) != 0) { + ufshci_ctrlr_fail(ctrlr, false); + return; + } + + TSEXIT(); +} + +void +ufshci_ctrlr_start_config_hook(void *arg) +{ + struct ufshci_controller *ctrlr = arg; + + TSENTER(); + + if (ufshci_utm_req_queue_enable(ctrlr) == 0 && + ufshci_ut_req_queue_enable(ctrlr) == 0) + ufshci_ctrlr_start(ctrlr); + else + ufshci_ctrlr_fail(ctrlr, false); + + ufshci_sysctl_initialize_ctrlr(ctrlr); + config_intrhook_disestablish(&ctrlr->config_hook); + + TSEXIT(); +} + +/* + * Poll all the queues enabled on the device for completion.
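+ * In single doorbell (SDB) mode there is one transfer queue, so this + * reduces to checking the interrupt status register and that queue.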
+ */ +void +ufshci_ctrlr_poll(struct ufshci_controller *ctrlr) +{ + uint32_t is; + + is = ufshci_mmio_read_4(ctrlr, is); + + /* UIC error */ + if (is & UFSHCIM(UFSHCI_IS_REG_UE)) { + uint32_t uecpa, uecdl, uecn, uect, uecdme; + + /* UECPA for Host UIC Error Code within PHY Adapter Layer */ + uecpa = ufshci_mmio_read_4(ctrlr, uecpa); + if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) { + ufshci_printf(ctrlr, "UECPA error code: 0x%x\n", + UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa)); + } + /* UECDL for Host UIC Error Code within Data Link Layer */ + uecdl = ufshci_mmio_read_4(ctrlr, uecdl); + if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) { + ufshci_printf(ctrlr, "UECDL error code: 0x%x\n", + UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl)); + } + /* UECN for Host UIC Error Code within Network Layer */ + uecn = ufshci_mmio_read_4(ctrlr, uecn); + if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) { + ufshci_printf(ctrlr, "UECN error code: 0x%x\n", + UFSHCIV(UFSHCI_UECN_REG_EC, uecn)); + } + /* UECT for Host UIC Error Code within Transport Layer */ + uect = ufshci_mmio_read_4(ctrlr, uect); + if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) { + ufshci_printf(ctrlr, "UECT error code: 0x%x\n", + UFSHCIV(UFSHCI_UECT_REG_EC, uect)); + } + /* UECDME for Host UIC Error Code within DME subcomponent */ + uecdme = ufshci_mmio_read_4(ctrlr, uecdme); + if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) { + ufshci_printf(ctrlr, "UECDME error code: 0x%x\n", + UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme)); + } + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE)); + } + /* Device Fatal Error Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) { + ufshci_printf(ctrlr, "Device fatal error on ISR\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES)); + } + /* UTP Error Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) { + ufshci_printf(ctrlr, "UTP error on ISR\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES)); + } + /* Host Controller Fatal Error Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) { + ufshci_printf(ctrlr, "Host controller fatal error on ISR\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES)); + } + /* System Bus Fatal Error Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) { + ufshci_printf(ctrlr, "System bus fatal error on ISR\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES)); + } + /* Crypto Engine Fatal Error Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) { + ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES)); + } + /* UTP Task Management Request Completion Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) { + ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS)); + /* TODO: Implement UTMR completion */ + } + /* UTP Transfer Request Completion Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) { + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS)); + ufshci_req_queue_process_completions( + &ctrlr->transfer_req_queue); + } + /* MCQ CQ Event Status */ + if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) { + /* TODO: We need to process completion Queue Pairs */ + ufshci_printf(ctrlr, "MCQ completion not yet implemented\n"); + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES)); + } +} + +/* + * Handle the single-vector interrupt case: num_io_queues will be 1 and + * there's only a single vector, so the handler simply polls the controller + * once per interrupt.
+ */ +void +ufshci_ctrlr_shared_handler(void *arg) +{ + struct ufshci_controller *ctrlr = arg; + + ufshci_ctrlr_poll(ctrlr); +} + +void +ufshci_reg_dump(struct ufshci_controller *ctrlr) +{ + ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n"); + + UFSHCI_DUMP_REG(ctrlr, cap); + UFSHCI_DUMP_REG(ctrlr, mcqcap); + UFSHCI_DUMP_REG(ctrlr, ver); + UFSHCI_DUMP_REG(ctrlr, ext_cap); + UFSHCI_DUMP_REG(ctrlr, hcpid); + UFSHCI_DUMP_REG(ctrlr, hcmid); + UFSHCI_DUMP_REG(ctrlr, ahit); + UFSHCI_DUMP_REG(ctrlr, is); + UFSHCI_DUMP_REG(ctrlr, ie); + UFSHCI_DUMP_REG(ctrlr, hcsext); + UFSHCI_DUMP_REG(ctrlr, hcs); + UFSHCI_DUMP_REG(ctrlr, hce); + UFSHCI_DUMP_REG(ctrlr, uecpa); + UFSHCI_DUMP_REG(ctrlr, uecdl); + UFSHCI_DUMP_REG(ctrlr, uecn); + UFSHCI_DUMP_REG(ctrlr, uect); + UFSHCI_DUMP_REG(ctrlr, uecdme); + + ufshci_printf(ctrlr, "========================================\n"); +} diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c new file mode 100644 index 000000000000..ddf28c58fa88 --- /dev/null +++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c @@ -0,0 +1,53 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. + * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include "ufshci_private.h" + +void +ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn, + void *cb_arg) +{ + struct ufshci_request *req; + struct ufshci_nop_out_upiu *upiu; + + req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg); + + req->request_size = sizeof(struct ufshci_nop_out_upiu); + req->response_size = sizeof(struct ufshci_nop_in_upiu); + + upiu = (struct ufshci_nop_out_upiu *)&req->request_upiu; + memset(upiu, 0, req->request_size); + upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT; + + ufshci_ctrlr_submit_admin_request(ctrlr, req); +} + +void +ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr, + ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param) +{ + struct ufshci_request *req; + struct ufshci_query_request_upiu *upiu; + + req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg); + + req->request_size = sizeof(struct ufshci_query_request_upiu); + req->response_size = sizeof(struct ufshci_query_response_upiu); + + upiu = (struct ufshci_query_request_upiu *)&req->request_upiu; + memset(upiu, 0, req->request_size); + upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST; + upiu->header.ext_iid_or_function = param.function; + upiu->opcode = param.opcode; + upiu->idn = param.type; + upiu->index = param.index; + upiu->selector = param.selector; + upiu->value_64 = param.value; + upiu->length = param.desc_size; + + ufshci_ctrlr_submit_admin_request(ctrlr, req); +} diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c new file mode 100644 index 000000000000..a0e32914e2aa --- /dev/null +++ b/sys/dev/ufshci/ufshci_dev.c @@ -0,0 +1,428 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
+ * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include + +#include "ufshci_private.h" +#include "ufshci_reg.h" + +static int +ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr, + enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector, + void *desc, size_t desc_size) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR; + param.type = desc_type; + param.index = index; + param.selector = selector; + param.value = 0; + param.desc_size = desc_size; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n"); + return (ENXIO); + } + + memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data, + desc_size); + + return (0); +} + +static int +ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr, + struct ufshci_device_descriptor *desc) +{ + return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0, + desc, sizeof(struct ufshci_device_descriptor))); +} + +static int +ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr, + struct ufshci_geometry_descriptor *desc) +{ + return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0, + 0, desc, sizeof(struct ufshci_geometry_descriptor))); +} + +static int +ufshci_dev_read_flag(struct ufshci_controller *ctrlr, + enum ufshci_flags flag_type, uint8_t *flag) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG; + param.type = flag_type; + param.index = 0; + param.selector = 0; + param.value = 0; + /* Flag queries carry no descriptor data. */ + param.desc_size = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n"); + return (ENXIO); + } + + *flag = status.cpl.response_upiu.query_response_upiu.flag_value; + + return (0); +} + +static int +ufshci_dev_set_flag(struct ufshci_controller *ctrlr, + enum ufshci_flags flag_type) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG; + param.type = flag_type; + param.index = 0; + param.selector = 0; + param.value = 0; + /* Flag queries carry no descriptor data. */ + param.desc_size = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); + if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n"); + return (ENXIO); + } + + return (0); +} + +static int +ufshci_dev_write_attribute(struct ufshci_controller *ctrlr, + enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, + uint64_t value) +{ + struct ufshci_completion_poll_status status; + struct ufshci_query_param param; + + param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; + param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE; + param.type = attr_type; + param.index = index; + param.selector = selector; + param.value = value; + /* Attribute queries carry no descriptor data. */ + param.desc_size = 0; + + status.done = 0; + ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, + &status, param); + ufshci_completion_poll(&status); +
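+ /* status.error is set when the query response indicates failure; see + * enum ufshci_query_response_code for the codes a device may return. */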
if (status.error) { + ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n"); + return (ENXIO); + } + + return (0); +} + +int +ufshci_dev_init(struct ufshci_controller *ctrlr) +{ + int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); + sbintime_t delta_t = SBT_1US; + uint8_t flag; + int error; + const uint8_t device_init_completed = 0; + + error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT); + if (error) + return (error); + + /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */ + while (1) { + error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT, + &flag); + if (error) + return (error); + if (flag == device_init_completed) + break; + if (timeout - ticks < 0) { + ufshci_printf(ctrlr, + "device init did not become %d " + "within %d ms\n", + device_init_completed, + ctrlr->device_init_timeout_in_ms); + return (ENXIO); + } + + pause_sbt("ufshciinit", delta_t, 0, C_PREL(1)); + delta_t = min(SBT_1MS, delta_t * 3 / 2); + } + + return (0); +} + +int +ufshci_dev_reset(struct ufshci_controller *ctrlr) +{ + if (ufshci_uic_send_dme_endpoint_reset(ctrlr)) + return (ENXIO); + + return (ufshci_dev_init(ctrlr)); +} + +int +ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr) +{ + int error; + uint8_t index, selector; + + index = 0; /* bRefClkFreq is device type attribute */ + selector = 0; /* bRefClkFreq is device type attribute */ + + error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ, + index, selector, ctrlr->ref_clk); + if (error) + return (error); + + return (0); +} + +int +ufshci_dev_init_unipro(struct ufshci_controller *ctrlr) +{ + uint32_t pa_granularity, peer_pa_granularity; + uint32_t t_activate, peer_t_activate; + + /* + * Unipro Version: + * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41, + * 1 = 1.40, 0 = Reserved + */ + if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo, + &ctrlr->unipro_version)) + return (ENXIO); + if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo, + &ctrlr->ufs_dev.unipro_version)) + return (ENXIO); + + /* + * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time + * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us + */ + if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity)) + return (ENXIO); + if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, + &peer_pa_granularity)) + return (ENXIO); + + /* + * PA_TActivate: Time to wait before activating a burst in order to + * wake-up peer M-RX + * UniPro automatically sets timing information such as PA_TActivate + * through the PACP_CAP_EXT1_ind command during Link Startup operation. + */ + if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate)) + return (ENXIO); + if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &peer_t_activate)) + return (ENXIO); + + if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) { + /* + * Intel Lakefield UFSHCI has a quirk. We need to add 200us to + * the peer's PA_TActivate.
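+ * PA_TActivate is expressed in PA_Granularity units; e.g. with the + * 100us granularity, adding 2 units provides the extra 200us.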
+ */ + if (pa_granularity == peer_pa_granularity) { + peer_t_activate = t_activate + 2; + if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate, + peer_t_activate)) + return (ENXIO); + } + } + + return (0); +} + +int +ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) +{ + /* HSSeries: A = 1, B = 2 */ + const uint32_t hs_series = 2; + /* + * TX/RX PWRMode: + * - TX[3:0], RX[7:4] + * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5 + */ + const uint32_t fast_mode = 1; + const uint32_t rx_bit_shift = 4; + const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode; + + /* Update lanes with available TX/RX lanes */ + if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes, + &ctrlr->max_tx_lanes)) + return (ENXIO); + if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes, + &ctrlr->max_rx_lanes)) + return (ENXIO); + + /* Get max HS-GEAR value */ + if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear, + &ctrlr->max_rx_hs_gear)) + return (ENXIO); + + /* Set the data lane to max */ + ctrlr->tx_lanes = ctrlr->max_tx_lanes; + ctrlr->rx_lanes = ctrlr->max_rx_lanes; + if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes, + ctrlr->tx_lanes)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes, + ctrlr->rx_lanes)) + return (ENXIO); + + /* Set HS-GEAR to max gear */ + ctrlr->hs_gear = ctrlr->max_rx_hs_gear; + if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear)) + return (ENXIO); + + /* + * Set termination + * - HS-MODE = ON / LS-MODE = OFF + */ + if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true)) + return (ENXIO); + + /* Set HSSeries (A = 1, B = 2) */ + if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series)) + return (ENXIO); + + /* Set Timeout values */ + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0, + DL_FC0ProtectionTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1, + DL_TC0ReplayTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2, + DL_AFC0ReqTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3, + DL_FC0ProtectionTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4, + DL_TC0ReplayTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5, + DL_AFC0ReqTimeOutVal_Default)) + return (ENXIO); + + if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal, + DL_FC0ProtectionTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal, + DL_TC0ReplayTimeOutVal_Default)) + return (ENXIO); + if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal, + DL_AFC0ReqTimeOutVal_Default)) + return (ENXIO); + + /* Set TX/RX PWRMode */ + if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) + return (ENXIO); + + /* Wait for the power mode change to complete. */ + if (ufshci_uic_power_mode_ready(ctrlr)) { + ufshci_reg_dump(ctrlr); + return (ENXIO); + } + + /* Clear 'Power Mode completion status' */ + ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS)); + + if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) { + /* + * Intel Lakefield UFSHCI has a quirk. + * We need to wait 1250us and clear the DME error.
+ */ + pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1)); + + /* Test with dme_peer_get to make sure there are no errors. */ + if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL)) + return (ENXIO); + } + + return (0); +} + +int +ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr) +{ + /* TODO: Need to implement */ + + return (0); +} + +int +ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr) +{ + struct ufshci_device *device = &ctrlr->ufs_dev; + /* + * The device density unit is defined in the spec as 512 bytes. + * qTotalRawDeviceCapacity uses big-endian byte ordering. + */ + const uint32_t device_density_unit = 512; + uint32_t ver; + int error; + + error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc); + if (error) + return (error); + + ver = be16toh(device->dev_desc.wSpecVersion); + ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n", + UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver), + UFSHCIV(UFSHCI_VER_REG_VS, ver)); + ufshci_printf(ctrlr, "%u enabled LUNs found\n", + device->dev_desc.bNumberLU); + + error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc); + if (error) + return (error); + + if (device->geo_desc.bMaxNumberLU == 0) { + device->max_lun_count = 8; + } else if (device->geo_desc.bMaxNumberLU == 1) { + device->max_lun_count = 32; + } else { + ufshci_printf(ctrlr, + "Invalid Geometry Descriptor bMaxNumberLU value=%d\n", + device->geo_desc.bMaxNumberLU); + return (ENXIO); + } + ctrlr->max_lun_count = device->max_lun_count; + + ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n", + be64toh(device->geo_desc.qTotalRawDeviceCapacity) * + device_density_unit); + + return (0); +} diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c new file mode 100644 index 000000000000..65a69ee0b518 --- /dev/null +++ b/sys/dev/ufshci/ufshci_pci.c @@ -0,0 +1,260 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "ufshci_private.h" + +static int ufshci_pci_probe(device_t); +static int ufshci_pci_attach(device_t); +static int ufshci_pci_detach(device_t); + +static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr); + +static device_method_t ufshci_pci_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ufshci_pci_probe), + DEVMETHOD(device_attach, ufshci_pci_attach), + DEVMETHOD(device_detach, ufshci_pci_detach), + /* TODO: Implement Suspend, Resume */ + { 0, 0 } +}; + +static driver_t ufshci_pci_driver = { + "ufshci", + ufshci_pci_methods, + sizeof(struct ufshci_controller), +}; + +DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0); + +static struct _pcsid { + uint32_t devid; + const char *desc; + uint32_t ref_clk; + uint32_t quirks; +} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz, + UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE }, + { 0x98fa8086, "Intel Lakefield UFS Host Controller", + UFSHCI_REF_CLK_19_2MHz, + UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE | + UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE }, + { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz }, + { 0x00000000, NULL } }; + +static int +ufshci_pci_probe(device_t device) +{ + struct ufshci_controller *ctrlr = device_get_softc(device); + uint32_t devid = pci_get_devid(device); + struct _pcsid *ep = pci_ids; + + while (ep->devid && ep->devid != devid) + ++ep; + + if (ep->devid) { + ctrlr->quirks = ep->quirks; + ctrlr->ref_clk = ep->ref_clk; + } + + if (ep->desc) { + device_set_desc(device, ep->desc); + return (BUS_PROBE_DEFAULT); + } + + return (ENXIO); +} + +static int +ufshci_pci_allocate_bar(struct ufshci_controller *ctrlr) +{ + ctrlr->resource_id = PCIR_BAR(0); + + ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, + &ctrlr->resource_id, RF_ACTIVE); + + if (ctrlr->resource == NULL) { + ufshci_printf(ctrlr, "unable to allocate pci resource\n"); + return (ENOMEM); + } + + ctrlr->bus_tag = rman_get_bustag(ctrlr->resource); + ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource); + ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle; + + return (0); +} + +static int +ufshci_pci_attach(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + int status; + + ctrlr->dev = dev; + status = ufshci_pci_allocate_bar(ctrlr); + if (status != 0) + goto bad; + pci_enable_busmaster(dev); + status = ufshci_pci_setup_interrupts(ctrlr); + if (status != 0) + goto bad; + + return (ufshci_attach(dev)); +bad: + if (ctrlr->resource != NULL) { + bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, + ctrlr->resource); + } + + if (ctrlr->tag) + bus_teardown_intr(dev, ctrlr->res, ctrlr->tag); + + if (ctrlr->res) + bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), + ctrlr->res); + + if (ctrlr->msi_count > 0) + pci_release_msi(dev); + + return (status); +} + +static int +ufshci_pci_detach(device_t dev) +{ + struct ufshci_controller *ctrlr = device_get_softc(dev); + int error; + + error = ufshci_detach(dev); + if (ctrlr->msi_count > 0) + pci_release_msi(dev); + pci_disable_busmaster(dev); + return (error); +} + +static int +ufshci_pci_setup_shared(struct ufshci_controller *ctrlr, int rid) +{ + int error; + + ctrlr->num_io_queues = 1; + ctrlr->rid = rid; + ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, + &ctrlr->rid, RF_SHAREABLE | 
RF_ACTIVE); + if (ctrlr->res == NULL) { + ufshci_printf(ctrlr, "unable to allocate shared interrupt\n"); + return (ENOMEM); + } + + error = bus_setup_intr(ctrlr->dev, ctrlr->res, + INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler, + ctrlr, &ctrlr->tag); + if (error) { + ufshci_printf(ctrlr, "unable to setup shared interrupt\n"); + return (error); + } + + return (0); +} + +static int +ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr) +{ + device_t dev = ctrlr->dev; + int force_intx = 0; + int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq; + int num_vectors_requested; + + TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx); + if (force_intx) + goto intx; + + if (pci_msix_count(dev) == 0) + goto msi; + + /* + * Try to allocate one MSI-X per core for I/O queues, plus one + * for the admin queue, but accept a single shared MSI-X if we + * have to. Fall back to MSI if we can't get any MSI-X. + */ + + /* + * TODO: Need to implement MCQ (Multi-Circular Queue) + * Example: num_io_queues = mp_ncpus; + */ + num_io_queues = 1; + + TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues); + if (num_io_queues < 1 || num_io_queues > mp_ncpus) + num_io_queues = mp_ncpus; + + per_cpu_io_queues = 1; + TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues); + if (per_cpu_io_queues == 0) + num_io_queues = 1; + + min_cpus_per_ioq = smp_threads_per_core; + TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq); + if (min_cpus_per_ioq > 1) { + num_io_queues = min(num_io_queues, + max(1, mp_ncpus / min_cpus_per_ioq)); + } + + num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1)); + +again: + if (num_io_queues > vm_ndomains) + num_io_queues -= num_io_queues % vm_ndomains; + num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev)); + ctrlr->msi_count = num_vectors_requested; + if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) { + ufshci_printf(ctrlr, "unable to allocate MSI-X\n"); + ctrlr->msi_count = 0; + goto msi; + } + if (ctrlr->msi_count == 1) + return (ufshci_pci_setup_shared(ctrlr, 1)); + if (ctrlr->msi_count != num_vectors_requested) { + pci_release_msi(dev); + num_io_queues = ctrlr->msi_count - 1; + goto again; + } + + ctrlr->num_io_queues = num_io_queues; + return (0); + +msi: + /* + * Try to allocate 2 MSIs (admin and I/O queues), but accept a single + * shared one if we have to. Fall back to INTx if we can't get any MSI. + */ + ctrlr->msi_count = min(pci_msi_count(dev), 2); + if (ctrlr->msi_count > 0) { + if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) { + ufshci_printf(ctrlr, "unable to allocate MSI\n"); + ctrlr->msi_count = 0; + } else if (ctrlr->msi_count == 2) { + ctrlr->num_io_queues = 1; + return (0); + } + } + +intx: + return (ufshci_pci_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0)); +} diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h new file mode 100644 index 000000000000..17cef2bdf2a8 --- /dev/null +++ b/sys/dev/ufshci/ufshci_private.h @@ -0,0 +1,508 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#ifndef __UFSHCI_PRIVATE_H__ +#define __UFSHCI_PRIVATE_H__ + +#ifdef _KERNEL +#include +#else /* !_KERNEL */ +#include +#include +#endif /* _KERNEL */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ufshci.h" + +MALLOC_DECLARE(M_UFSHCI); + +#define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */ +#define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */ +#define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */ +#define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */ +#define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */ + +#define UFSHCI_DEFAULT_RETRY_COUNT (4) + +#define UFSHCI_UTR_ENTRIES (32) +#define UFSHCI_UTRM_ENTRIES (8) + +struct ufshci_controller; + +struct ufshci_completion_poll_status { + struct ufshci_completion cpl; + int done; + bool error; +}; + +struct ufshci_request { + struct ufshci_upiu request_upiu; + size_t request_size; + size_t response_size; + + struct memdesc payload; + enum ufshci_data_direction data_direction; + ufshci_cb_fn_t cb_fn; + void *cb_arg; + bool is_admin; + int32_t retries; + bool payload_valid; + bool timeout; + bool spare[2]; /* Future use */ + STAILQ_ENTRY(ufshci_request) stailq; +}; + +enum ufshci_slot_state { + UFSHCI_SLOT_STATE_FREE = 0x0, + UFSHCI_SLOT_STATE_RESERVED = 0x1, + UFSHCI_SLOT_STATE_SCHEDULED = 0x2, + UFSHCI_SLOT_STATE_TIMEOUT = 0x3, + UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4, +}; + +struct ufshci_tracker { + struct ufshci_request *req; + struct ufshci_req_queue *req_queue; + struct ufshci_hw_queue *hwq; + uint8_t slot_num; + enum ufshci_slot_state slot_state; + size_t response_size; + sbintime_t deadline; + + bus_dmamap_t payload_dma_map; + uint64_t payload_addr; + + struct ufshci_utp_cmd_desc *ucd; + bus_addr_t ucd_bus_addr; + + uint16_t prdt_off; + uint16_t prdt_entry_cnt; +}; + +enum ufshci_queue_mode { + UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode*/ + UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode*/ +}; + +/* + * UFS uses slot-based Single Doorbell (SDB) mode for request submission by + * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To + * minimize duplicated code between SDB and MCQ, mode dependent operations are + * extracted into ufshci_qops. + */ +struct ufshci_qops { + int (*construct)(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue, uint32_t num_entries, + bool is_task_mgmt); + void (*destroy)(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue); + struct ufshci_hw_queue *(*get_hw_queue)( + struct ufshci_req_queue *req_queue); + int (*enable)(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue); + int (*reserve_slot)(struct ufshci_req_queue *req_queue, + struct ufshci_tracker **tr); + int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue, + struct ufshci_tracker **tr); + void (*ring_doorbell)(struct ufshci_controller *ctrlr, + struct ufshci_tracker *tr); + void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr, + struct ufshci_tracker *tr); + bool (*process_cpl)(struct ufshci_req_queue *req_queue); + int (*get_inflight_io)(struct ufshci_controller *ctrlr); +}; + +#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */ + +/* + * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ + * (ring buffer) modes. 
Fields are shared; some such as sq_head, sq_tail and + * cq_head are not used in SDB but used in MCQ. + */ +struct ufshci_hw_queue { + uint32_t id; + int domain; + int cpu; + + struct ufshci_utp_xfer_req_desc *utrd; + + bus_dma_tag_t dma_tag_queue; + bus_dmamap_t queuemem_map; + bus_addr_t req_queue_addr; + + uint32_t num_entries; + uint32_t num_trackers; + + /* + * A Request List using the single doorbell method uses a dedicated + * ufshci_tracker, one per slot. + */ + struct ufshci_tracker **act_tr; + + uint32_t sq_head; /* MCQ mode */ + uint32_t sq_tail; /* MCQ mode */ + uint32_t cq_head; /* MCQ mode */ + + uint32_t phase; + int64_t num_cmds; + int64_t num_intr_handler_calls; + int64_t num_retries; + int64_t num_failures; + + struct mtx_padalign qlock; +}; + +struct ufshci_req_queue { + struct ufshci_controller *ctrlr; + int domain; + + /* + * queue_mode: active transfer scheme + * UFSHCI_Q_MODE_SDB - legacy single-doorbell list + * UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+) + */ + enum ufshci_queue_mode queue_mode; + + uint8_t num_q; + struct ufshci_hw_queue *hwq; + + struct ufshci_qops qops; + + bool is_task_mgmt; + uint32_t num_entries; + uint32_t num_trackers; + + /* Shared DMA resource */ + struct ufshci_utp_cmd_desc *ucd; + + bus_dma_tag_t dma_tag_ucd; + bus_dma_tag_t dma_tag_payload; + + bus_dmamap_t ucdmem_map; + + bus_addr_t ucd_addr; +}; + +struct ufshci_device { + uint32_t max_lun_count; + + struct ufshci_device_descriptor dev_desc; + struct ufshci_geometry_descriptor geo_desc; + + uint32_t unipro_version; +}; + +/* + * One of these per allocated device. + */ +struct ufshci_controller { + device_t dev; + + uint32_t quirks; +#define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \ + 1 /* QEMU does not support UIC POWER MODE */ +#define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \ + 2 /* Need an additional 200us of PA_TActivate */ +#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \ + 4 /* Need to wait 1250us after power mode change */ + + uint32_t ref_clk; + + struct cam_sim *ufshci_sim; + struct cam_path *ufshci_path; + + struct mtx sc_mtx; + uint32_t sc_unit; + uint8_t sc_name[16]; + + struct ufshci_device ufs_dev; + + bus_space_tag_t bus_tag; + bus_space_handle_t bus_handle; + int resource_id; + struct resource *resource; + + /* Currently, there is no UFSHCI that supports MSI, MSI-X. */ + int msi_count; + + /* Fields for tracking progress during controller initialization. */ + struct intr_config_hook config_hook; + + /* For shared legacy interrupt.
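+ * (Also used when we fall back to a single MSI or MSI-X vector.)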
*/ + int rid; + struct resource *res; + void *tag; + + uint32_t major_version; + uint32_t minor_version; + + uint32_t num_io_queues; + uint32_t max_hw_pend_io; + + /* Maximum logical unit number */ + uint32_t max_lun_count; + + /* Maximum I/O size in bytes */ + uint32_t max_xfer_size; + + /* Controller capabilities (CAP register) */ + uint32_t cap; + + /* Page size that we're currently using */ + uint32_t page_size; + + /* Timeout value on device initialization */ + uint32_t device_init_timeout_in_ms; + + /* Timeout value on UIC command */ + uint32_t uic_cmd_timeout_in_ms; + + /* UTMR/UTR queue timeout period in seconds */ + uint32_t timeout_period; + + /* UTMR/UTR queue retry count */ + uint32_t retry_count; + + /* UFS Host Controller Interface Registers */ + struct ufshci_registers *regs; + + /* UFS Transport Protocol Layer (UTP) */ + struct ufshci_req_queue task_mgmt_req_queue; + struct ufshci_req_queue transfer_req_queue; + bool is_single_db_supported; /* 0 = supported */ + bool is_mcq_supported; /* 1 = supported */ + + /* UFS Interconnect Layer (UIC) */ + struct mtx uic_cmd_lock; + uint32_t unipro_version; + uint8_t hs_gear; + uint32_t tx_lanes; + uint32_t rx_lanes; + uint32_t max_rx_hs_gear; + uint32_t max_tx_lanes; + uint32_t max_rx_lanes; + + bool is_failed; +}; + +#define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg) + +#define ufshci_mmio_read_4(sc, reg) \ + bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \ + ufshci_mmio_offsetof(reg)) + +#define ufshci_mmio_write_4(sc, reg, val) \ + bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \ + ufshci_mmio_offsetof(reg), val) + +#define ufshci_printf(ctrlr, fmt, args...) \ + device_printf(ctrlr->dev, fmt, ##args) + +/* UFSHCI */ +void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl, + bool error); + +/* SIM */ +int ufshci_sim_attach(struct ufshci_controller *ctrlr); +void ufshci_sim_detach(struct ufshci_controller *ctrlr); + +/* Controller */ +int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev); +void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev); +int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr); +/* ctrlr defined as void * to allow use with config_intrhook.
*/ +void ufshci_ctrlr_start_config_hook(void *arg); +void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr); + +int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr, + struct ufshci_request *req); +int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr, + struct ufshci_request *req); +int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr); + +void ufshci_reg_dump(struct ufshci_controller *ctrlr); + +/* Device */ +int ufshci_dev_init(struct ufshci_controller *ctrlr); +int ufshci_dev_reset(struct ufshci_controller *ctrlr); +int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr); +int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr); +int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr); +int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr); +int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr); + +/* Controller Command */ +void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, + ufshci_cb_fn_t cb_fn, void *cb_arg); +void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr, + ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param); +void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr, + ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len, + uint32_t data_len, uint8_t lun, bool is_write); + +/* Request Queue */ +bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue); +int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr); +int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr); +void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr); +void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr); +int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr); +int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr); +void ufshci_req_queue_fail(struct ufshci_controller *ctrlr, + struct ufshci_hw_queue *hwq); +int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue, + struct ufshci_request *req, bool is_admin); +void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr); + +/* Request Single Doorbell Queue */ +int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue, uint32_t num_entries, + bool is_task_mgmt); +void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue); +struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue( + struct ufshci_req_queue *req_queue); +int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue); +int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue, + struct ufshci_tracker **tr); +void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr, + struct ufshci_tracker *tr); +void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr, + struct ufshci_tracker *tr); +bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue); +int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr); + +/* UIC Command */ +int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr); +int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr); +int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr); +int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute, + uint32_t *return_value); +int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute, + uint32_t value); +int ufshci_uic_send_dme_peer_get(struct 
ufshci_controller *ctrlr,
+    uint16_t attribute, uint32_t *return_value);
+int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
+    uint16_t attribute, uint32_t value);
+int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);
+
+/* SYSCTL */
+void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);
+
+int ufshci_attach(device_t dev);
+int ufshci_detach(device_t dev);
+
+/*
+ * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
+ * limited contexts where the caller knows it's OK to block briefly while the
+ * command runs. The ISR will run the callback which will set status->done to
+ * true, usually within microseconds. If not, then after one second the
+ * timeout handler should reset the controller and abort all outstanding
+ * requests, including this polled one. If it still has not completed after
+ * ten seconds, then something is wrong with the driver, and panic is the
+ * only way to recover.
+ *
+ * Most commands using this interface aren't actual I/O to the drive's media,
+ * so they complete within a few microseconds. Adaptively spin for one tick to
+ * catch the vast majority of these without waiting for a tick plus scheduling
+ * delays. Since these are on startup, this drastically reduces startup time.
+ */
+static __inline void
+ufshci_completion_poll(struct ufshci_completion_poll_status *status)
+{
+	int timeout = ticks + 10 * hz;
+	sbintime_t delta_t = SBT_1US;
+
+	while (!atomic_load_acq_int(&status->done)) {
+		if (timeout - ticks < 0)
+			panic(
+			    "UFSHCI polled command failed to complete within 10s.");
+		pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
+		delta_t = min(SBT_1MS, delta_t * 3 / 2);
+	}
+}
+
+static __inline void
+ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+	uint64_t *bus_addr = (uint64_t *)arg;
+
+	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
+	if (error != 0)
+		printf("ufshci_single_map err %d\n", error);
+	*bus_addr = seg[0].ds_addr;
+}
+
+static __inline struct ufshci_request *
+_ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct ufshci_request *req;
+
+	KASSERT(how == M_WAITOK || how == M_NOWAIT,
+	    ("_ufshci_allocate_request: invalid how %d", how));
+
+	req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
+	if (req != NULL) {
+		req->cb_fn = cb_fn;
+		req->cb_arg = cb_arg;
+		req->timeout = true;
+	}
+	return (req);
+}
+
+static __inline struct ufshci_request *
+ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
+    const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct ufshci_request *req;
+
+	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
+	if (req != NULL) {
+		if (payload_size) {
+			req->payload = memdesc_vaddr(payload, payload_size);
+			req->payload_valid = true;
+		}
+	}
+	return (req);
+}
+
+static __inline struct ufshci_request *
+ufshci_allocate_request_bio(struct bio *bio, const int how,
+    ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct ufshci_request *req;
+
+	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
+	if (req != NULL) {
+		req->payload = memdesc_bio(bio);
+		req->payload_valid = true;
+	}
+	return (req);
+}
+
+#define ufshci_free_request(req) free(req, M_UFSHCI)
+
+void ufshci_ctrlr_shared_handler(void *arg);
+
+static devclass_t ufshci_devclass;
+
+#endif /* __UFSHCI_PRIVATE_H__ */
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
new file mode 100644
index 000000000000..6c9b3e2c8c04
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -0,0 +1,469 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#ifndef __UFSHCI_REG_H__
+#define __UFSHCI_REG_H__
+
+#include
+#include
+
+/* UFSHCI 4.1, section 5.1 Register Map */
+struct ufshci_registers {
+	/* Host Capabilities (00h) */
+	uint32_t cap; /* Host Controller Capabilities */
+	uint32_t mcqcap; /* Multi-Circular Queue Capability Register */
+	uint32_t ver; /* UFS Version */
+	uint32_t ext_cap; /* Extended Controller Capabilities */
+	uint32_t hcpid; /* Product ID */
+	uint32_t hcmid; /* Manufacturer ID */
+	uint32_t ahit; /* Auto-Hibernate Idle Timer */
+	uint32_t reserved1;
+	/* Operation and Runtime (20h) */
+	uint32_t is; /* Interrupt Status */
+	uint32_t ie; /* Interrupt Enable */
+	uint32_t reserved2;
+	uint32_t hcsext; /* Host Controller Status Extended */
+	uint32_t hcs; /* Host Controller Status */
+	uint32_t hce; /* Host Controller Enable */
+	uint32_t uecpa; /* Host UIC Error Code PHY Adapter Layer */
+	uint32_t uecdl; /* Host UIC Error Code Data Link Layer */
+	uint32_t uecn; /* Host UIC Error Code Network Layer */
+	uint32_t uect; /* Host UIC Error Code Transport Layer */
+	uint32_t uecdme; /* Host UIC Error Code DME */
+	uint32_t utriacr; /* Interrupt Aggregation Control */
+	/* UTP Transfer (50h) */
+	uint32_t utrlba; /* UTRL Base Address */
+	uint32_t utrlbau; /* UTRL Base Address Upper 32-Bits */
+	uint32_t utrldbr; /* UTRL DoorBell Register */
+	uint32_t utrlclr; /* UTRL CLear Register */
+	uint32_t utrlrsr; /* UTRL Run-Stop Register */
+	uint32_t utrlcnr; /* UTRL Completion Notification */
+	uint64_t reserved3;
+	/* UTP Task Management (70h) */
+	uint32_t utmrlba; /* UTMRL Base Address */
+	uint32_t utmrlbau; /* UTMRL Base Address Upper 32-Bits */
+	uint32_t utmrldbr; /* UTMRL DoorBell Register */
+	uint32_t utmrlclr; /* UTMRL CLear Register */
+	uint32_t utmrlrsr; /* UTMRL Run-Stop Register */
+	uint8_t reserved4[12];
+	/* UIC Command (90h) */
+	uint32_t uiccmd; /* UIC Command Register */
+	uint32_t ucmdarg1; /* UIC Command Argument 1 */
+	uint32_t ucmdarg2; /* UIC Command Argument 2 */
+	uint32_t ucmdarg3; /* UIC Command Argument 3 */
+	uint8_t reserved5[16];
+	/* UMA (B0h) */
+	uint8_t reserved6[16]; /* Reserved for Unified Memory Extension */
+	/* Vendor Specific (C0h) */
+	uint8_t vendor[64]; /* Vendor Specific Registers */
+	/* Crypto (100h) */
+	uint32_t ccap; /* Crypto Capability */
+	uint32_t reserved7[511];
+	/* Config (300h) */
+	uint32_t config; /* Global Configuration */
+	uint8_t reserved9[124];
+	/* MCQ Configuration (380h) */
+	uint32_t mcqconfig; /* MCQ Config Register */
+	/* Event Specific Interrupt Lower Base Address */
+	uint32_t esilba;
+	/* Event Specific Interrupt Upper Base Address */
+	uint32_t esiuba;
+	/* TODO: Need to define SQ/CQ registers */
+};
+
+/* Register field definitions */
+#define UFSHCI__REG__SHIFT (0)
+#define UFSHCI__REG__MASK (0)
+
+/*
+ * UFSHCI 4.1, section 5.2.1, Offset 00h: CAP
+ * Controller Capabilities
+ */
+#define UFSHCI_CAP_REG_NUTRS_SHIFT (0)
+#define UFSHCI_CAP_REG_NUTRS_MASK (0xFF)
+#define UFSHCI_CAP_REG_NORTT_SHIFT (8)
+#define UFSHCI_CAP_REG_NORTT_MASK (0xFF)
+#define UFSHCI_CAP_REG_NUTMRS_SHIFT (16)
+#define UFSHCI_CAP_REG_NUTMRS_MASK (0x7)
+#define UFSHCI_CAP_REG_EHSLUTRDS_SHIFT (22)
+#define UFSHCI_CAP_REG_EHSLUTRDS_MASK (0x1)
+#define UFSHCI_CAP_REG_AUTOH8_SHIFT (23)
+#define UFSHCI_CAP_REG_AUTOH8_MASK (0x1)
+#define UFSHCI_CAP_REG_64AS_SHIFT (24)
+#define UFSHCI_CAP_REG_64AS_MASK (0x1)
+#define UFSHCI_CAP_REG_OODDS_SHIFT (25)
+#define
UFSHCI_CAP_REG_OODDS_MASK (0x1) +#define UFSHCI_CAP_REG_UICDMETMS_SHIFT (26) +#define UFSHCI_CAP_REG_UICDMETMS_MASK (0x1) +#define UFSHCI_CAP_REG_CS_SHIFT (28) +#define UFSHCI_CAP_REG_CS_MASK (0x1) +#define UFSHCI_CAP_REG_LSDBS_SHIFT (29) +#define UFSHCI_CAP_REG_LSDBS_MASK (0x1) +#define UFSHCI_CAP_REG_MCQS_SHIFT (30) +#define UFSHCI_CAP_REG_MCQS_MASK (0x1) +#define UFSHCI_CAP_REG_EIS_SHIFT (31) +#define UFSHCI_CAP_REG_EIS_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.2.2, Offset 04h: MCQCAP + * Multi-Circular Queue Capability Register + */ +#define UFSHCI_MCQCAP_REG_MAXQ_SHIFT (0) +#define UFSHCI_MCQCAP_REG_MAXQ_MASK (0xFF) +#define UFSHCI_MCQCAP_REG_SP_SHIFT (8) +#define UFSHCI_MCQCAP_REG_SP_MASK (0x1) +#define UFSHCI_MCQCAP_REG_RRP_SHIFT (9) +#define UFSHCI_MCQCAP_REG_RRP_MASK (0x1) +#define UFSHCI_MCQCAP_REG_EIS_SHIFT (10) +#define UFSHCI_MCQCAP_REG_EIS_MASK (0x1) +#define UFSHCI_MCQCAP_REG_QCFGPTR_SHIFT (16) +#define UFSHCI_MCQCAP_REG_QCFGPTR_MASK (0xFF) +#define UFSHCI_MCQCAP_REG_MIAG_SHIFT (24) +#define UFSHCI_MCQCAP_REG_MIAG_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.2.3, Offset 08h: VER + * UFS Version + */ +#define UFSHCI_VER_REG_VS_SHIFT (0) +#define UFSHCI_VER_REG_VS_MASK (0xF) +#define UFSHCI_VER_REG_MNR_SHIFT (4) +#define UFSHCI_VER_REG_MNR_MASK (0xF) +#define UFSHCI_VER_REG_MJR_SHIFT (8) +#define UFSHCI_VER_REG_MJR_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.2.4, Offset 0Ch: EXT_CAP + * Extended Controller Capabilities + */ +#define UFSHCI_EXTCAP_REG_HOST_HINT_CACAHE_SIZE_SHIFT (0) +#define UFSHCI_EXTCAP_REG_HOST_HINT_CACAHE_SIZE_MASK (0xFFFF) + +/* + * UFSHCI 4.1, section 5.2.5, Offset 10h: HCPID + * Host Controller Identification Descriptor – Product ID + */ +#define UFSHCI_HCPID_REG_PID_SHIFT (0) +#define UFSHCI_HCPID_REG_PID_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.2.6, Offset 14h: HCMID + * Host Controller Identification Descriptor – Manufacturer ID + */ +#define UFSHCI_HCMID_REG_MIC_SHIFT (0) +#define UFSHCI_HCMID_REG_MIC_MASK (0xFFFF) +#define UFSHCI_HCMID_REG_BI_SHIFT (8) +#define UFSHCI_HCMID_REG_BI_MASK (0xFFFF) + +/* + * UFSHCI 4.1, section 5.2.7, Offset 18h: AHIT + * Auto-Hibernate Idle Timer + */ +#define UFSHCI_AHIT_REG_AH8ITV_SHIFT (0) +#define UFSHCI_AHIT_REG_AH8ITV_MASK (0x3FF) +#define UFSHCI_AHIT_REG_TS_SHIFT (10) +#define UFSHCI_AHIT_REG_TS_MASK (0x7) + +/* + * UFSHCI 4.1, section 5.3.1, Offset 20h: IS + * Interrupt Status + */ +#define UFSHCI_IS_REG_UTRCS_SHIFT (0) +#define UFSHCI_IS_REG_UTRCS_MASK (0x1) +#define UFSHCI_IS_REG_UDEPRI_SHIFT (1) +#define UFSHCI_IS_REG_UDEPRI_MASK (0x1) +#define UFSHCI_IS_REG_UE_SHIFT (2) +#define UFSHCI_IS_REG_UE_MASK (0x1) +#define UFSHCI_IS_REG_UTMS_SHIFT (3) +#define UFSHCI_IS_REG_UTMS_MASK (0x1) +#define UFSHCI_IS_REG_UPMS_SHIFT (4) +#define UFSHCI_IS_REG_UPMS_MASK (0x1) +#define UFSHCI_IS_REG_UHXS_SHIFT (5) +#define UFSHCI_IS_REG_UHXS_MASK (0x1) +#define UFSHCI_IS_REG_UHES_SHIFT (6) +#define UFSHCI_IS_REG_UHES_MASK (0x1) +#define UFSHCI_IS_REG_ULLS_SHIFT (7) +#define UFSHCI_IS_REG_ULLS_MASK (0x1) +#define UFSHCI_IS_REG_ULSS_SHIFT (8) +#define UFSHCI_IS_REG_ULSS_MASK (0x1) +#define UFSHCI_IS_REG_UTMRCS_SHIFT (9) +#define UFSHCI_IS_REG_UTMRCS_MASK (0x1) +#define UFSHCI_IS_REG_UCCS_SHIFT (10) +#define UFSHCI_IS_REG_UCCS_MASK (0x1) +#define UFSHCI_IS_REG_DFES_SHIFT (11) +#define UFSHCI_IS_REG_DFES_MASK (0x1) +#define UFSHCI_IS_REG_UTPES_SHIFT (12) +#define UFSHCI_IS_REG_UTPES_MASK (0x1) +#define UFSHCI_IS_REG_HCFES_SHIFT (16) +#define UFSHCI_IS_REG_HCFES_MASK (0x1) +#define UFSHCI_IS_REG_SBFES_SHIFT (17) +#define 
UFSHCI_IS_REG_SBFES_MASK (0x1) +#define UFSHCI_IS_REG_CEFES_SHIFT (18) +#define UFSHCI_IS_REG_CEFES_MASK (0x1) +#define UFSHCI_IS_REG_SQES_SHIFT (19) +#define UFSHCI_IS_REG_SQES_MASK (0x1) +#define UFSHCI_IS_REG_CQES_SHIFT (20) +#define UFSHCI_IS_REG_CQES_MASK (0x1) +#define UFSHCI_IS_REG_IAGES_SHIFT (21) +#define UFSHCI_IS_REG_IAGES_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.2, Offset 24h: IE + * Interrupt Enable + */ +#define UFSHCI_IE_REG_UTRCE_SHIFT (0) +#define UFSHCI_IE_REG_UTRCE_MASK (0x1) +#define UFSHCI_IE_REG_UDEPRIE_SHIFT (1) +#define UFSHCI_IE_REG_UDEPRIE_MASK (0x1) +#define UFSHCI_IE_REG_UEE_SHIFT (2) +#define UFSHCI_IE_REG_UEE_MASK (0x1) +#define UFSHCI_IE_REG_UTMSE_SHIFT (3) +#define UFSHCI_IE_REG_UTMSE_MASK (0x1) +#define UFSHCI_IE_REG_UPMSE_SHIFT (4) +#define UFSHCI_IE_REG_UPMSE_MASK (0x1) +#define UFSHCI_IE_REG_UHXSE_SHIFT (5) +#define UFSHCI_IE_REG_UHXSE_MASK (0x1) +#define UFSHCI_IE_REG_UHESE_SHIFT (6) +#define UFSHCI_IE_REG_UHESE_MASK (0x1) +#define UFSHCI_IE_REG_ULLSE_SHIFT (7) +#define UFSHCI_IE_REG_ULLSE_MASK (0x1) +#define UFSHCI_IE_REG_ULSSE_SHIFT (8) +#define UFSHCI_IE_REG_ULSSE_MASK (0x1) +#define UFSHCI_IE_REG_UTMRCE_SHIFT (9) +#define UFSHCI_IE_REG_UTMRCE_MASK (0x1) +#define UFSHCI_IE_REG_UCCE_SHIFT (10) +#define UFSHCI_IE_REG_UCCE_MASK (0x1) +#define UFSHCI_IE_REG_DFEE_SHIFT (11) +#define UFSHCI_IE_REG_DFEE_MASK (0x1) +#define UFSHCI_IE_REG_UTPEE_SHIFT (12) +#define UFSHCI_IE_REG_UTPEE_MASK (0x1) +#define UFSHCI_IE_REG_HCFEE_SHIFT (16) +#define UFSHCI_IE_REG_HCFEE_MASK (0x1) +#define UFSHCI_IE_REG_SBFEE_SHIFT (17) +#define UFSHCI_IE_REG_SBFEE_MASK (0x1) +#define UFSHCI_IE_REG_CEFEE_SHIFT (18) +#define UFSHCI_IE_REG_CEFEE_MASK (0x1) +#define UFSHCI_IE_REG_SQEE_SHIFT (19) +#define UFSHCI_IE_REG_SQEE_MASK (0x1) +#define UFSHCI_IE_REG_CQEE_SHIFT (20) +#define UFSHCI_IE_REG_CQEE_MASK (0x1) +#define UFSHCI_IE_REG_IAGEE_SHIFT (21) +#define UFSHCI_IE_REG_IAGEE_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.3, Offset 2Ch: HCSEXT + * Host Controller Status Extended + */ +#define UFSHCI_HCSEXT_IIDUTPE_SHIFT (0) +#define UFSHCI_HCSEXT_IIDUTPE_MASK (0xF) +#define UFSHCI_HCSEXT_EXT_IIDUTPE_SHIFT (4) +#define UFSHCI_HCSEXT_EXT_IIDUTPE_MASK (0xF) + +/* + * UFSHCI 4.1, section 5.3.4, Offset 30h: HCS + * Host Controller Status + */ +#define UFSHCI_HCS_REG_DP_SHIFT (0) +#define UFSHCI_HCS_REG_DP_MASK (0x1) +#define UFSHCI_HCS_REG_UTRLRDY_SHIFT (1) +#define UFSHCI_HCS_REG_UTRLRDY_MASK (0x1) +#define UFSHCI_HCS_REG_UTMRLRDY_SHIFT (2) +#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1) +#define UFSHCI_HCS_REG_UCRDY_SHIFT (3) +#define UFSHCI_HCS_REG_UCRDY_MASK (0x1) +#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7) +#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7) +#define UFSHCI_HCS_REG_UTPEC_SHIFT (12) +#define UFSHCI_HCS_REG_UTPEC_MASK (0xF) +#define UFSHCI_HCS_REG_TTAGUTPE_SHIFT (16) +#define UFSHCI_HCS_REG_TTAGUTPE_MASK (0xFF) +#define UFSHCI_HCS_REG_TLUNUTPE_SHIFT (24) +#define UFSHCI_HCS_REG_TLUNUTPE_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.3.5, Offset 34h: HCE + * Host Controller Enable + */ +#define UFSHCI_HCE_REG_HCE_SHIFT (0) +#define UFSHCI_HCE_REG_HCE_MASK (0x1) +#define UFSHCI_HCE_REG_CGE_SHIFT (1) +#define UFSHCI_HCE_REG_CGE_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.6, Offset 38h: UECPA + * Host UIC Error Code PHY Adapter Layer + */ +#define UFSHCI_UECPA_REG_EC_SHIFT (0) +#define UFSHCI_UECPA_REG_EC_MASK (0xF) +#define UFSHCI_UECPA_REG_ERR_SHIFT (31) +#define UFSHCI_UECPA_REG_ERR_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.7, Offset 3Ch: UECDL + * Host UIC Error Code Data Link Layer + 
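+ * As with the other UEC* registers, bit 31 (ERR) reports whether the error
+ * code in the low bits is valid; e.g. UFSHCIV(UFSHCI_UECDL_REG_ERR, val)
+ * (helper macro defined at the bottom of this file) extracts that bit.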
*/ +#define UFSHCI_UECDL_REG_EC_SHIFT (0) +#define UFSHCI_UECDL_REG_EC_MASK (0xFFFF) +#define UFSHCI_UECDL_REG_ERR_SHIFT (31) +#define UFSHCI_UECDL_REG_ERR_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.8, Offset 40h: UECN + * Host UIC Error Code Network Layer + */ +#define UFSHCI_UECN_REG_EC_SHIFT (0) +#define UFSHCI_UECN_REG_EC_MASK (0x7) +#define UFSHCI_UECN_REG_ERR_SHIFT (31) +#define UFSHCI_UECN_REG_ERR_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.9, Offset 44h: UECT + * Host UIC Error Code Transport Layer + */ +#define UFSHCI_UECT_REG_EC_SHIFT (0) +#define UFSHCI_UECT_REG_EC_MASK (0x7F) +#define UFSHCI_UECT_REG_ERR_SHIFT (31) +#define UFSHCI_UECT_REG_ERR_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.3.10, Offset 48h: UECDME + * Host UIC Error Code + */ +#define UFSHCI_UECDME_REG_EC_SHIFT (0) +#define UFSHCI_UECDME_REG_EC_MASK (0xF) +#define UFSHCI_UECDME_REG_ERR_SHIFT (31) +#define UFSHCI_UECDME_REG_ERR_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.4.1, Offset 50h: UTRLBA + * UTP Transfer Request List Base Address + */ +#define UFSHCI_UTRLBA_REG_UTRLBA_SHIFT (0) +#define UFSHCI_UTRLBA_REG_UTRLBA_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.4.2, Offset 54h: UTRLBAU + * UTP Transfer Request List Base Address Upper 32-bits + */ +#define UFSHCI_UTRLBAU_REG_UTRLBAU_SHIFT (0) +#define UFSHCI_UTRLBAU_REG_UTRLBAU_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.4.3, Offset 58h: UTRLDBR + * UTP Transfer Request List Door Bell Register + */ +#define UFSHCI_UTRLDBR_REG_UTRLDBR_SHIFT (0) +#define UFSHCI_UTRLDBR_REG_UTRLDBR_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.4.4, Offset 5Ch: UTRLCLR + * UTP Transfer Request List Clear Register + */ +#define UFSHCI_UTRLCLR_REG_UTRLCLR_SHIFT (0) +#define UFSHCI_UTRLCLR_REG_UTRLCLR_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.4.5, Offset 60h: UTRLRSR + * UTP Transfer Request List Run Stop Register + */ +#define UFSHCI_UTRLRSR_REG_UTRLRSR_SHIFT (0) +#define UFSHCI_UTRLRSR_REG_UTRLRSR_MASK (0x1) + +/* + * UFSHCI 4.1, section 5.4.6, Offset 64h: UTRLCNR + * UTP Transfer Request List Completion Notification Register + */ +#define UFSHCI_UTRLCNR_REG_UTRLCNR_SHIFT (0) +#define UFSHCI_UTRLCNR_REG_UTRLCNR_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.5.1, Offset 70h: UTMRLBA + * UTP Task Management Request List Base Address + */ +#define UFSHCI_UTMRLBA_REG_UTMRLBA_SHIFT (0) +#define UFSHCI_UTMRLBA_REG_UTMRLBA_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.5.2, Offset 74h: UTMRLBAU + * UTP Task Management Request List Base Address Upper 32-bits + */ +#define UFSHCI_UTMRLBAU_REG_UTMRLBAU_SHIFT (0) +#define UFSHCI_UTMRLBAU_REG_UTMRLBAU_MASK (0xFFFFFFFF) + +/* + * UFSHCI 4.1, section 5.5.3, Offset 78h: UTMRLDBR + * UTP Task Management Request List Door Bell Register + */ +#define UFSHCI_UTMRLDBR_REG_UTMRLDBR_SHIFT (0) +#define UFSHCI_UTMRLDBR_REG_UTMRLDBR_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.5.4, Offset 7Ch: UTMRLCLR + * UTP Task Management Request List CLear Register + */ +#define UFSHCI_UTMRLCLR_REG_UTMRLCLR_SHIFT (0) +#define UFSHCI_UTMRLCLR_REG_UTMRLCLR_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.5.5, Offset 80h: UTMRLRSR + * UTP Task Management Request List Run Stop Register + */ +#define UFSHCI_UTMRLRSR_REG_UTMRLRSR_SHIFT (0) +#define UFSHCI_UTMRLRSR_REG_UTMRLRSR_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.6.1 + * Offset 90h: UICCMD – UIC Command + */ +#define UFSHCI_UICCMD_REG_CMDOP_SHIFT (0) +#define UFSHCI_UICCMD_REG_CMDOP_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.6.2 + * Offset 94h: UICCMDARG1 – UIC Command Argument 1 + */ +#define 
UFSHCI_UICCMDARG1_REG_ARG1_SHIFT (0) +#define UFSHCI_UICCMDARG1_REG_ARG1_MASK (0xFFFFFFFF) +#define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_SHIFT (0) +#define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_MASK (0xFFFF) +#define UFSHCI_UICCMDARG1_REG_MIB_ATTR_SHIFT (16) +#define UFSHCI_UICCMDARG1_REG_MIB_ATTR_MASK (0xFFFF) + +/* + * UFSHCI 4.1, section 5.6.3 + * Offset 98h: UICCMDARG2 – UIC Command Argument 2 + */ +#define UFSHCI_UICCMDARG2_REG_ARG2_SHIFT (0) +#define UFSHCI_UICCMDARG2_REG_ARG2_MASK (0xFFFFFFFF) +#define UFSHCI_UICCMDARG2_REG_ERROR_CODE_SHIFT (0) +#define UFSHCI_UICCMDARG2_REG_ERROR_CODE_MASK (0xFF) +#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_SHIFT (16) +#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_MASK (0xFF) + +/* + * UFSHCI 4.1, section 5.6.4 + * Offset 9Ch: UICCMDARG3 – UIC Command Argument 3 + */ +#define UFSHCI_UICCMDARG3_REG_ARG3_SHIFT (0) +#define UFSHCI_UICCMDARG3_REG_ARG3_MASK (0xFFFFFFFF) + +/* Helper macro to combine *_MASK and *_SHIFT defines */ +#define UFSHCIM(name) (name##_MASK << name##_SHIFT) + +/* Helper macro to extract value from x */ +#define UFSHCIV(name, x) (((x) >> name##_SHIFT) & name##_MASK) + +/* Helper macro to construct a field value */ +#define UFSHCIF(name, x) (((x)&name##_MASK) << name##_SHIFT) + +#define UFSHCI_DUMP_REG(ctrlr, member) \ + do { \ + uint32_t _val = ufshci_mmio_read_4(ctrlr, member); \ + ufshci_printf(ctrlr, " %-15s (0x%03lx) : 0x%08x\n", #member, \ + ufshci_mmio_offsetof(member), _val); \ + } while (0) + +#endif /* __UFSHCI_REG_H__ */ diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c new file mode 100644 index 000000000000..cc9a2ddae768 --- /dev/null +++ b/sys/dev/ufshci/ufshci_req_queue.c @@ -0,0 +1,490 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. + * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include +#include +#include + +#include + +#include "sys/kassert.h" +#include "ufshci_private.h" + +static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue, + struct ufshci_tracker *tr, enum ufshci_data_direction data_direction); + +static const struct ufshci_qops sdb_qops = { + .construct = ufshci_req_sdb_construct, + .destroy = ufshci_req_sdb_destroy, + .get_hw_queue = ufshci_req_sdb_get_hw_queue, + .enable = ufshci_req_sdb_enable, + .reserve_slot = ufshci_req_sdb_reserve_slot, + .reserve_admin_slot = ufshci_req_sdb_reserve_slot, + .ring_doorbell = ufshci_req_sdb_ring_doorbell, + .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf, + .process_cpl = ufshci_req_sdb_process_cpl, + .get_inflight_io = ufshci_req_sdb_get_inflight_io, +}; + +int +ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr) +{ + struct ufshci_req_queue *req_queue; + int error; + + /* + * UTP Task Management Request only supports Legacy Single Doorbell + * Queue. 
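+	 * Even if MCQ support is added for the transfer request path later,
+	 * this queue is expected to stay in SDB mode.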
+ */ + req_queue = &ctrlr->task_mgmt_req_queue; + req_queue->queue_mode = UFSHCI_Q_MODE_SDB; + req_queue->qops = sdb_qops; + + error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES, + /*is_task_mgmt*/ true); + + return (error); +} + +void +ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr) +{ + ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr, + &ctrlr->task_mgmt_req_queue); +} + +int +ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr) +{ + return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr, + &ctrlr->task_mgmt_req_queue)); +} + +int +ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr) +{ + struct ufshci_req_queue *req_queue; + int error; + + /* + * Currently, it does not support MCQ mode, so it should be set to SDB + * mode by default. + * TODO: Determine queue mode by checking Capability Registers + */ + req_queue = &ctrlr->transfer_req_queue; + req_queue->queue_mode = UFSHCI_Q_MODE_SDB; + req_queue->qops = sdb_qops; + + error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES, + /*is_task_mgmt*/ false); + + return (error); +} + +void +ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr) +{ + ctrlr->transfer_req_queue.qops.destroy(ctrlr, + &ctrlr->transfer_req_queue); +} + +int +ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr) +{ + return (ctrlr->transfer_req_queue.qops.enable(ctrlr, + &ctrlr->transfer_req_queue)); +} + +static bool +ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue, + uint8_t ocs, union ufshci_reponse_upiu *response) +{ + bool is_error = false; + + /* Check request descriptor */ + if (ocs != UFSHCI_DESC_SUCCESS) { + ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs); + is_error = true; + } + + /* Check response UPIU header */ + if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) { + ufshci_printf(req_queue->ctrlr, + "Invalid response code = 0x%x\n", + response->header.response); + is_error = true; + } + + return (is_error); +} + +static void +ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs, + uint8_t rc) +{ + struct ufshci_utp_xfer_req_desc *desc; + struct ufshci_upiu_header *resp_header; + + mtx_assert(&tr->hwq->qlock, MA_NOTOWNED); + + resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu; + resp_header->response = rc; + + desc = &tr->hwq->utrd[tr->slot_num]; + desc->overall_command_status = ocs; + + ufshci_req_queue_complete_tracker(tr); +} + +static void +ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue, + struct ufshci_request *req, uint8_t ocs, uint8_t rc) +{ + struct ufshci_completion cpl; + bool error; + + memset(&cpl, 0, sizeof(cpl)); + cpl.response_upiu.header.response = rc; + error = ufshci_req_queue_response_is_error(req_queue, ocs, + &cpl.response_upiu); + + if (error) { + ufshci_printf(req_queue->ctrlr, + "Manual complete request error:0x%x", error); + } + + if (req->cb_fn) + req->cb_fn(req->cb_arg, &cpl, error); + + ufshci_free_request(req); +} + +void +ufshci_req_queue_fail(struct ufshci_controller *ctrlr, + struct ufshci_hw_queue *hwq) +{ + struct ufshci_req_queue *req_queue; + struct ufshci_tracker *tr; + struct ufshci_request *req; + int i; + + if (!mtx_initialized(&hwq->qlock)) + return; + + mtx_lock(&hwq->qlock); + + req_queue = &ctrlr->transfer_req_queue; + + for (i = 0; i < req_queue->num_entries; i++) { + tr = hwq->act_tr[i]; + req = tr->req; + + if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) { + mtx_unlock(&hwq->qlock); + 
ufshci_req_queue_manual_complete_request(req_queue, req, + UFSHCI_DESC_ABORTED, + UFSHCI_RESPONSE_CODE_GENERAL_FAILURE); + mtx_lock(&hwq->qlock); + } else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) { + /* + * Do not remove the tracker. The abort_tracker path + * will do that for us. + */ + mtx_unlock(&hwq->qlock); + ufshci_req_queue_manual_complete_tracker(tr, + UFSHCI_DESC_ABORTED, + UFSHCI_RESPONSE_CODE_GENERAL_FAILURE); + mtx_lock(&hwq->qlock); + } + } + + mtx_unlock(&hwq->qlock); +} + +void +ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr) +{ + struct ufshci_req_queue *req_queue = tr->req_queue; + struct ufshci_request *req = tr->req; + struct ufshci_completion cpl; + struct ufshci_utp_xfer_req_desc *desc; + uint8_t ocs; + bool retry, error, retriable; + + mtx_assert(&tr->hwq->qlock, MA_NOTOWNED); + + bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + cpl.size = tr->response_size; + memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size); + + desc = &tr->hwq->utrd[tr->slot_num]; + ocs = desc->overall_command_status; + + error = ufshci_req_queue_response_is_error(req_queue, ocs, + &cpl.response_upiu); + + /* TODO: Implement retry */ + // retriable = ufshci_completion_is_retry(cpl); + retriable = false; + retry = error && retriable && + req->retries < req_queue->ctrlr->retry_count; + if (retry) + tr->hwq->num_retries++; + if (error && req->retries >= req_queue->ctrlr->retry_count && retriable) + tr->hwq->num_failures++; + + KASSERT(tr->req, ("there is no request assigned to the tracker\n")); + KASSERT(cpl.response_upiu.header.task_tag == + req->request_upiu.header.task_tag, + ("response task_tag does not match request task_tag\n")); + + if (!retry) { + if (req->payload_valid) { + bus_dmamap_sync(req_queue->dma_tag_payload, + tr->payload_dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + } + /* Copy response from the command descriptor */ + if (req->cb_fn) + req->cb_fn(req->cb_arg, &cpl, error); + } + + mtx_lock(&tr->hwq->qlock); + + /* Clear the UTRL Completion Notification register */ + req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr); + + if (retry) { + req->retries++; + ufshci_req_queue_submit_tracker(req_queue, tr, + req->data_direction); + } else { + if (req->payload_valid) { + bus_dmamap_unload(req_queue->dma_tag_payload, + tr->payload_dma_map); + } + + /* Clear tracker */ + ufshci_free_request(req); + tr->req = NULL; + tr->slot_state = UFSHCI_SLOT_STATE_FREE; + } + + mtx_unlock(&tr->hwq->qlock); +} + +bool +ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue) +{ + return (req_queue->qops.process_cpl(req_queue)); +} + +static void +ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) +{ + struct ufshci_tracker *tr = arg; + struct ufshci_prdt_entry *prdt_entry; + int i; + + /* + * If the mapping operation failed, return immediately. The caller + * is responsible for detecting the error status and failing the + * tracker manually. 
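+	 * (bus_dmamap_load_mem() hands the same error code back to
+	 * ufshci_req_queue_prepare_prdt(), which then completes the tracker
+	 * with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.)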
+ */
+	if (error != 0) {
+		ufshci_printf(tr->req_queue->ctrlr,
+		    "Failed to map payload %d\n", error);
+		return;
+	}
+
+	prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;
+
+	tr->prdt_entry_cnt = nseg;
+
+	/* PRDT fields are little-endian 32-bit words. */
+	for (i = 0; i < nseg; i++) {
+		prdt_entry->data_base_address = htole32(seg[i].ds_addr &
+		    0xffffffff);
+		prdt_entry->data_base_address_upper = htole32(seg[i].ds_addr >>
+		    32);
+		prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);
+
+		++prdt_entry;
+	}
+
+	bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
+{
+	struct ufshci_request *req = tr->req;
+	struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
+	int error;
+
+	tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;
+
+	memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));
+
+	/* Fill the PRDT entries with the payload */
+	error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
+	    tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
+	    BUS_DMA_NOWAIT);
+	if (error != 0) {
+		/*
+		 * The dmamap operation failed, so we manually fail the
+		 * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
+		 *
+		 * ufshci_req_queue_manual_complete_tracker must not be called
+		 * with the req_queue lock held.
+		 */
+		ufshci_printf(tr->req_queue->ctrlr,
+		    "bus_dmamap_load_mem returned with error: 0x%x!\n", error);
+
+		mtx_unlock(&tr->hwq->qlock);
+		ufshci_req_queue_manual_complete_tracker(tr,
+		    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
+		    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+		mtx_lock(&tr->hwq->qlock);
+	}
+}
+
+static void
+ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
+    const uint16_t response_len, const uint16_t prdt_off,
+    const uint16_t prdt_entry_cnt)
+{
+	uint8_t command_type;
+	/* Value to convert bytes to dwords */
+	const uint16_t dword_size = 4;
+
+	/*
+	 * Set command type to UFS storage.
+	 * The UFS 4.1 spec only defines 'UFS Storage' as a command type.
+	 */
+	command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;
+
+	memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
+	desc->command_type = command_type;
+	desc->data_direction = data_direction;
+	desc->interrupt = true;
+	/* Set the initial value to Invalid. */
+	desc->overall_command_status = UFSHCI_OCS_INVALID;
+	desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
+	    0xffffffff);
+	desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
+	    32);
+
+	desc->response_upiu_offset = response_off / dword_size;
+	desc->response_upiu_length = response_len / dword_size;
+	desc->prdt_offset = prdt_off / dword_size;
+	desc->prdt_length = prdt_entry_cnt;
+}
+
+/*
+ * Submit the tracker to the hardware.
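+ * Called with the queue lock held: the request UPIU is copied into the UTP
+ * Command Descriptor, the PRDT is filled when a payload is attached, and the
+ * descriptor memory is synced before the doorbell is rung so the controller
+ * sees a fully built descriptor.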
+ */ +static void +ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue, + struct ufshci_tracker *tr, enum ufshci_data_direction data_direction) +{ + struct ufshci_controller *ctrlr = req_queue->ctrlr; + struct ufshci_request *req = tr->req; + uint64_t ucd_paddr; + uint16_t request_len, response_off, response_len; + uint8_t slot_num = tr->slot_num; + + mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED); + + /* TODO: Check timeout */ + + request_len = req->request_size; + response_off = UFSHCI_UTP_XFER_REQ_SIZE; + response_len = req->response_size; + + /* Prepare UTP Command Descriptor */ + memcpy(tr->ucd, &req->request_upiu, request_len); + memset((uint8_t *)tr->ucd + response_off, 0, response_len); + + /* Prepare PRDT */ + if (req->payload_valid) + ufshci_req_queue_prepare_prdt(tr); + + bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Prepare UTP Transfer Request Descriptor. */ + ucd_paddr = tr->ucd_bus_addr; + ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num], + data_direction, ucd_paddr, response_off, response_len, tr->prdt_off, + tr->prdt_entry_cnt); + + bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED; + + /* Ring the doorbell */ + req_queue->qops.ring_doorbell(ctrlr, tr); +} + +static int +_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue, + struct ufshci_request *req) +{ + struct ufshci_tracker *tr = NULL; + int error; + + mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED); + + error = req_queue->qops.reserve_slot(req_queue, &tr); + if (error != 0) { + ufshci_printf(req_queue->ctrlr, "Failed to get tracker"); + return (error); + } + KASSERT(tr, ("There is no tracker allocated.")); + + if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED || + tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) + return (EBUSY); + + /* Set the task_tag value to slot_num for traceability. */ + req->request_upiu.header.task_tag = tr->slot_num; + + tr->slot_state = UFSHCI_SLOT_STATE_RESERVED; + tr->response_size = req->response_size; + tr->deadline = SBT_MAX; + tr->req = req; + + ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction); + + return (0); +} + +int +ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue, + struct ufshci_request *req, bool is_admin) +{ + struct ufshci_hw_queue *hwq; + uint32_t error; + + /* TODO: MCQs should use a separate Admin queue. */ + + hwq = req_queue->qops.get_hw_queue(req_queue); + KASSERT(hwq, ("There is no HW queue allocated.")); + + mtx_lock(&hwq->qlock); + error = _ufshci_req_queue_submit_request(req_queue, req); + mtx_unlock(&hwq->qlock); + + return (error); +} diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c new file mode 100644 index 000000000000..4670281d367a --- /dev/null +++ b/sys/dev/ufshci/ufshci_req_sdb.c @@ -0,0 +1,427 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
+ * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include +#include +#include + +#include "sys/kassert.h" +#include "ufshci_private.h" +#include "ufshci_reg.h" + +static void +ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue) +{ + struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + struct ufshci_tracker *tr; + int i; + + for (i = 0; i < req_queue->num_trackers; i++) { + tr = hwq->act_tr[i]; + bus_dmamap_destroy(req_queue->dma_tag_payload, + tr->payload_dma_map); + free(tr, M_UFSHCI); + } + + if (hwq->act_tr) { + free(hwq->act_tr, M_UFSHCI); + hwq->act_tr = NULL; + } + + if (req_queue->ucd) { + bus_dmamap_unload(req_queue->dma_tag_ucd, + req_queue->ucdmem_map); + bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd, + req_queue->ucdmem_map); + req_queue->ucd = NULL; + } + + if (req_queue->dma_tag_ucd) { + bus_dma_tag_destroy(req_queue->dma_tag_ucd); + req_queue->dma_tag_ucd = NULL; + } +} + +static int +ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue, + uint32_t num_entries, struct ufshci_controller *ctrlr) +{ + struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + struct ufshci_tracker *tr; + size_t ucd_allocsz, payload_allocsz; + uint64_t ucdmem_phys; + uint8_t *ucdmem; + int i, error; + + /* + * Each component must be page aligned, and individual PRP lists + * cannot cross a page boundary. + */ + ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc); + ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size); + payload_allocsz = num_entries * ctrlr->max_xfer_size; + + /* + * Allocate physical memory for UTP Command Descriptor (UCD) + * Note: UFSHCI UCD format is restricted to 128-byte alignment. + */ + error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, + ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, + ucd_allocsz, howmany(ucd_allocsz, ctrlr->page_size), + ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_ucd); + if (error != 0) { + ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n", + error); + goto out; + } + + if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem, + BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) { + ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n"); + goto out; + } + + if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map, + ucdmem, ucd_allocsz, ufshci_single_map, &ucdmem_phys, 0) != 0) { + ufshci_printf(ctrlr, "failed to load cmd desc memory\n"); + bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd, + req_queue->ucdmem_map); + goto out; + } + + req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem; + req_queue->ucd_addr = ucdmem_phys; + + /* + * Allocate physical memory for PRDT + * Note: UFSHCI PRDT format is restricted to 8-byte alignment. 
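+	 * The tag allows howmany(payload_allocsz, page_size) + 1 segments;
+	 * the extra segment is presumably there for payloads that do not
+	 * start on a page boundary.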
+ */ + error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8, + ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, + payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1, + ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload); + if (error != 0) { + ufshci_printf(ctrlr, "request prdt tag create failed %d\n", + error); + goto out; + } + + hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) * + req_queue->num_entries, + M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK); + + for (i = 0; i < req_queue->num_trackers; i++) { + tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI, + DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK); + + bus_dmamap_create(req_queue->dma_tag_payload, 0, + &tr->payload_dma_map); + + tr->req_queue = req_queue; + tr->slot_num = i; + tr->slot_state = UFSHCI_SLOT_STATE_FREE; + + tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem; + tr->ucd_bus_addr = ucdmem_phys; + + ucdmem += sizeof(struct ufshci_utp_cmd_desc); + ucdmem_phys += sizeof(struct ufshci_utp_cmd_desc); + + hwq->act_tr[i] = tr; + } + + return (0); +out: + ufshci_req_sdb_cmd_desc_destroy(req_queue); + return (ENOMEM); +} + +static bool +ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr, + uint8_t slot) +{ + uint32_t utrldbr; + + utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr); + return (!(utrldbr & (1 << slot))); +} + +int +ufshci_req_sdb_construct(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt) +{ + struct ufshci_hw_queue *hwq; + size_t allocsz; + uint64_t queuemem_phys; + uint8_t *queuemem; + int error; + + req_queue->ctrlr = ctrlr; + req_queue->is_task_mgmt = is_task_mgmt; + req_queue->num_entries = num_entries; + /* + * In Single Doorbell mode, the number of queue entries and the number + * of trackers are the same. + */ + req_queue->num_trackers = num_entries; + + /* Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0) */ + req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI, + M_ZERO | M_NOWAIT); + hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + + mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF); + + /* + * Allocate physical memory for request queue (UTP Transfer Request + * Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD)) + * Note: UTRD/UTMRD format is restricted to 1024-byte alignment. 
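+	 * The list is allocated as one physically contiguous segment
+	 * (nsegments = 1) so that its base address can be programmed
+	 * directly into UTRLBA/UTMRLBA below.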
+ */ + allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc); + error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024, + ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, + allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue); + if (error != 0) { + ufshci_printf(ctrlr, "request queue tag create failed %d\n", + error); + goto out; + } + + if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem, + BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) { + ufshci_printf(ctrlr, + "failed to allocate request queue memory\n"); + goto out; + } + + if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem, + allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) { + ufshci_printf(ctrlr, "failed to load request queue memory\n"); + bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd, + hwq->queuemem_map); + goto out; + } + + hwq->num_cmds = 0; + hwq->num_intr_handler_calls = 0; + hwq->num_retries = 0; + hwq->num_failures = 0; + hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem; + hwq->req_queue_addr = queuemem_phys; + + if (is_task_mgmt) { + /* UTP Task Management Request (UTMR) */ + uint32_t utmrlba, utmrlbau; + + utmrlba = hwq->req_queue_addr & 0xffffffff; + utmrlbau = hwq->req_queue_addr >> 32; + ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba); + ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau); + } else { + /* UTP Transfer Request (UTR) */ + uint32_t utrlba, utrlbau; + + /* + * Allocate physical memory for the command descriptor. + * UTP Transfer Request (UTR) requires memory for a separate + * command in addition to the queue. + */ + if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries, + ctrlr) != 0) { + ufshci_printf(ctrlr, + "failed to construct cmd descriptor memory\n"); + bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd, + hwq->queuemem_map); + goto out; + } + + utrlba = hwq->req_queue_addr & 0xffffffff; + utrlbau = hwq->req_queue_addr >> 32; + ufshci_mmio_write_4(ctrlr, utrlba, utrlba); + ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau); + } + + return (0); +out: + ufshci_req_sdb_destroy(ctrlr, req_queue); + return (ENOMEM); +} + +void +ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue) +{ + struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + + if (!req_queue->is_task_mgmt) + ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue); + + if (hwq->utrd != NULL) { + bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map); + bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd, + hwq->queuemem_map); + hwq->utrd = NULL; + } + + if (hwq->dma_tag_queue) { + bus_dma_tag_destroy(hwq->dma_tag_queue); + hwq->dma_tag_queue = NULL; + } + + if (mtx_initialized(&hwq->qlock)) + mtx_destroy(&hwq->qlock); + + free(req_queue->hwq, M_UFSHCI); +} + +struct ufshci_hw_queue * +ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue) +{ + return &req_queue->hwq[UFSHCI_SDB_Q]; +} + +int +ufshci_req_sdb_enable(struct ufshci_controller *ctrlr, + struct ufshci_req_queue *req_queue) +{ + if (req_queue->is_task_mgmt) { + uint32_t hcs, utmrldbr, utmrlrsr; + + hcs = ufshci_mmio_read_4(ctrlr, hcs); + if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) { + ufshci_printf(ctrlr, + "UTP task management request list is not ready\n"); + return (ENXIO); + } + + utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr); + if (utmrldbr != 0) { + ufshci_printf(ctrlr, + "UTP task management request list door bell is not ready\n"); + return (ENXIO); + } + + utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR); + ufshci_mmio_write_4(ctrlr, utmrlrsr, 
utmrlrsr); + } else { + uint32_t hcs, utrldbr, utrlcnr, utrlrsr; + + hcs = ufshci_mmio_read_4(ctrlr, hcs); + if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) { + ufshci_printf(ctrlr, + "UTP transfer request list is not ready\n"); + return (ENXIO); + } + + utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr); + if (utrldbr != 0) { + ufshci_printf(ctrlr, + "UTP transfer request list door bell is not ready\n"); + ufshci_printf(ctrlr, + "Clear the UTP transfer request list door bell\n"); + ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr); + } + + utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr); + if (utrlcnr != 0) { + ufshci_printf(ctrlr, + "UTP transfer request list notification is not ready\n"); + ufshci_printf(ctrlr, + "Clear the UTP transfer request list notification\n"); + ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr); + } + + utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR); + ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr); + } + + return (0); +} + +int +ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue, + struct ufshci_tracker **tr) +{ + struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + uint8_t i; + + for (i = 0; i < req_queue->num_entries; i++) { + if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) { + *tr = hwq->act_tr[i]; + (*tr)->hwq = hwq; + return (0); + } + } + return (EBUSY); +} + +void +ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr, + struct ufshci_tracker *tr) +{ + uint32_t utrlcnr; + + utrlcnr = 1 << tr->slot_num; + ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr); +} + +void +ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr, + struct ufshci_tracker *tr) +{ + uint32_t utrldbr = 0; + + utrldbr |= 1 << tr->slot_num; + ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr); + + tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++; + + // utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr); + // printf("DB=0x%08x\n", utrldbr); +} + +bool +ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue) +{ + struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q]; + struct ufshci_tracker *tr; + uint8_t slot; + bool done = false; + + hwq->num_intr_handler_calls++; + + bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + for (slot = 0; slot < req_queue->num_entries; slot++) { + tr = hwq->act_tr[slot]; + + KASSERT(tr, ("there is no tracker assigned to the slot")); + /* + * When the response is delivered from the device, the doorbell + * is cleared. + */ + if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED && + ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr, + slot)) { + ufshci_req_queue_complete_tracker(tr); + done = true; + } + } + + return (done); +} + +int +ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr) +{ + /* TODO: Implement inflight io*/ + + return (0); +} diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c new file mode 100644 index 000000000000..db24561f4169 --- /dev/null +++ b/sys/dev/ufshci/ufshci_sim.c @@ -0,0 +1,372 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ufshci_private.h"
+
+#define sim2ctrlr(sim) ((struct ufshci_controller *)cam_sim_softc(sim))
+
+static void
+ufshci_sim_scsiio_done(void *ccb_arg, const struct ufshci_completion *cpl,
+    bool error)
+{
+	const uint8_t *sense_data;
+	uint16_t sense_data_max_size;
+	uint16_t sense_data_len;
+
+	union ccb *ccb = (union ccb *)ccb_arg;
+
+	/*
+	 * Let the periph know the completion, and let it sort out what
+	 * it means. Report an error or success based on the OCS and UPIU
+	 * response code. We also need to copy the sense data so that CAM
+	 * can handle it.
+	 */
+	sense_data = cpl->response_upiu.cmd_response_upiu.sense_data;
+	sense_data_max_size = sizeof(
+	    cpl->response_upiu.cmd_response_upiu.sense_data);
+	sense_data_len = be16toh(
+	    cpl->response_upiu.cmd_response_upiu.sense_data_len);
+	memcpy(&ccb->csio.sense_data, sense_data,
+	    min(sense_data_len, sense_data_max_size));
+
+	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+	if (error) {
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		xpt_done(ccb);
+	} else {
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		xpt_done_direct(ccb);
+	}
+}
+
+/*
+ * Complete the command as an illegal command with invalid field
+ */
+static void
+ufshci_sim_illegal_request(union ccb *ccb)
+{
+	scsi_set_sense_data(&ccb->csio.sense_data,
+	    /*sense_format*/ SSD_TYPE_NONE,
+	    /*current_error*/ 1,
+	    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+	    /*asc*/ 0x24, /* 24h/00h INVALID FIELD IN CDB */
+	    /*ascq*/ 0x00,
+	    /*extra args*/ SSD_ELEM_NONE);
+	ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+	ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID |
+	    CAM_DEV_QFRZN;
+	xpt_freeze_devq(ccb->ccb_h.path, 1);
+	xpt_done(ccb);
+}
+
+static void
+ufshchi_sim_scsiio(struct cam_sim *sim, union ccb *ccb)
+{
+	struct ccb_scsiio *csio = &ccb->csio;
+	struct ufshci_request *req;
+	void *payload;
+	struct ufshci_cmd_command_upiu *upiu;
+	uint8_t *cdb;
+	uint32_t payload_len;
+	bool is_write;
+	struct ufshci_controller *ctrlr;
+	uint8_t data_direction;
+	int error;
+
+	/* UFS device cannot process these commands */
+	if (csio->cdb_io.cdb_bytes[0] == MODE_SENSE_6 ||
+	    csio->cdb_io.cdb_bytes[0] == MODE_SELECT_6 ||
+	    csio->cdb_io.cdb_bytes[0] == READ_12 ||
+	    csio->cdb_io.cdb_bytes[0] == WRITE_12) {
+		ufshci_sim_illegal_request(ccb);
+		return;
+	}
+
+	ctrlr = sim2ctrlr(sim);
+	payload = csio->data_ptr;
+
+	payload_len = csio->dxfer_len;
+	is_write = csio->ccb_h.flags & CAM_DIR_OUT;
+
+	/* TODO: Check other data type */
+	if ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
+		req = ufshci_allocate_request_bio((struct bio *)payload,
+		    M_NOWAIT, ufshci_sim_scsiio_done, ccb);
+	else
+		req = ufshci_allocate_request_vaddr(payload, payload_len,
+		    M_NOWAIT, ufshci_sim_scsiio_done, ccb);
+
+	/* The M_NOWAIT allocation may fail; do not dereference NULL. */
+	if (req == NULL) {
+		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+		xpt_done(ccb);
+		return;
+	}
+
+	req->request_size = sizeof(struct ufshci_cmd_command_upiu);
+	req->response_size = sizeof(struct ufshci_cmd_response_upiu);
+
+	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+	case CAM_DIR_IN:
+		data_direction = UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS;
+		break;
+	case CAM_DIR_OUT:
+		data_direction = UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT;
+		break;
+	default:
+		data_direction = UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER;
+	}
+	req->data_direction = data_direction;
+
+	upiu = (struct ufshci_cmd_command_upiu *)&req->request_upiu;
+	memset(upiu, 0, req->request_size);
+	upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_COMMAND;
+	upiu->header.operational_flags = is_write ?
UFSHCI_OPERATIONAL_FLAG_W : + UFSHCI_OPERATIONAL_FLAG_R; + upiu->header.lun = csio->ccb_h.target_lun; + upiu->header.cmd_set_type = UFSHCI_COMMAND_SET_TYPE_SCSI; + + upiu->expected_data_transfer_length = htobe32(payload_len); + + ccb->ccb_h.status |= CAM_SIM_QUEUED; + + if (csio->ccb_h.flags & CAM_CDB_POINTER) + cdb = csio->cdb_io.cdb_ptr; + else + cdb = csio->cdb_io.cdb_bytes; + + if (cdb == NULL || csio->cdb_len > sizeof(upiu->cdb)) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + return; + } + memcpy(upiu->cdb, cdb, csio->cdb_len); + + error = ufshci_ctrlr_submit_io_request(ctrlr, req); + if (error == EBUSY) { + ccb->ccb_h.status = CAM_SCSI_BUSY; + xpt_done(ccb); + return; + } else if (error) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + return; + } +} + +static uint32_t +ufshci_link_kBps(struct ufshci_controller *ctrlr) +{ + uint32_t gear = ctrlr->hs_gear; + uint32_t lanes = ctrlr->rx_lanes; + + /* + * per-lane effective bandwidth (KB/s, SI 1 KB = 1000 B) + * All HS-Gears use 8b/10b line coding, i.e. 80 % efficiency. + * - KB/s per lane = raw-rate(Gbps) × 0.8(8b/10b) / 8(bit) + */ + static const uint32_t kbps_per_lane[] = { + 0, /* unused */ + 145920, /* HS-Gear1 : 1459.2 Mbps */ + 291840, /* HS-Gear2 : 2918.4 Mbps */ + 583680, /* HS-Gear3 : 5836.8 Mbps */ + 1167360, /* HS-Gear4 : 11673.6 Mbps */ + 2334720 /* HS-Gear5 : 23347.2 Mbps */ + }; + + /* Sanity checks */ + if (gear >= nitems(kbps_per_lane)) + gear = 0; /* out-of-range -> treat as invalid */ + + if (lanes == 0 || lanes > 2) + lanes = 1; /* UFS spec allows 1–2 data lanes */ + + return kbps_per_lane[gear] * lanes; +} + +static void +ufshci_cam_action(struct cam_sim *sim, union ccb *ccb) +{ + struct ufshci_controller *ctrlr = sim2ctrlr(sim); + + if (ctrlr == NULL) { + ccb->ccb_h.status = CAM_SEL_TIMEOUT; + xpt_done(ccb); + return; + } + + /* Perform the requested action */ + switch (ccb->ccb_h.func_code) { + case XPT_SCSI_IO: + ufshchi_sim_scsiio(sim, ccb); + return; + case XPT_PATH_INQ: { + struct ccb_pathinq *cpi = &ccb->cpi; + + cpi->version_num = 1; + cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; + cpi->target_sprt = 0; + cpi->hba_misc = PIM_UNMAPPED | PIM_NO_6_BYTE; + cpi->hba_eng_cnt = 0; + cpi->max_target = 0; + cpi->max_lun = ctrlr->max_lun_count; + cpi->async_flags = 0; + cpi->maxio = ctrlr->max_xfer_size; + cpi->initiator_id = 1; + strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); + strlcpy(cpi->hba_vid, "UFSHCI", HBA_IDLEN); + strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + cpi->unit_number = cam_sim_unit(sim); + cpi->base_transfer_speed = ufshci_link_kBps(ctrlr); + cpi->transport = XPORT_UFSHCI; + cpi->transport_version = 1; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_SPC5; + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + case XPT_RESET_BUS: + ccb->ccb_h.status = CAM_REQ_CMP; + break; + case XPT_RESET_DEV: + if (ufshci_dev_reset(ctrlr)) + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + else + ccb->ccb_h.status = CAM_REQ_CMP; + break; + case XPT_ABORT: + /* TODO: Implement Task Management CMD*/ + ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; + break; + case XPT_SET_TRAN_SETTINGS: + ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; + break; + case XPT_GET_TRAN_SETTINGS: { + struct ccb_trans_settings *cts; + struct ccb_trans_settings_ufshci *ufshcix; + + cts = &ccb->cts; + ufshcix = &cts->xport_specific.ufshci; + + ufshcix->hs_gear = ctrlr->hs_gear; + ufshcix->tx_lanes = ctrlr->tx_lanes; + ufshcix->rx_lanes = ctrlr->rx_lanes; + ufshcix->max_hs_gear = ctrlr->max_rx_hs_gear; + ufshcix->max_tx_lanes 
= ctrlr->max_tx_lanes; + ufshcix->max_rx_lanes = ctrlr->max_rx_lanes; + ufshcix->valid = CTS_UFSHCI_VALID_LINK; + + cts->transport = XPORT_UFSHCI; + cts->transport_version = 1; + cts->protocol = PROTO_SCSI; + cts->protocol_version = SCSI_REV_SPC5; + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + case XPT_CALC_GEOMETRY: + cam_calc_geometry(&ccb->ccg, 1); + break; + case XPT_NOOP: + ccb->ccb_h.status = CAM_REQ_CMP; + break; + default: + printf("invalid ccb=%p func=%#x\n", ccb, ccb->ccb_h.func_code); + break; + } + xpt_done(ccb); + + return; +} + +static void +ufshci_cam_poll(struct cam_sim *sim) +{ + struct ufshci_controller *ctrlr = sim2ctrlr(sim); + + ufshci_ctrlr_poll(ctrlr); +} + +int +ufshci_sim_attach(struct ufshci_controller *ctrlr) +{ + device_t dev; + struct cam_devq *devq; + int max_trans; + + dev = ctrlr->dev; + max_trans = ctrlr->max_hw_pend_io; + if ((devq = cam_simq_alloc(max_trans)) == NULL) { + printf("Failed to allocate a simq\n"); + return (ENOMEM); + } + + ctrlr->ufshci_sim = cam_sim_alloc(ufshci_cam_action, ufshci_cam_poll, + "ufshci", ctrlr, device_get_unit(dev), &ctrlr->sc_mtx, max_trans, + max_trans, devq); + if (ctrlr->ufshci_sim == NULL) { + printf("Failed to allocate a sim\n"); + cam_simq_free(devq); + return (ENOMEM); + } + + mtx_lock(&ctrlr->sc_mtx); + if (xpt_bus_register(ctrlr->ufshci_sim, ctrlr->dev, 0) != CAM_SUCCESS) { + cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE); + cam_simq_free(devq); + mtx_unlock(&ctrlr->sc_mtx); + printf("Failed to create a bus\n"); + return (ENOMEM); + } + + if (xpt_create_path(&ctrlr->ufshci_path, /*periph*/ NULL, + cam_sim_path(ctrlr->ufshci_sim), CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim)); + cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE); + cam_simq_free(devq); + mtx_unlock(&ctrlr->sc_mtx); + printf("Failed to create a path\n"); + return (ENOMEM); + } + mtx_unlock(&ctrlr->sc_mtx); + + return (0); +} + +void +ufshci_sim_detach(struct ufshci_controller *ctrlr) +{ + int error; + + if (ctrlr->ufshci_path != NULL) { + xpt_free_path(ctrlr->ufshci_path); + ctrlr->ufshci_path = NULL; + } + + if (ctrlr->ufshci_sim != NULL) { + error = xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim)); + if (error == 0) { + /* accessing the softc is not possible after this */ + ctrlr->ufshci_sim->softc = NULL; + ufshci_printf(ctrlr, + "%s: %s:%d:%d caling " + "cam_sim_free sim %p refc %u mtx %p\n", + __func__, ctrlr->sc_name, + cam_sim_path(ctrlr->ufshci_sim), ctrlr->sc_unit, + ctrlr->ufshci_sim, ctrlr->ufshci_sim->refcount, + ctrlr->ufshci_sim->mtx); + } else { + panic("%s: %s: CAM layer is busy: errno %d\n", __func__, + ctrlr->sc_name, error); + } + + cam_sim_free(ctrlr->ufshci_sim, /* free_devq */ TRUE); + ctrlr->ufshci_sim = NULL; + } +} diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c new file mode 100644 index 000000000000..5e5069f12e5f --- /dev/null +++ b/sys/dev/ufshci/ufshci_sysctl.c @@ -0,0 +1,233 @@ +/*- + * Copyright (c) 2025, Samsung Electronics Co., Ltd. 
+ * Written by Jaeyoon Choi + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include +#include + +#include "ufshci_private.h" + +static int +ufshci_sysctl_timeout_period(SYSCTL_HANDLER_ARGS) +{ + uint32_t *ptr = arg1; + uint32_t newval = *ptr; + int error = sysctl_handle_int(oidp, &newval, 0, req); + + if (error || (req->newptr == NULL)) + return (error); + + if (newval > UFSHCI_MAX_TIMEOUT_PERIOD || + newval < UFSHCI_MIN_TIMEOUT_PERIOD) { + return (EINVAL); + } else { + *ptr = newval; + } + + return (0); +} + +static int +ufshci_sysctl_num_cmds(SYSCTL_HANDLER_ARGS) +{ + struct ufshci_controller *ctrlr = arg1; + int64_t num_cmds = 0; + int i; + + num_cmds = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_cmds; + + if (ctrlr->transfer_req_queue.hwq != NULL) { + for (i = 0; i < ctrlr->num_io_queues; i++) + num_cmds += ctrlr->transfer_req_queue.hwq[i].num_cmds; + } + + return (sysctl_handle_64(oidp, &num_cmds, 0, req)); +} + +static int +ufshci_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS) +{ + struct ufshci_controller *ctrlr = arg1; + int64_t num_intr_handler_calls = 0; + int i; + + num_intr_handler_calls = + ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_intr_handler_calls; + + if (ctrlr->transfer_req_queue.hwq != NULL) { + for (i = 0; i < ctrlr->num_io_queues; i++) + num_intr_handler_calls += ctrlr->transfer_req_queue + .hwq[i] + .num_intr_handler_calls; + } + + return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req)); +} + +static int +ufshci_sysctl_num_retries(SYSCTL_HANDLER_ARGS) +{ + struct ufshci_controller *ctrlr = arg1; + int64_t num_retries = 0; + int i; + + num_retries = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_retries; + + if (ctrlr->transfer_req_queue.hwq != NULL) { + for (i = 0; i < ctrlr->num_io_queues; i++) + num_retries += + ctrlr->transfer_req_queue.hwq[i].num_retries; + } + + return (sysctl_handle_64(oidp, &num_retries, 0, req)); +} + +static int +ufshci_sysctl_num_failures(SYSCTL_HANDLER_ARGS) +{ + struct ufshci_controller *ctrlr = arg1; + int64_t num_failures = 0; + int i; + + num_failures = + ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_failures; + + if (ctrlr->transfer_req_queue.hwq != NULL) { + for (i = 0; i < ctrlr->num_io_queues; i++) + num_failures += + ctrlr->transfer_req_queue.hwq[i].num_failures; + } + + return (sysctl_handle_64(oidp, &num_failures, 0, req)); +} + +static void +ufshci_sysctl_initialize_queue(struct ufshci_hw_queue *hwq, + struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree) +{ + struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree); + + SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries", + CTLFLAG_RD, &hwq->num_entries, 0, + "Number of entries in hardware queue"); + SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers", + CTLFLAG_RD, &hwq->num_trackers, 0, + "Number of trackers pre-allocated for this queue pair"); + SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head", CTLFLAG_RD, + &hwq->sq_head, 0, + "Current head of submission queue (as observed by driver)"); + SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail", CTLFLAG_RD, + &hwq->sq_tail, 0, + "Current tail of submission queue (as observed by driver)"); + SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head", CTLFLAG_RD, + &hwq->cq_head, 0, + "Current head of completion queue (as observed by driver)"); + + SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds", CTLFLAG_RD, + &hwq->num_cmds, "Number of commands submitted"); + SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, 
"num_intr_handler_calls", + CTLFLAG_RD, &hwq->num_intr_handler_calls, + "Number of times interrupt handler was invoked (will typically be " + "less than number of actual interrupts generated due to " + "interrupt aggregation)"); + SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries", + CTLFLAG_RD, &hwq->num_retries, "Number of commands retried"); + SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures", + CTLFLAG_RD, &hwq->num_failures, + "Number of commands ending in failure after all retries"); + + /* TODO: Implement num_ignored */ + /* TODO: Implement recovery state */ + /* TODO: Implement dump debug */ +} + +void +ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr) +{ + struct sysctl_ctx_list *ctrlr_ctx; + struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree; + struct sysctl_oid_list *ctrlr_list, *ioq_list; +#define QUEUE_NAME_LENGTH 16 + char queue_name[QUEUE_NAME_LENGTH]; + int i; + + ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev); + ctrlr_tree = device_get_sysctl_tree(ctrlr->dev); + ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "major_version", + CTLFLAG_RD, &ctrlr->major_version, 0, "UFS spec major version"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "minor_version", + CTLFLAG_RD, &ctrlr->minor_version, 0, "UFS spec minor version"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "io_queue_mode", + CTLFLAG_RD, &ctrlr->transfer_req_queue.queue_mode, 0, + "Active host-side queuing scheme " + "(Single-Doorbell or Multi-Circular-Queue)"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues", + CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs"); + + SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD, + &ctrlr->cap, 0, "Number of I/O queue pairs"); + + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period", + CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period, + 0, ufshci_sysctl_timeout_period, "IU", + "Timeout period for I/O queues (in seconds)"); + + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cmds", + CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, + ufshci_sysctl_num_cmds, "IU", "Number of commands submitted"); + + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, + "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, + ctrlr, 0, ufshci_sysctl_num_intr_handler_calls, "IU", + "Number of times interrupt handler was invoked (will " + "typically be less than number of actual interrupts " + "generated due to coalescing)"); + + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_retries", + CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, + ufshci_sysctl_num_retries, "IU", "Number of commands retried"); + + SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_failures", + CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0, + ufshci_sysctl_num_failures, "IU", + "Number of commands ending in failure after all retries"); + + que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "utmrq", + CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, + "UTP Task Management Request Queue"); + + ufshci_sysctl_initialize_queue( + &ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q], ctrlr_ctx, que_tree); + + /* + * Make sure that we've constructed the I/O queues before setting up the + * sysctls. Failed controllers won't allocate it, but we want the rest + * of the sysctls to diagnose things. 
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
new file mode 100644
index 000000000000..2c5f635dc11e
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -0,0 +1,224 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#include
+#include
+#include
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+int
+ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
+{
+	uint32_t is;
+	int timeout;
+
+	/* Wait for the IS flag to change */
+	timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+
+	while (1) {
+		is = ufshci_mmio_read_4(ctrlr, is);
+		if (UFSHCIV(UFSHCI_IS_REG_UPMS, is)) {
+			ufshci_mmio_write_4(ctrlr, is,
+			    UFSHCIM(UFSHCI_IS_REG_UPMS));
+			break;
+		}
+
+		if (timeout - ticks < 0) {
+			ufshci_printf(ctrlr,
+			    "Power mode is not changed within %d ms\n",
+			    ctrlr->device_init_timeout_in_ms);
+			return (ENXIO);
+		}
+
+		/* TODO: Replace busy-wait with interrupt-based pause. */
+		DELAY(10);
+	}
+
+	return (0);
+}
+
+int
+ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr)
+{
+	uint32_t hcs;
+	int timeout;
+
+	/* Wait for the HCS flag to change */
+	timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms);
+
+	while (1) {
+		hcs = ufshci_mmio_read_4(ctrlr, hcs);
+		if (UFSHCIV(UFSHCI_HCS_REG_UCRDY, hcs))
+			break;
+
+		if (timeout - ticks < 0) {
+			ufshci_printf(ctrlr,
+			    "UIC command is not ready within %d ms\n",
+			    ctrlr->uic_cmd_timeout_in_ms);
+			return (ENXIO);
+		}
+
+		/* TODO: Replace busy-wait with interrupt-based pause. */
+		DELAY(10);
+	}
+
+	return (0);
+}
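The two TODOs above flag the DELAY() busy-wait loops. One conventional replacement, sketched below, is to sleep on a wait channel that the controller's UIC interrupt handler wakes: msleep(9) drops the given mutex while sleeping and returns EWOULDBLOCK on timeout. The channel choice and ISR wiring here are assumptions for illustration, not driver code:

/*
 * Sketch only: an interrupt-driven pause. A UIC completion ISR would
 * call wakeup(&ctrlr->uic_cmd_lock) after acknowledging UCCS/UPMS.
 */
static int
ufshci_uic_sleep_wait_sketch(struct ufshci_controller *ctrlr, int timo_ticks)
{
	int error;

	mtx_lock(&ctrlr->uic_cmd_lock);
	/* Releases uic_cmd_lock while asleep; re-acquired on wakeup. */
	error = msleep(&ctrlr->uic_cmd_lock, &ctrlr->uic_cmd_lock, PRIBIO,
	    "ufsuic", timo_ticks);
	mtx_unlock(&ctrlr->uic_cmd_lock);

	return (error == EWOULDBLOCK ? ENXIO : error);
}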
+
+static int
+ufshci_uic_wait_cmd(struct ufshci_controller *ctrlr,
+    struct ufshci_uic_cmd *uic_cmd)
+{
+	uint32_t is;
+	int delta = 10;
+	int timeout;
+
+	mtx_assert(&ctrlr->uic_cmd_lock, MA_OWNED);
+
+	/* Wait for the IS flag to change */
+	timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms);
+
+	while (1) {
+		is = ufshci_mmio_read_4(ctrlr, is);
+		if (UFSHCIV(UFSHCI_IS_REG_UCCS, is)) {
+			ufshci_mmio_write_4(ctrlr, is,
+			    UFSHCIM(UFSHCI_IS_REG_UCCS));
+			break;
+		}
+		if (timeout - ticks < 0) {
+			ufshci_printf(ctrlr,
+			    "UIC command is not completed within %d ms\n",
+			    ctrlr->uic_cmd_timeout_in_ms);
+			return (ENXIO);
+		}
+
+		/* Exponential backoff: poll every 10 us, doubling to 1 ms. */
+		DELAY(delta);
+		delta = min(1000, delta * 2);
+	}
+
+	return (0);
+}
+
+static int
+ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
+    struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
+{
+	int error;
+
+	mtx_lock(&ctrlr->uic_cmd_lock);
+
+	error = ufshci_uic_cmd_ready(ctrlr);
+	if (error) {
+		mtx_unlock(&ctrlr->uic_cmd_lock);
+		return (ENXIO);
+	}
+
+	ufshci_mmio_write_4(ctrlr, ucmdarg1, uic_cmd->argument1);
+	ufshci_mmio_write_4(ctrlr, ucmdarg2, uic_cmd->argument2);
+	ufshci_mmio_write_4(ctrlr, ucmdarg3, uic_cmd->argument3);
+
+	ufshci_mmio_write_4(ctrlr, uiccmd, uic_cmd->opcode);
+
+	error = ufshci_uic_wait_cmd(ctrlr, uic_cmd);
+
+	mtx_unlock(&ctrlr->uic_cmd_lock);
+
+	if (error)
+		return (ENXIO);
+
+	if (return_value != NULL)
+		*return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);
+
+	return (0);
+}
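In the UICCMDARG1 layout, the MIB attribute ID occupies the upper 16 bits (hence the `attribute << 16` in the wrappers below) and the GenSelectorIndex the lower 16, which these helpers leave at zero. A caller-side sketch of using the wrappers; EXAMPLE_UNIPRO_ATTR is a placeholder defined here for illustration, not a real MIPI attribute ID:

/* Sketch: read a local UniPro attribute and mirror it to the peer. */
#define EXAMPLE_UNIPRO_ATTR	0x0000	/* placeholder attribute ID */

static int
ufshci_dme_roundtrip_sketch(struct ufshci_controller *ctrlr)
{
	uint32_t value;
	int error;

	error = ufshci_uic_send_dme_get(ctrlr, EXAMPLE_UNIPRO_ATTR, &value);
	if (error)
		return (error);
	return (ufshci_uic_send_dme_peer_set(ctrlr, EXAMPLE_UNIPRO_ATTR,
	    value));
}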
+
+int
+ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr)
+{
+	struct ufshci_uic_cmd uic_cmd;
+
+	uic_cmd.opcode = UFSHCI_DME_LINK_STARTUP;
+	uic_cmd.argument1 = 0;
+	uic_cmd.argument2 = 0;
+	uic_cmd.argument3 = 0;
+
+	return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
+    uint32_t *return_value)
+{
+	struct ufshci_uic_cmd uic_cmd;
+
+	uic_cmd.opcode = UFSHCI_DME_GET;
+	uic_cmd.argument1 = attribute << 16;
+	uic_cmd.argument2 = 0;
+	uic_cmd.argument3 = 0;
+
+	return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value));
+}
+
+int
+ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
+    uint32_t value)
+{
+	struct ufshci_uic_cmd uic_cmd;
+
+	uic_cmd.opcode = UFSHCI_DME_SET;
+	uic_cmd.argument1 = attribute << 16;
+	/* This driver always sets only volatile values. */
+	uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16;
+	uic_cmd.argument3 = value;
+
+	return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
+    uint16_t attribute, uint32_t *return_value)
+{
+	struct ufshci_uic_cmd uic_cmd;
+
+	uic_cmd.opcode = UFSHCI_DME_PEER_GET;
+	uic_cmd.argument1 = attribute << 16;
+	uic_cmd.argument2 = 0;
+	uic_cmd.argument3 = 0;
+
+	return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value));
+}
+
+int
+ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
+    uint16_t attribute, uint32_t value)
+{
+	struct ufshci_uic_cmd uic_cmd;
+
+	uic_cmd.opcode = UFSHCI_DME_PEER_SET;
+	uic_cmd.argument1 = attribute << 16;
+	/* This driver always sets only volatile values. */
+	uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16;
+	uic_cmd.argument3 = value;
+
+	return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr)
+{
+	struct ufshci_uic_cmd uic_cmd;
+
+	uic_cmd.opcode = UFSHCI_DME_ENDPOINT_RESET;
+	uic_cmd.argument1 = 0;
+	uic_cmd.argument2 = 0;
+	uic_cmd.argument3 = 0;
+
+	return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
diff --git a/sys/modules/ufshci/Makefile b/sys/modules/ufshci/Makefile
new file mode 100644
index 000000000000..ab5f3eaf88d0
--- /dev/null
+++ b/sys/modules/ufshci/Makefile
@@ -0,0 +1,22 @@
+.PATH: ${SRCTOP}/sys/dev/ufshci
+
+KMOD = ufshci
+
+SRCS = ufshci.c \
+	ufshci_pci.c \
+	ufshci_ctrlr.c \
+	ufshci_dev.c \
+	ufshci_ctrlr_cmd.c \
+	ufshci_uic_cmd.c \
+	ufshci_req_queue.c \
+	ufshci_req_sdb.c \
+	ufshci_sim.c \
+	ufshci_sysctl.c \
+	bus_if.h \
+	device_if.h \
+	opt_cam.h \
+	pci_if.h
+
+EXPORT_SYMS= YES
+
+.include <bsd.kmod.mk>
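With this Makefile the driver builds as a standard kernel module: running make in sys/modules/ufshci produces ufshci.ko, loadable with kldload(8). The bus_if.h, device_if.h, opt_cam.h, and pci_if.h entries in SRCS name the interface and option headers the kmod framework generates at build time.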