Index: head/lib/libcam/camlib.c =================================================================== --- head/lib/libcam/camlib.c (revision 300546) +++ head/lib/libcam/camlib.c (revision 300547) @@ -1,754 +1,754 @@ /* * Copyright (c) 1997, 1998, 1999, 2002 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "camlib.h" static const char *nonrewind_devs[] = { "sa" }; char cam_errbuf[CAM_ERRBUF_SIZE]; static struct cam_device *cam_real_open_device(const char *path, int flags, struct cam_device *device, const char *given_path, const char *given_dev_name, int given_unit_number); static struct cam_device *cam_lookup_pass(const char *dev_name, int unit, int flags, const char *given_path, struct cam_device *device); /* * Send a ccb to a passthrough device. */ int cam_send_ccb(struct cam_device *device, union ccb *ccb) { return(ioctl(device->fd, CAMIOCOMMAND, ccb)); } /* * Malloc a CCB, zero out the header and set its path, target and lun ids. */ union ccb * cam_getccb(struct cam_device *dev) { union ccb *ccb; ccb = (union ccb *)malloc(sizeof(union ccb)); if (ccb != NULL) { bzero(&ccb->ccb_h, sizeof(struct ccb_hdr)); ccb->ccb_h.path_id = dev->path_id; ccb->ccb_h.target_id = dev->target_id; ccb->ccb_h.target_lun = dev->target_lun; } return(ccb); } /* * Free a CCB. */ void cam_freeccb(union ccb *ccb) { free(ccb); } /* * Take a device name or path passed in by the user, and attempt to figure * out the device name and unit number. Some possible device name formats are: * /dev/foo0 * foo0 * nfoo0 * * Some peripheral drivers create separate device nodes with 'n' prefix for * non-rewind operations. Currently only sa(4) tape driver has this feature. * We extract pure peripheral name as device name for this special case. 
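The helpers above (cam_getccb(), cam_send_ccb(), cam_freeccb()) plus cam_open_device()/cam_close_device() further down are the complete round trip that the camcontrol code touched by this commit relies on. As a minimal standalone sketch, assuming only the installed libcam headers and linking against -lcam; the device path and the choice of TEST UNIT READY are illustrative, not part of this change:

#include <sys/types.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

#include <camlib.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

int
main(int argc, char **argv)
{
	struct cam_device *dev;
	union ccb *ccb;

	if (argc < 2)
		errx(1, "usage: tur <device>");

	/* Accepts "/dev/da0" or "da0" and opens the matching pass(4) node. */
	if ((dev = cam_open_device(argv[1], O_RDWR)) == NULL)
		errx(1, "%s", cam_errbuf);
	if ((ccb = cam_getccb(dev)) == NULL)
		errx(1, "couldn't allocate CCB");

	/* cam_getccb() only initializes the header; clear the rest with the
	 * macro the new code in this commit uses, then build the SCSI I/O
	 * portion of the CCB. */
	CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);
	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/ 1,
			     /*cbfcnp*/ NULL,
			     /*tag_action*/ MSG_SIMPLE_Q_TAG,
			     /*sense_len*/ SSD_FULL_SIZE,
			     /*timeout*/ 5000);
	ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;

	if (cam_send_ccb(dev, ccb) < 0)
		err(1, "error sending TEST UNIT READY");
	printf("%s\n", (ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP ?
	    "Unit is ready" : "Unit is not ready");

	cam_freeccb(ccb);
	cam_close_device(dev);
	return (0);
}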
* * Input parameters: device name/path, length of devname string * Output: device name, unit number * Return values: returns 0 for success, -1 for failure */ int cam_get_device(const char *path, char *dev_name, int devnamelen, int *unit) { char *func_name = "cam_get_device"; char *tmpstr, *tmpstr2; char *newpath; int unit_offset; int i; if (path == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: device pathname was NULL", func_name); return(-1); } /* * We can be rather destructive to the path string. Make a copy of * it so we don't hose the user's string. */ newpath = (char *)strdup(path); tmpstr = newpath; /* * Check to see whether we have an absolute pathname. */ if (*tmpstr == '/') { tmpstr2 = tmpstr; tmpstr = strrchr(tmpstr2, '/'); if ((tmpstr != NULL) && (*tmpstr != '\0')) tmpstr++; } if (*tmpstr == '\0') { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: no text after slash", func_name); free(newpath); return(-1); } /* * Check to see whether the user has given us a nonrewound tape * device. */ if (*tmpstr == 'n' || *tmpstr == 'e') { for (i = 0; i < sizeof(nonrewind_devs)/sizeof(char *); i++) { int len = strlen(nonrewind_devs[i]); if (strncmp(tmpstr + 1, nonrewind_devs[i], len) == 0) { if (isdigit(tmpstr[len + 1])) { tmpstr++; break; } } } } /* * We should now have just a device name and unit number. * That means that there must be at least 2 characters. * If we only have 1, we don't have a valid device name. */ if (strlen(tmpstr) < 2) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: must have both device name and unit number", func_name); free(newpath); return(-1); } /* * If the first character of the string is a digit, then the user * has probably given us all numbers. Point out the error. */ if (isdigit(*tmpstr)) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: device name cannot begin with a number", func_name); free(newpath); return(-1); } /* * At this point, if the last character of the string isn't a * number, we know the user either didn't give us a device number, * or he gave us a device name/number format we don't recognize. */ if (!isdigit(tmpstr[strlen(tmpstr) - 1])) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: unable to find device unit number", func_name); free(newpath); return(-1); } /* * Attempt to figure out where the device name ends and the unit * number begins. As long as unit_offset is at least 1 less than * the length of the string, we can still potentially have a device * name at the front of the string. When we get to something that * isn't a digit, we've hit the device name. Because of the check * above, we know that this cannot happen when unit_offset == 1. * Therefore it is okay to decrement unit_offset -- it won't cause * us to go past the end of the character array. */ for (unit_offset = 1; (unit_offset < (strlen(tmpstr))) && (isdigit(tmpstr[strlen(tmpstr) - unit_offset])); unit_offset++); unit_offset--; /* * Grab the unit number. */ *unit = atoi(&tmpstr[strlen(tmpstr) - unit_offset]); /* * Put a null in place of the first number of the unit number so * that all we have left is the device name. */ tmpstr[strlen(tmpstr) - unit_offset] = '\0'; strlcpy(dev_name, tmpstr, devnamelen); /* Clean up allocated memory */ free(newpath); return(0); } /* * Backwards compatible wrapper for the real open routine. This translates * a pathname into a device name and unit number for use with the real open * routine. 
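To make the parsing rules above concrete, here is a small caller sketch (again assuming the installed <camlib.h> and -lcam); "/dev/nsa0" is just an example of a non-rewind sa(4) node:

#include <sys/types.h>
#include <stdio.h>

#include <camlib.h>

int
main(void)
{
	char dev_name[DEV_IDLEN + 1];
	int unit;

	/* The leading 'n' of the non-rewind node is stripped, so this ends
	 * up with dev_name "sa" and unit 0. */
	if (cam_get_device("/dev/nsa0", dev_name, sizeof(dev_name), &unit) == -1) {
		fprintf(stderr, "%s\n", cam_errbuf);
		return (1);
	}
	printf("name \"%s\", unit %d\n", dev_name, unit);
	return (0);
}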
*/ struct cam_device * cam_open_device(const char *path, int flags) { int unit; char dev_name[DEV_IDLEN + 1]; /* * cam_get_device() has already put an error message in cam_errbuf, * so we don't need to. */ if (cam_get_device(path, dev_name, sizeof(dev_name), &unit) == -1) return(NULL); return(cam_lookup_pass(dev_name, unit, flags, path, NULL)); } /* * Open the passthrough device for a given bus, target and lun, if the * passthrough device exists. */ struct cam_device * cam_open_btl(path_id_t path_id, target_id_t target_id, lun_id_t target_lun, int flags, struct cam_device *device) { union ccb ccb; struct periph_match_pattern *match_pat; char *func_name = "cam_open_btl"; int fd, bufsize; if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: couldn't open %s\n%s: %s", func_name, XPT_DEVICE, func_name, strerror(errno)); return(NULL); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.func_code = XPT_DEV_MATCH; ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; /* Setup the result buffer */ bufsize = sizeof(struct dev_match_result); ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: couldn't malloc match buffer", func_name); close(fd); return(NULL); } ccb.cdm.num_matches = 0; /* Setup the pattern buffer */ ccb.cdm.num_patterns = 1; ccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern); ccb.cdm.patterns = (struct dev_match_pattern *)malloc( sizeof(struct dev_match_pattern)); if (ccb.cdm.patterns == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: couldn't malloc pattern buffer", func_name); free(ccb.cdm.matches); close(fd); return(NULL); } ccb.cdm.patterns[0].type = DEV_MATCH_PERIPH; match_pat = &ccb.cdm.patterns[0].pattern.periph_pattern; /* * We're looking for the passthrough device associated with this * particular bus/target/lun. */ sprintf(match_pat->periph_name, "pass"); match_pat->path_id = path_id; match_pat->target_id = target_id; match_pat->target_lun = target_lun; /* Now set the flags to indicate what we're looking for. */ match_pat->flags = PERIPH_MATCH_PATH | PERIPH_MATCH_TARGET | PERIPH_MATCH_LUN | PERIPH_MATCH_NAME; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: CAMIOCOMMAND ioctl failed\n" "%s: %s", func_name, func_name, strerror(errno)); goto btl_bailout; } /* * Check for an outright error. 
*/ if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: CAM error %#x, CDM error %d " "returned from XPT_DEV_MATCH ccb", func_name, ccb.ccb_h.status, ccb.cdm.status); goto btl_bailout; } if (ccb.cdm.status == CAM_DEV_MATCH_MORE) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: CDM reported more than one" " passthrough device at %d:%d:%jx!!\n", func_name, path_id, target_id, (uintmax_t)target_lun); goto btl_bailout; } if (ccb.cdm.num_matches == 0) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: no passthrough device found at" " %d:%d:%jx", func_name, path_id, target_id, (uintmax_t)target_lun); goto btl_bailout; } switch(ccb.cdm.matches[0].type) { case DEV_MATCH_PERIPH: { int pass_unit; char dev_path[256]; struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[0].result.periph_result; pass_unit = periph_result->unit_number; free(ccb.cdm.matches); free(ccb.cdm.patterns); close(fd); sprintf(dev_path, "/dev/pass%d", pass_unit); return(cam_real_open_device(dev_path, flags, device, NULL, NULL, 0)); break; /* NOTREACHED */ } default: snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: asked for a peripheral match, but" " got a bus or device match", func_name); goto btl_bailout; break; /* NOTREACHED */ } btl_bailout: free(ccb.cdm.matches); free(ccb.cdm.patterns); close(fd); return(NULL); } struct cam_device * cam_open_spec_device(const char *dev_name, int unit, int flags, struct cam_device *device) { return(cam_lookup_pass(dev_name, unit, flags, NULL, device)); } struct cam_device * cam_open_pass(const char *path, int flags, struct cam_device *device) { return(cam_real_open_device(path, flags, device, path, NULL, 0)); } static struct cam_device * cam_lookup_pass(const char *dev_name, int unit, int flags, const char *given_path, struct cam_device *device) { int fd; union ccb ccb; char dev_path[256]; char *func_name = "cam_lookup_pass"; /* * The flags argument above only applies to the actual passthrough * device open, not our open of the given device to find the * passthrough device. */ if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: couldn't open %s\n%s: %s", func_name, XPT_DEVICE, func_name, strerror(errno)); return(NULL); } /* This isn't strictly necessary for the GETPASSTHRU ioctl. */ ccb.ccb_h.func_code = XPT_GDEVLIST; /* These two are necessary for the GETPASSTHRU ioctl to work. */ strlcpy(ccb.cgdl.periph_name, dev_name, sizeof(ccb.cgdl.periph_name)); ccb.cgdl.unit_number = unit; /* * Attempt to get the passthrough device. This ioctl will fail if * the device name is null, if the device doesn't exist, or if the * passthrough driver isn't in the kernel. */ if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1) { char tmpstr[256]; /* * If we get ENOENT from the transport layer version of * the CAMGETPASSTHRU ioctl, it means one of two things: * either the device name/unit number passed in doesn't * exist, or the passthrough driver isn't in the kernel. */ if (errno == ENOENT) { snprintf(tmpstr, sizeof(tmpstr), "\n%s: either the pass driver isn't in " "your kernel\n%s: or %s%d doesn't exist", func_name, func_name, dev_name, unit); } snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: CAMGETPASSTHRU ioctl failed\n" "%s: %s%s", func_name, func_name, strerror(errno), (errno == ENOENT) ? 
tmpstr : ""); close(fd); return(NULL); } close(fd); /* * If the ioctl returned the right status, but we got an error back * in the ccb, that means that the kernel found the device the user * passed in, but was unable to find the passthrough device for * the device the user gave us. */ if (ccb.cgdl.status == CAM_GDEVLIST_ERROR) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: device %s%d does not exist!", func_name, dev_name, unit); return(NULL); } sprintf(dev_path, "/dev/%s%d", ccb.cgdl.periph_name, ccb.cgdl.unit_number); return(cam_real_open_device(dev_path, flags, device, NULL, dev_name, unit)); } /* * Open a given device. The path argument isn't strictly necessary, but it * is copied into the cam_device structure as a convenience to the user. */ static struct cam_device * cam_real_open_device(const char *path, int flags, struct cam_device *device, const char *given_path, const char *given_dev_name, int given_unit_number) { char *func_name = "cam_real_open_device"; union ccb ccb; int fd = -1, malloced_device = 0; /* * See if the user wants us to malloc a device for him. */ if (device == NULL) { if ((device = (struct cam_device *)malloc( sizeof(struct cam_device))) == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: device structure malloc" " failed\n%s: %s", func_name, func_name, strerror(errno)); return(NULL); } device->fd = -1; malloced_device = 1; } /* * If the user passed in a path, save it for him. */ if (given_path != NULL) strlcpy(device->device_path, given_path, sizeof(device->device_path)); else device->device_path[0] = '\0'; /* * If the user passed in a device name and unit number pair, save * those as well. */ if (given_dev_name != NULL) strlcpy(device->given_dev_name, given_dev_name, sizeof(device->given_dev_name)); else device->given_dev_name[0] = '\0'; device->given_unit_number = given_unit_number; if ((fd = open(path, flags)) < 0) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: couldn't open passthrough device %s\n" "%s: %s", func_name, path, func_name, strerror(errno)); goto crod_bailout; } device->fd = fd; bzero(&ccb, sizeof(union ccb)); /* * Unlike the transport layer version of the GETPASSTHRU ioctl, * we don't have to set any fields. */ ccb.ccb_h.func_code = XPT_GDEVLIST; /* * We're only doing this to get some information on the device in * question. Otherwise, we'd have to pass in yet another * parameter: the passthrough driver unit number. */ if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1) { /* * At this point we know the passthrough device must exist * because we just opened it above. The only way this * ioctl can fail is if the ccb size is wrong. */ snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: CAMGETPASSTHRU ioctl failed\n" "%s: %s", func_name, func_name, strerror(errno)); goto crod_bailout; } /* * If the ioctl returned the right status, but we got an error back * in the ccb, that means that the kernel found the device the user * passed in, but was unable to find the passthrough device for * the device the user gave us. 
*/ if (ccb.cgdl.status == CAM_GDEVLIST_ERROR) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: passthrough device does not exist!", func_name); goto crod_bailout; } device->dev_unit_num = ccb.cgdl.unit_number; strlcpy(device->device_name, ccb.cgdl.periph_name, sizeof(device->device_name)); device->path_id = ccb.ccb_h.path_id; device->target_id = ccb.ccb_h.target_id; device->target_lun = ccb.ccb_h.target_lun; ccb.ccb_h.func_code = XPT_PATH_INQ; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: Path Inquiry CCB failed\n" "%s: %s", func_name, func_name, strerror(errno)); goto crod_bailout; } strlcpy(device->sim_name, ccb.cpi.dev_name, sizeof(device->sim_name)); device->sim_unit_number = ccb.cpi.unit_number; device->bus_id = ccb.cpi.bus_id; /* * It doesn't really matter what is in the payload for a getdev * CCB, the kernel doesn't look at it. */ ccb.ccb_h.func_code = XPT_GDEV_TYPE; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: Get Device Type CCB failed\n" "%s: %s", func_name, func_name, strerror(errno)); goto crod_bailout; } device->pd_type = SID_TYPE(&ccb.cgd.inq_data); bcopy(&ccb.cgd.inq_data, &device->inq_data, sizeof(struct scsi_inquiry_data)); device->serial_num_len = ccb.cgd.serial_num_len; bcopy(&ccb.cgd.serial_num, &device->serial_num, device->serial_num_len); /* * Zero the payload, the kernel does look at the flags. */ - bzero(&(&ccb.ccb_h)[1], sizeof(struct ccb_trans_settings)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb.cts); /* * Get transfer settings for this device. */ ccb.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb.cts.type = CTS_TYPE_CURRENT_SETTINGS; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: Get Transfer Settings CCB failed\n" "%s: %s", func_name, func_name, strerror(errno)); goto crod_bailout; } if (ccb.cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb.cts.xport_specific.spi; device->sync_period = spi->sync_period; device->sync_offset = spi->sync_offset; device->bus_width = spi->bus_width; } else { device->sync_period = 0; device->sync_offset = 0; device->bus_width = 0; } return(device); crod_bailout: if (fd >= 0) close(fd); if (malloced_device) free(device); return(NULL); } void cam_close_device(struct cam_device *dev) { if (dev == NULL) return; cam_close_spec_device(dev); free(dev); } void cam_close_spec_device(struct cam_device *dev) { if (dev == NULL) return; if (dev->fd >= 0) { close(dev->fd); dev->fd = -1; } } char * cam_path_string(struct cam_device *dev, char *str, int len) { if (dev == NULL) { snprintf(str, len, "No path"); return(str); } snprintf(str, len, "(%s%d:%s%d:%d:%d:%jx): ", (dev->device_name[0] != '\0') ? dev->device_name : "pass", dev->dev_unit_num, (dev->sim_name[0] != '\0') ? dev->sim_name : "unknown", dev->sim_unit_number, dev->bus_id, dev->target_id, (uintmax_t)dev->target_lun); return(str); } /* * Malloc/duplicate a CAM device structure. */ struct cam_device * cam_device_dup(struct cam_device *device) { char *func_name = "cam_device_dup"; struct cam_device *newdev; if (device == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: device is NULL", func_name); return(NULL); } newdev = malloc(sizeof(struct cam_device)); if (newdev == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: couldn't malloc CAM device structure", func_name); return(NULL); } bcopy(device, newdev, sizeof(struct cam_device)); return(newdev); } /* * Copy a CAM device structure. 
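The CCB_CLEAR_ALL_EXCEPT_HDR() macro used by the new line above is not part of this hunk; it is added to sys/cam/cam_ccb.h alongside these changes and amounts to something like the following sketch, deriving both the start offset and the length from the CCB that is actually passed:

#define	CCB_CLEAR_ALL_EXCEPT_HDR(ccbp)			\
	bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h),	\
	    sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h))

Note that the removed line cleared sizeof(struct ccb_trans_settings) bytes starting just past the header, which is sizeof(struct ccb_hdr) more than the cts payload actually occupies. Inside a stack-allocated union ccb that overshoot is benign, but it shows how easily the hand-written size arithmetic drifts, which is what centralizing it in the macro avoids.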
*/ void cam_device_copy(struct cam_device *src, struct cam_device *dst) { char *func_name = "cam_device_copy"; if (src == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: source device struct was NULL", func_name); return; } if (dst == NULL) { snprintf(cam_errbuf, CAM_ERRBUF_SIZE, "%s: destination device struct was NULL", func_name); return; } bcopy(src, dst, sizeof(struct cam_device)); } Index: head/sbin/camcontrol/attrib.c =================================================================== --- head/sbin/camcontrol/attrib.c (revision 300546) +++ head/sbin/camcontrol/attrib.c (revision 300547) @@ -1,509 +1,508 @@ /*- * Copyright (c) 2014 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Ken Merry (Spectra Logic Corporation) */ /* * SCSI Read and Write Attribute support for camcontrol(8). 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "camcontrol.h" #if 0 struct scsi_attr_desc { int attr_id; STAILQ_ENTRY(scsi_attr_desc) links; }; #endif static struct scsi_nv elem_type_map[] = { { "all", ELEMENT_TYPE_ALL }, { "picker", ELEMENT_TYPE_MT }, { "slot", ELEMENT_TYPE_ST }, { "portal", ELEMENT_TYPE_IE }, { "drive", ELEMENT_TYPE_DT }, }; static struct scsi_nv sa_map[] = { { "attr_values", SRA_SA_ATTR_VALUES }, { "attr_list", SRA_SA_ATTR_LIST }, { "lv_list", SRA_SA_LOG_VOL_LIST }, { "part_list", SRA_SA_PART_LIST }, { "supp_attr", SRA_SA_SUPPORTED_ATTRS } }; static struct scsi_nv output_format_map[] = { { "text_esc", SCSI_ATTR_OUTPUT_TEXT_ESC }, { "text_raw", SCSI_ATTR_OUTPUT_TEXT_RAW }, { "nonascii_esc", SCSI_ATTR_OUTPUT_NONASCII_ESC }, { "nonascii_trim", SCSI_ATTR_OUTPUT_NONASCII_TRIM }, { "nonascii_raw", SCSI_ATTR_OUTPUT_NONASCII_RAW }, { "field_all", SCSI_ATTR_OUTPUT_FIELD_ALL }, { "field_none", SCSI_ATTR_OUTPUT_FIELD_NONE }, { "field_desc", SCSI_ATTR_OUTPUT_FIELD_DESC }, { "field_num", SCSI_ATTR_OUTPUT_FIELD_NUM }, { "field_size", SCSI_ATTR_OUTPUT_FIELD_SIZE }, { "field_rw", SCSI_ATTR_OUTPUT_FIELD_RW }, }; int scsiattrib(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbosemode, int err_recover) { union ccb *ccb = NULL; int attr_num = -1; #if 0 int num_attrs = 0; #endif int start_attr = 0; int cached_attr = 0; int read_service_action = -1; int read_attr = 0, write_attr = 0; int element_address = 0; int element_type = ELEMENT_TYPE_ALL; int partition = 0; int logical_volume = 0; char *endptr; uint8_t *data_buf = NULL; uint32_t dxfer_len = UINT16_MAX - 1; uint32_t valid_len; uint32_t output_format; STAILQ_HEAD(, scsi_attr_desc) write_attr_list; int error = 0; int c; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); error = 1; goto bailout; } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); STAILQ_INIT(&write_attr_list); /* * By default, when displaying attribute values, we trim out * non-ASCII characters in ASCII fields. We display all fields * (description, attribute number, attribute size, and readonly * status). We default to displaying raw text. * * XXX KDM need to port this to stable/10 and newer FreeBSD * versions that have iconv built in and can convert codesets. 
*/ output_format = SCSI_ATTR_OUTPUT_NONASCII_TRIM | SCSI_ATTR_OUTPUT_FIELD_ALL | SCSI_ATTR_OUTPUT_TEXT_RAW; data_buf = malloc(dxfer_len); if (data_buf == NULL) { warn("%s: error allocating %u bytes", __func__, dxfer_len); error = 1; goto bailout; } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': attr_num = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid attribute number %s", __func__, optarg); error = 1; goto bailout; } start_attr = attr_num; break; case 'c': cached_attr = 1; break; case 'e': element_address = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid element address %s", __func__, optarg); error = 1; goto bailout; } break; case 'F': { scsi_nv_status status; scsi_attrib_output_flags new_outflags; int entry_num = 0; char *tmpstr; if (isdigit(optarg[0])) { output_format = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid numeric output " "format argument %s", __func__, optarg); error = 1; goto bailout; } break; } new_outflags = SCSI_ATTR_OUTPUT_NONE; while ((tmpstr = strsep(&optarg, ",")) != NULL) { status = scsi_get_nv(output_format_map, sizeof(output_format_map) / sizeof(output_format_map[0]), tmpstr, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) new_outflags |= output_format_map[entry_num].value; else { warnx("%s: %s format option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", tmpstr); error = 1; goto bailout; } } output_format = new_outflags; break; } case 'p': partition = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid partition number %s", __func__, optarg); error = 1; goto bailout; } break; case 'r': { scsi_nv_status status; int entry_num = 0; status = scsi_get_nv(sa_map, sizeof(sa_map) / sizeof(sa_map[0]), optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) read_service_action = sa_map[entry_num].value; else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", "service action", optarg); error = 1; goto bailout; } read_attr = 1; break; } case 's': start_attr = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid starting attr argument %s", __func__, optarg); error = 1; goto bailout; } break; case 'T': { scsi_nv_status status; int entry_num = 0; status = scsi_get_nv(elem_type_map, sizeof(elem_type_map) / sizeof(elem_type_map[0]), optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) element_type = elem_type_map[entry_num].value; else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? 
"ambiguous" : "invalid", "element type", optarg); error = 1; goto bailout; } break; } case 'w': warnx("%s: writing attributes is not implemented yet", __func__); error = 1; goto bailout; break; case 'V': logical_volume = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid logical volume argument %s", __func__, optarg); error = 1; goto bailout; } break; default: break; } } /* * Default to reading attributes */ if (((read_attr == 0) && (write_attr == 0)) || ((read_attr != 0) && (write_attr != 0))) { warnx("%s: Must specify either -r or -w", __func__); error = 1; goto bailout; } if (read_attr != 0) { scsi_read_attribute(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ read_service_action, /*element*/ element_address, /*elem_type*/ element_type, /*logical_volume*/ logical_volume, /*partition*/ partition, /*first_attribute*/ start_attr, /*cache*/ cached_attr, /*data_ptr*/ data_buf, /*length*/ dxfer_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 60000); #if 0 } else { #endif } ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (err_recover != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending %s ATTRIBUTE", (read_attr != 0) ? "READ" : "WRITE"); if (verbosemode != 0) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } if (read_attr == 0) goto bailout; valid_len = dxfer_len - ccb->csio.resid; switch (read_service_action) { case SRA_SA_ATTR_VALUES: { uint32_t len_left, hdr_len, cur_len; struct scsi_read_attribute_values *hdr; struct scsi_mam_attribute_header *cur_id; char error_str[512]; uint8_t *cur_pos; struct sbuf *sb; hdr = (struct scsi_read_attribute_values *)data_buf; if (valid_len < sizeof(*hdr)) { fprintf(stdout, "No attributes returned.\n"); error = 0; goto bailout; } sb = sbuf_new_auto(); if (sb == NULL) { warn("%s: Unable to allocate sbuf", __func__); error = 1; goto bailout; } /* * XXX KDM grab more data if it is available. */ hdr_len = scsi_4btoul(hdr->length); for (len_left = MIN(valid_len, hdr_len), cur_pos = &hdr->attribute_0[0]; len_left > sizeof(*cur_id); len_left -= cur_len, cur_pos += cur_len) { int cur_attr_num; cur_id = (struct scsi_mam_attribute_header *)cur_pos; cur_len = scsi_2btoul(cur_id->length) + sizeof(*cur_id); cur_attr_num = scsi_2btoul(cur_id->id); if ((attr_num != -1) && (cur_attr_num != attr_num)) continue; error = scsi_attrib_sbuf(sb, cur_id, len_left, /*user_table*/ NULL, /*num_user_entries*/ 0, /*prefer_user_table*/ 0, output_format, error_str, sizeof(error_str)); if (error != 0) { warnx("%s: %s", __func__, error_str); sbuf_delete(sb); error = 1; goto bailout; } if (attr_num != -1) break; } sbuf_finish(sb); fprintf(stdout, "%s", sbuf_data(sb)); sbuf_delete(sb); break; } case SRA_SA_SUPPORTED_ATTRS: case SRA_SA_ATTR_LIST: { uint32_t len_left, hdr_len; struct scsi_attrib_list_header *hdr; struct scsi_attrib_table_entry *entry = NULL; const char *sa_name = "Supported Attributes"; const char *at_name = "Available Attributes"; int attr_id; uint8_t *cur_id; hdr = (struct scsi_attrib_list_header *)data_buf; if (valid_len < sizeof(*hdr)) { fprintf(stdout, "No %s\n", (read_service_action == SRA_SA_SUPPORTED_ATTRS)? 
sa_name : at_name); error = 0; goto bailout; } fprintf(stdout, "%s:\n", (read_service_action == SRA_SA_SUPPORTED_ATTRS) ? sa_name : at_name); hdr_len = scsi_4btoul(hdr->length); for (len_left = MIN(valid_len, hdr_len), cur_id = &hdr->first_attr_0[0]; len_left > 1; len_left -= sizeof(uint16_t), cur_id += sizeof(uint16_t)) { attr_id = scsi_2btoul(cur_id); if ((attr_num != -1) && (attr_id != attr_num)) continue; entry = scsi_get_attrib_entry(attr_id); fprintf(stdout, "0x%.4x", attr_id); if (entry == NULL) fprintf(stdout, "\n"); else fprintf(stdout, ": %s\n", entry->desc); if (attr_num != -1) break; } break; } case SRA_SA_PART_LIST: case SRA_SA_LOG_VOL_LIST: { struct scsi_attrib_lv_list *lv_list; const char *partition_name = "Partition"; const char *lv_name = "Logical Volume"; if (valid_len < sizeof(*lv_list)) { fprintf(stdout, "No %s list returned\n", (read_service_action == SRA_SA_PART_LIST) ? partition_name : lv_name); error = 0; goto bailout; } lv_list = (struct scsi_attrib_lv_list *)data_buf; fprintf(stdout, "First %s: %d\n", (read_service_action == SRA_SA_PART_LIST) ? partition_name : lv_name, lv_list->first_lv_number); fprintf(stdout, "Number of %ss: %d\n", (read_service_action == SRA_SA_PART_LIST) ? partition_name : lv_name, lv_list->num_logical_volumes); break; } default: break; } bailout: if (ccb != NULL) cam_freeccb(ccb); free(data_buf); return (error); } Index: head/sbin/camcontrol/camcontrol.c =================================================================== --- head/sbin/camcontrol/camcontrol.c (revision 300546) +++ head/sbin/camcontrol/camcontrol.c (revision 300547) @@ -1,9558 +1,9523 @@ /* * Copyright (c) 1997-2007 Kenneth D. Merry * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MINIMALISTIC #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "camcontrol.h" typedef enum { CAM_CMD_NONE = 0x00000000, CAM_CMD_DEVLIST = 0x00000001, CAM_CMD_TUR = 0x00000002, CAM_CMD_INQUIRY = 0x00000003, CAM_CMD_STARTSTOP = 0x00000004, CAM_CMD_RESCAN = 0x00000005, CAM_CMD_READ_DEFECTS = 0x00000006, CAM_CMD_MODE_PAGE = 0x00000007, CAM_CMD_SCSI_CMD = 0x00000008, CAM_CMD_DEVTREE = 0x00000009, CAM_CMD_USAGE = 0x0000000a, CAM_CMD_DEBUG = 0x0000000b, CAM_CMD_RESET = 0x0000000c, CAM_CMD_FORMAT = 0x0000000d, CAM_CMD_TAG = 0x0000000e, CAM_CMD_RATE = 0x0000000f, CAM_CMD_DETACH = 0x00000010, CAM_CMD_REPORTLUNS = 0x00000011, CAM_CMD_READCAP = 0x00000012, CAM_CMD_IDENTIFY = 0x00000013, CAM_CMD_IDLE = 0x00000014, CAM_CMD_STANDBY = 0x00000015, CAM_CMD_SLEEP = 0x00000016, CAM_CMD_SMP_CMD = 0x00000017, CAM_CMD_SMP_RG = 0x00000018, CAM_CMD_SMP_PC = 0x00000019, CAM_CMD_SMP_PHYLIST = 0x0000001a, CAM_CMD_SMP_MANINFO = 0x0000001b, CAM_CMD_DOWNLOAD_FW = 0x0000001c, CAM_CMD_SECURITY = 0x0000001d, CAM_CMD_HPA = 0x0000001e, CAM_CMD_SANITIZE = 0x0000001f, CAM_CMD_PERSIST = 0x00000020, CAM_CMD_APM = 0x00000021, CAM_CMD_AAM = 0x00000022, CAM_CMD_ATTRIB = 0x00000023, CAM_CMD_OPCODES = 0x00000024, CAM_CMD_REPROBE = 0x00000025, CAM_CMD_ZONE = 0x00000026, CAM_CMD_EPC = 0x00000027 } cam_cmdmask; typedef enum { CAM_ARG_NONE = 0x00000000, CAM_ARG_VERBOSE = 0x00000001, CAM_ARG_DEVICE = 0x00000002, CAM_ARG_BUS = 0x00000004, CAM_ARG_TARGET = 0x00000008, CAM_ARG_LUN = 0x00000010, CAM_ARG_EJECT = 0x00000020, CAM_ARG_UNIT = 0x00000040, CAM_ARG_FORMAT_BLOCK = 0x00000080, CAM_ARG_FORMAT_BFI = 0x00000100, CAM_ARG_FORMAT_PHYS = 0x00000200, CAM_ARG_PLIST = 0x00000400, CAM_ARG_GLIST = 0x00000800, CAM_ARG_GET_SERIAL = 0x00001000, CAM_ARG_GET_STDINQ = 0x00002000, CAM_ARG_GET_XFERRATE = 0x00004000, CAM_ARG_INQ_MASK = 0x00007000, CAM_ARG_MODE_EDIT = 0x00008000, CAM_ARG_PAGE_CNTL = 0x00010000, CAM_ARG_TIMEOUT = 0x00020000, CAM_ARG_CMD_IN = 0x00040000, CAM_ARG_CMD_OUT = 0x00080000, CAM_ARG_DBD = 0x00100000, CAM_ARG_ERR_RECOVER = 0x00200000, CAM_ARG_RETRIES = 0x00400000, CAM_ARG_START_UNIT = 0x00800000, CAM_ARG_DEBUG_INFO = 0x01000000, CAM_ARG_DEBUG_TRACE = 0x02000000, CAM_ARG_DEBUG_SUBTRACE = 0x04000000, CAM_ARG_DEBUG_CDB = 0x08000000, CAM_ARG_DEBUG_XPT = 0x10000000, CAM_ARG_DEBUG_PERIPH = 0x20000000, CAM_ARG_DEBUG_PROBE = 0x40000000, } cam_argmask; struct camcontrol_opts { const char *optname; uint32_t cmdnum; cam_argmask argnum; const char *subopt; }; #ifndef MINIMALISTIC struct ata_res_pass16 { u_int16_t reserved[5]; u_int8_t flags; u_int8_t error; u_int8_t sector_count_exp; u_int8_t sector_count; u_int8_t lba_low_exp; u_int8_t lba_low; u_int8_t lba_mid_exp; u_int8_t lba_mid; u_int8_t lba_high_exp; u_int8_t lba_high; u_int8_t device; u_int8_t status; }; struct ata_set_max_pwd { u_int16_t reserved1; u_int8_t password[32]; u_int16_t reserved2[239]; }; static const char scsicmd_opts[] = "a:c:dfi:o:r"; static const char readdefect_opts[] = "f:GPqsS:X"; static const char negotiate_opts[] = "acD:M:O:qR:T:UW:"; static const char smprg_opts[] = "l"; static const char smppc_opts[] = "a:A:d:lm:M:o:p:s:S:T:"; static const char smpphylist_opts[] = "lq"; static char pwd_opt; #endif static struct camcontrol_opts option_table[] = { #ifndef MINIMALISTIC {"tur", CAM_CMD_TUR, CAM_ARG_NONE, NULL}, 
{"inquiry", CAM_CMD_INQUIRY, CAM_ARG_NONE, "DSR"}, {"identify", CAM_CMD_IDENTIFY, CAM_ARG_NONE, NULL}, {"start", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT, NULL}, {"stop", CAM_CMD_STARTSTOP, CAM_ARG_NONE, NULL}, {"load", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT | CAM_ARG_EJECT, NULL}, {"eject", CAM_CMD_STARTSTOP, CAM_ARG_EJECT, NULL}, {"reportluns", CAM_CMD_REPORTLUNS, CAM_ARG_NONE, "clr:"}, {"readcapacity", CAM_CMD_READCAP, CAM_ARG_NONE, "bhHNqs"}, {"reprobe", CAM_CMD_REPROBE, CAM_ARG_NONE, NULL}, #endif /* MINIMALISTIC */ {"rescan", CAM_CMD_RESCAN, CAM_ARG_NONE, NULL}, {"reset", CAM_CMD_RESET, CAM_ARG_NONE, NULL}, #ifndef MINIMALISTIC {"cmd", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts}, {"command", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts}, {"smpcmd", CAM_CMD_SMP_CMD, CAM_ARG_NONE, "r:R:"}, {"smprg", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts}, {"smpreportgeneral", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts}, {"smppc", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts}, {"smpphycontrol", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts}, {"smpplist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts}, {"smpphylist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts}, {"smpmaninfo", CAM_CMD_SMP_MANINFO, CAM_ARG_NONE, "l"}, {"defects", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts}, {"defectlist", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts}, #endif /* MINIMALISTIC */ {"devlist", CAM_CMD_DEVTREE, CAM_ARG_NONE, "-b"}, #ifndef MINIMALISTIC {"periphlist", CAM_CMD_DEVLIST, CAM_ARG_NONE, NULL}, {"modepage", CAM_CMD_MODE_PAGE, CAM_ARG_NONE, "bdelm:P:"}, {"tags", CAM_CMD_TAG, CAM_ARG_NONE, "N:q"}, {"negotiate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts}, {"rate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts}, {"debug", CAM_CMD_DEBUG, CAM_ARG_NONE, "IPTSXcp"}, {"format", CAM_CMD_FORMAT, CAM_ARG_NONE, "qrwy"}, {"sanitize", CAM_CMD_SANITIZE, CAM_ARG_NONE, "a:c:IP:qrUwy"}, {"idle", CAM_CMD_IDLE, CAM_ARG_NONE, "t:"}, {"standby", CAM_CMD_STANDBY, CAM_ARG_NONE, "t:"}, {"sleep", CAM_CMD_SLEEP, CAM_ARG_NONE, ""}, {"apm", CAM_CMD_APM, CAM_ARG_NONE, "l:"}, {"aam", CAM_CMD_AAM, CAM_ARG_NONE, "l:"}, {"fwdownload", CAM_CMD_DOWNLOAD_FW, CAM_ARG_NONE, "f:qsy"}, {"security", CAM_CMD_SECURITY, CAM_ARG_NONE, "d:e:fh:k:l:qs:T:U:y"}, {"hpa", CAM_CMD_HPA, CAM_ARG_NONE, "Pflp:qs:U:y"}, {"persist", CAM_CMD_PERSIST, CAM_ARG_NONE, "ai:I:k:K:o:ps:ST:U"}, {"attrib", CAM_CMD_ATTRIB, CAM_ARG_NONE, "a:ce:F:p:r:s:T:w:V:"}, {"opcodes", CAM_CMD_OPCODES, CAM_ARG_NONE, "No:s:T"}, {"zone", CAM_CMD_ZONE, CAM_ARG_NONE, "ac:l:No:P:"}, {"epc", CAM_CMD_EPC, CAM_ARG_NONE, "c:dDeHp:Pr:sS:T:"}, #endif /* MINIMALISTIC */ {"help", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {"-?", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {"-h", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; struct cam_devitem { struct device_match_result dev_match; int num_periphs; struct periph_match_result *periph_matches; struct scsi_vpd_device_id *device_id; int device_id_len; STAILQ_ENTRY(cam_devitem) links; }; struct cam_devlist { STAILQ_HEAD(, cam_devitem) dev_queue; path_id_t path_id; }; static cam_cmdmask cmdlist; static cam_argmask arglist; camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt); #ifndef MINIMALISTIC static int getdevlist(struct cam_device *device); #endif /* MINIMALISTIC */ static int getdevtree(int argc, char **argv, char *combinedopt); #ifndef MINIMALISTIC static int testunitready(struct cam_device *device, int retry_count, int timeout, int quiet); static int scsistart(struct 
cam_device *device, int startstop, int loadeject, int retry_count, int timeout); static int scsiinquiry(struct cam_device *device, int retry_count, int timeout); static int scsiserial(struct cam_device *device, int retry_count, int timeout); #endif /* MINIMALISTIC */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst); static int dorescan_or_reset(int argc, char **argv, int rescan); static int rescan_or_reset_bus(path_id_t bus, int rescan); static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan); #ifndef MINIMALISTIC static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int getdevid(struct cam_devitem *item); static int buildbusdevlist(struct cam_devlist *devlist); static void freebusdevlist(struct cam_devlist *devlist); static struct cam_devitem *findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr); static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt); static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts); static void cpi_print(struct ccb_pathinq *cpi); static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi); static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd); static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts); static int ratecontrol(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsisanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len); static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len); static int 
scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbose); static int scsireprobe(struct cam_device *device); #endif /* MINIMALISTIC */ #ifndef min #define min(a,b) (((a)<(b))?(a):(b)) #endif #ifndef max #define max(a,b) (((a)>(b))?(a):(b)) #endif camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt) { struct camcontrol_opts *opts; int num_matches = 0; for (opts = table; (opts != NULL) && (opts->optname != NULL); opts++) { if (strncmp(opts->optname, arg, strlen(arg)) == 0) { *cmdnum = opts->cmdnum; *argnum = opts->argnum; *subopt = opts->subopt; if (++num_matches > 1) return(CC_OR_AMBIGUOUS); } } if (num_matches > 0) return(CC_OR_FOUND); else return(CC_OR_NOT_FOUND); } #ifndef MINIMALISTIC static int getdevlist(struct cam_device *device) { union ccb *ccb; char status[32]; int error = 0; ccb = cam_getccb(device); ccb->ccb_h.func_code = XPT_GDEVLIST; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 1; ccb->cgdl.index = 0; ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { if (cam_send_ccb(device, ccb) < 0) { perror("error getting device list"); cam_freeccb(ccb); return(1); } status[0] = '\0'; switch (ccb->cgdl.status) { case CAM_GDEVLIST_MORE_DEVS: strcpy(status, "MORE"); break; case CAM_GDEVLIST_LAST_DEVICE: strcpy(status, "LAST"); break; case CAM_GDEVLIST_LIST_CHANGED: strcpy(status, "CHANGED"); break; case CAM_GDEVLIST_ERROR: strcpy(status, "ERROR"); error = 1; break; } fprintf(stdout, "%s%d: generation: %d index: %d status: %s\n", ccb->cgdl.periph_name, ccb->cgdl.unit_number, ccb->cgdl.generation, ccb->cgdl.index, status); /* * If the list has changed, we need to start over from the * beginning. */ if (ccb->cgdl.status == CAM_GDEVLIST_LIST_CHANGED) ccb->cgdl.index = 0; } cam_freeccb(ccb); return(error); } #endif /* MINIMALISTIC */ static int getdevtree(int argc, char **argv, char *combinedopt) { union ccb ccb; int bufsize, fd; unsigned int i; int need_close = 0; int error = 0; int skip_device = 0; int busonly = 0; int c; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': if ((arglist & CAM_ARG_VERBOSE) == 0) busonly = 1; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return(1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return(1); } ccb.cdm.num_matches = 0; /* * We fetch all nodes, since we display most of them in the default * case, and all in the verbose case. */ ccb.cdm.num_patterns = 0; ccb.cdm.pattern_buf_len = 0; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. 
*/ do { if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); error = 1; break; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); error = 1; break; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_BUS: { struct bus_match_result *bus_result; /* * Only print the bus information if the * user turns on the verbose flag. */ if ((busonly == 0) && (arglist & CAM_ARG_VERBOSE) == 0) break; bus_result = &ccb.cdm.matches[i].result.bus_result; if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "scbus%d on %s%d bus %d%s\n", bus_result->path_id, bus_result->dev_name, bus_result->unit_number, bus_result->bus_id, (busonly ? "" : ":")); break; } case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; char vendor[16], product[48], revision[16]; char fw[5], tmpstr[256]; if (busonly == 1) break; dev_result = &ccb.cdm.matches[i].result.device_result; if ((dev_result->flags & DEV_RESULT_UNCONFIGURED) && ((arglist & CAM_ARG_VERBOSE) == 0)) { skip_device = 1; break; } else skip_device = 0; if (dev_result->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_result->inq_data.vendor, sizeof(dev_result->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_result->inq_data.product, sizeof(dev_result->inq_data.product), sizeof(product)); cam_strvis(revision, dev_result->inq_data.revision, sizeof(dev_result->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if (dev_result->protocol == PROTO_ATA || dev_result->protocol == PROTO_SATAPM) { cam_strvis(product, dev_result->ident_data.model, sizeof(dev_result->ident_data.model), sizeof(product)); cam_strvis(revision, dev_result->ident_data.revision, sizeof(dev_result->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else if (dev_result->protocol == PROTO_SEMB) { struct sep_identify_data *sid; sid = (struct sep_identify_data *) &dev_result->ident_data; cam_strvis(vendor, sid->vendor_id, sizeof(sid->vendor_id), sizeof(vendor)); cam_strvis(product, sid->product_id, sizeof(sid->product_id), sizeof(product)); cam_strvis(revision, sid->product_rev, sizeof(sid->product_rev), sizeof(revision)); cam_strvis(fw, sid->firmware_rev, sizeof(sid->firmware_rev), sizeof(fw)); sprintf(tmpstr, "<%s %s %s %s>", vendor, product, revision, fw); } else { sprintf(tmpstr, "<>"); } if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "%-33s at scbus%d " "target %d lun %jx (", tmpstr, dev_result->path_id, dev_result->target_id, (uintmax_t)dev_result->target_lun); need_close = 1; break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (busonly || skip_device != 0) break; if (need_close > 1) fprintf(stdout, ","); fprintf(stdout, "%s%d", periph_result->periph_name, periph_result->unit_number); need_close++; break; } default: fprintf(stdout, "unknown match type\n"); break; } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); if (need_close) fprintf(stdout, ")\n"); close(fd); return(error); } #ifndef MINIMALISTIC static int testunitready(struct cam_device *device, int retry_count, int timeout, int quiet) { int error = 0; union ccb *ccb; ccb = cam_getccb(device); scsi_test_unit_ready(&ccb->csio, /* retries 
*/ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet == 0) perror("error sending test unit ready"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { if (quiet == 0) fprintf(stdout, "Unit is ready\n"); } else { if (quiet == 0) fprintf(stdout, "Unit is not ready\n"); error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return(error); } static int scsistart(struct cam_device *device, int startstop, int loadeject, int retry_count, int timeout) { union ccb *ccb; int error = 0; ccb = cam_getccb(device); /* * If we're stopping, send an ordered tag so the drive in question * will finish any previously queued writes before stopping. If * the device isn't capable of tagged queueing, or if tagged * queueing is turned off, the tag action is a no-op. */ scsi_start_stop(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ startstop ? MSG_SIMPLE_Q_TAG : MSG_ORDERED_Q_TAG, /* start/stop */ startstop, /* load_eject */ loadeject, /* immediate */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 120000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending start unit"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) if (startstop) { fprintf(stdout, "Unit started successfully"); if (loadeject) fprintf(stdout,", Media loaded\n"); else fprintf(stdout,"\n"); } else { fprintf(stdout, "Unit stopped successfully"); if (loadeject) fprintf(stdout, ", Media ejected\n"); else fprintf(stdout, "\n"); } else { error = 1; if (startstop) fprintf(stdout, "Error received from start unit command\n"); else fprintf(stdout, "Error received from stop unit command\n"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return(error); } int scsidoinquiry(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c; int error = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'D': arglist |= CAM_ARG_GET_STDINQ; break; case 'R': arglist |= CAM_ARG_GET_XFERRATE; break; case 'S': arglist |= CAM_ARG_GET_SERIAL; break; default: break; } } /* * If the user didn't specify any inquiry options, he wants all of * them. 
*/ if ((arglist & CAM_ARG_INQ_MASK) == 0) arglist |= CAM_ARG_INQ_MASK; if (arglist & CAM_ARG_GET_STDINQ) error = scsiinquiry(device, retry_count, timeout); if (error != 0) return(error); if (arglist & CAM_ARG_GET_SERIAL) scsiserial(device, retry_count, timeout); if (arglist & CAM_ARG_GET_XFERRATE) error = camxferrate(device); return(error); } static int scsiinquiry(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct scsi_inquiry_data *inq_buf; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } /* cam_getccb cleans up the header, caller has to zero the payload */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); inq_buf = (struct scsi_inquiry_data *)malloc( sizeof(struct scsi_inquiry_data)); if (inq_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for inquiry\n"); return(1); } bzero(inq_buf, sizeof(*inq_buf)); /* * Note that although the size of the inquiry buffer is the full * 256 bytes specified in the SCSI spec, we only tell the device * that we have allocated SHORT_INQUIRY_LENGTH bytes. There are * two reasons for this: * * - The SCSI spec says that when a length field is only 1 byte, * a value of 0 will be interpreted as 256. Therefore * scsi_inquiry() will convert an inq_len (which is passed in as * a u_int32_t, but the field in the CDB is only 1 byte) of 256 * to 0. Evidently, very few devices meet the spec in that * regard. Some devices, like many Seagate disks, take the 0 as * 0, and don't return any data. One Pioneer DVD-R drive * returns more data than the command asked for. * * So, since there are numerous devices that just don't work * right with the full inquiry size, we don't send the full size. * * - The second reason not to use the full inquiry data length is * that we don't need it here. The only reason we issue a * standard inquiry is to get the vendor name, device name, * and revision so scsi_print_inquiry() can print them. * * If, at some point in the future, more inquiry data is needed for * some reason, this code should use a procedure similar to the * probe code. i.e., issue a short inquiry, and determine from * the additional length passed back from the device how much * inquiry data the device supports. Once the amount the device * supports is determined, issue an inquiry for that amount and no * more. * * KDM, 2/18/2000 */ scsi_inquiry(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)inq_buf, /* inq_len */ SHORT_INQUIRY_LENGTH, /* evpd */ 0, /* page_code */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending SCSI inquiry"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(inq_buf); return(error); } fprintf(stdout, "%s%d: ", device->device_name, device->dev_unit_num); scsi_print_inquiry(inq_buf); free(inq_buf); return(0); } static int scsiserial(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct scsi_vpd_unit_serial_number *serial_buf; char serial_num[SVPD_SERIAL_NUM_SIZE + 1]; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } /* cam_getccb cleans up the header, caller has to zero the payload */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf)); if (serial_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for serial number"); return(1); } scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)serial_buf, /* inq_len */ sizeof(*serial_buf), /* evpd */ 1, /* page_code */ SVPD_UNIT_SERIAL_NUMBER, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error getting serial number"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); free(serial_buf); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(serial_buf); return(error); } bcopy(serial_buf->serial_num, serial_num, serial_buf->length); serial_num[serial_buf->length] = '\0'; if ((arglist & CAM_ARG_GET_STDINQ) || (arglist & CAM_ARG_GET_XFERRATE)) fprintf(stdout, "%s%d: Serial Number ", device->device_name, device->dev_unit_num); fprintf(stdout, "%.60s\n", serial_num); free(serial_buf); return(0); } int camxferrate(struct cam_device *device) { struct ccb_pathinq cpi; u_int32_t freq = 0; u_int32_t speed = 0; union ccb *ccb; u_int mb; int retval = 0; if ((retval = get_cpi(device, &cpi)) != 0) return (1); ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cts); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char error_string[] = "error getting transfer settings"; if (retval < 0) warn(error_string); else warnx(error_string); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto xferrate_bailout; } speed = 
cpi.base_transfer_speed; freq = 0; if (ccb->cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { freq = scsi_calc_syncsrate(spi->sync_period); speed = freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { speed *= (0x01 << spi->bus_width); } } else if (ccb->cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &ccb->cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) speed = fc->bitrate; } else if (ccb->cts.transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &ccb->cts.xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) speed = sas->bitrate; } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; if (pata->valid & CTS_ATA_VALID_MODE) speed = ata_mode2speed(pata->mode); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; if (sata->valid & CTS_SATA_VALID_REVISION) speed = ata_revision2speed(sata->revision); } mb = speed / 1000; if (mb > 0) { fprintf(stdout, "%s%d: %d.%03dMB/s transfers", device->device_name, device->dev_unit_num, mb, speed % 1000); } else { fprintf(stdout, "%s%d: %dKB/s transfers", device->device_name, device->dev_unit_num, speed); } if (ccb->cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) fprintf(stdout, " (%d.%03dMHz, offset %d", freq / 1000, freq % 1000, spi->sync_offset); if (((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) && (spi->bus_width > 0)) { if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ", "); } else { fprintf(stdout, " ("); } fprintf(stdout, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ")"); } } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; printf(" ("); if (pata->valid & CTS_ATA_VALID_MODE) printf("%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) printf("ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) printf("PIO %dbytes", pata->bytecount); printf(")"); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; printf(" ("); if (sata->valid & CTS_SATA_VALID_REVISION) printf("SATA %d.x, ", sata->revision); else printf("SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) printf("%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_SATA_VALID_ATAPI) && sata->atapi != 0) printf("ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) printf("PIO %dbytes", sata->bytecount); printf(")"); } if (ccb->cts.protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi = &ccb->cts.proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { if (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) { fprintf(stdout, ", Command Queueing Enabled"); } } } fprintf(stdout, "\n"); xferrate_bailout: cam_freeccb(ccb); return(retval); } static void atahpa_print(struct ata_params *parm, u_int64_t hpasize, int header) { u_int32_t lbasize = (u_int32_t)parm->lba_size_1 | ((u_int32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | 
((u_int64_t)parm->lba_size48_4 << 48); if (header) { printf("\nFeature " "Support Enabled Value\n"); } printf("Host Protected Area (HPA) "); if (parm->support.command1 & ATA_SUPPORT_PROTECTED) { u_int64_t lba = lbasize48 ? lbasize48 : lbasize; printf("yes %s %ju/%ju\n", (hpasize > lba) ? "yes" : "no ", lba, hpasize); printf("HPA - Security "); if (parm->support.command1 & ATA_SUPPORT_MAXSECURITY) printf("yes\n"); else printf("no\n"); } else { printf("no\n"); } } static int atasata(struct ata_params *parm) { if (parm->satacapabilities != 0xffff && parm->satacapabilities != 0x0000) return 1; return 0; } static void atacapprint(struct ata_params *parm) { u_int32_t lbasize = (u_int32_t)parm->lba_size_1 | ((u_int32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); printf("\n"); printf("protocol "); printf("ATA/ATAPI-%d", ata_version(parm->version_major)); if (parm->satacapabilities && parm->satacapabilities != 0xffff) { if (parm->satacapabilities & ATA_SATA_GEN3) printf(" SATA 3.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN2) printf(" SATA 2.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN1) printf(" SATA 1.x\n"); else printf(" SATA\n"); } else printf("\n"); printf("device model %.40s\n", parm->model); printf("firmware revision %.8s\n", parm->revision); printf("serial number %.20s\n", parm->serial); if (parm->enabled.extension & ATA_SUPPORT_64BITWWN) { printf("WWN %04x%04x%04x%04x\n", parm->wwn[0], parm->wwn[1], parm->wwn[2], parm->wwn[3]); } if (parm->enabled.extension & ATA_SUPPORT_MEDIASN) { printf("media serial number %.30s\n", parm->media_serial); } printf("cylinders %d\n", parm->cylinders); printf("heads %d\n", parm->heads); printf("sectors/track %d\n", parm->sectors); printf("sector size logical %u, physical %lu, offset %lu\n", ata_logical_sector_size(parm), (unsigned long)ata_physical_sector_size(parm), (unsigned long)ata_logical_sector_offset(parm)); if (parm->config == ATA_PROTO_CFA || (parm->support.command2 & ATA_SUPPORT_CFA)) printf("CFA supported\n"); printf("LBA%ssupported ", parm->capabilities1 & ATA_SUPPORT_LBA ? " " : " not "); if (lbasize) printf("%d sectors\n", lbasize); else printf("\n"); printf("LBA48%ssupported ", parm->support.command2 & ATA_SUPPORT_ADDRESS48 ? " " : " not "); if (lbasize48) printf("%ju sectors\n", (uintmax_t)lbasize48); else printf("\n"); printf("PIO supported PIO"); switch (ata_max_pmode(parm)) { case ATA_PIO4: printf("4"); break; case ATA_PIO3: printf("3"); break; case ATA_PIO2: printf("2"); break; case ATA_PIO1: printf("1"); break; default: printf("0"); } if ((parm->capabilities1 & ATA_SUPPORT_IORDY) == 0) printf(" w/o IORDY"); printf("\n"); printf("DMA%ssupported ", parm->capabilities1 & ATA_SUPPORT_DMA ? 
" " : " not "); if (parm->capabilities1 & ATA_SUPPORT_DMA) { if (parm->mwdmamodes & 0xff) { printf("WDMA"); if (parm->mwdmamodes & 0x04) printf("2"); else if (parm->mwdmamodes & 0x02) printf("1"); else if (parm->mwdmamodes & 0x01) printf("0"); printf(" "); } if ((parm->atavalid & ATA_FLAG_88) && (parm->udmamodes & 0xff)) { printf("UDMA"); if (parm->udmamodes & 0x40) printf("6"); else if (parm->udmamodes & 0x20) printf("5"); else if (parm->udmamodes & 0x10) printf("4"); else if (parm->udmamodes & 0x08) printf("3"); else if (parm->udmamodes & 0x04) printf("2"); else if (parm->udmamodes & 0x02) printf("1"); else if (parm->udmamodes & 0x01) printf("0"); printf(" "); } } printf("\n"); if (parm->media_rotation_rate == 1) { printf("media RPM non-rotating\n"); } else if (parm->media_rotation_rate >= 0x0401 && parm->media_rotation_rate <= 0xFFFE) { printf("media RPM %d\n", parm->media_rotation_rate); } printf("\nFeature " "Support Enabled Value Vendor\n"); printf("read ahead %s %s\n", parm->support.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no"); printf("write cache %s %s\n", parm->support.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no"); printf("flush cache %s %s\n", parm->support.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no"); printf("overlap %s\n", parm->capabilities1 & ATA_SUPPORT_OVERLAP ? "yes" : "no"); printf("Tagged Command Queuing (TCQ) %s %s", parm->support.command2 & ATA_SUPPORT_QUEUED ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_QUEUED ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_QUEUED) { printf(" %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); } else printf("\n"); printf("Native Command Queuing (NCQ) "); if (parm->satacapabilities != 0xffff && (parm->satacapabilities & ATA_SUPPORT_NCQ)) { printf("yes %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); } else printf("no\n"); printf("NCQ Queue Management %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_NCQ_QMANAGEMENT ? "yes" : "no"); printf("NCQ Streaming %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_NCQ_STREAM ? "yes" : "no"); printf("Receive & Send FPDMA Queued %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED ? "yes" : "no"); printf("SMART %s %s\n", parm->support.command1 & ATA_SUPPORT_SMART ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SMART ? "yes" : "no"); printf("microcode download %s %s\n", parm->support.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no"); printf("security %s %s\n", parm->support.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no"); printf("power management %s %s\n", parm->support.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no"); printf("advanced power management %s %s", parm->support.command2 & ATA_SUPPORT_APM ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_APM ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_APM) { printf(" %d/0x%02X\n", parm->apm_value & 0xff, parm->apm_value & 0xff); } else printf("\n"); printf("automatic acoustic management %s %s", parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC ? "yes" :"no", parm->enabled.command2 & ATA_SUPPORT_AUTOACOUSTIC ? 
"yes" :"no"); if (parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC) { printf(" %d/0x%02X %d/0x%02X\n", ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic)); } else printf("\n"); printf("media status notification %s %s\n", parm->support.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no"); printf("power-up in Standby %s %s\n", parm->support.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no"); printf("write-read-verify %s %s", parm->support2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no"); if (parm->support2 & ATA_SUPPORT_WRITEREADVERIFY) { printf(" %d/0x%x\n", parm->wrv_mode, parm->wrv_mode); } else printf("\n"); printf("unload %s %s\n", parm->support.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no"); printf("general purpose logging %s %s\n", parm->support.extension & ATA_SUPPORT_GENLOG ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_GENLOG ? "yes" : "no"); printf("free-fall %s %s\n", parm->support2 & ATA_SUPPORT_FREEFALL ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_FREEFALL ? "yes" : "no"); printf("Data Set Management (DSM/TRIM) "); if (parm->support_dsm & ATA_SUPPORT_DSM_TRIM) { printf("yes\n"); printf("DSM - max 512byte blocks "); if (parm->max_dsm_blocks == 0x00) printf("yes not specified\n"); else printf("yes %d\n", parm->max_dsm_blocks); printf("DSM - deterministic read "); if (parm->support3 & ATA_SUPPORT_DRAT) { if (parm->support3 & ATA_SUPPORT_RZAT) printf("yes zeroed\n"); else printf("yes any value\n"); } else { printf("no\n"); } } else { printf("no\n"); } } static int scsi_cam_pass_16_send(struct cam_device *device, union ccb *ccb, int quiet) { struct ata_pass_16 *ata_pass_16; struct ata_cmd ata_cmd; ata_pass_16 = (struct ata_pass_16 *)ccb->csio.cdb_io.cdb_bytes; ata_cmd.command = ata_pass_16->command; ata_cmd.control = ata_pass_16->control; ata_cmd.features = ata_pass_16->features; if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s via pass_16 with timeout of %u msecs", ata_op_string(&ata_cmd), ccb->csio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warn("error sending ATA %s via pass_16", ata_op_string(&ata_cmd)); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } if (!(ata_pass_16->flags & AP_FLAG_CHK_COND) && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warnx("ATA %s via pass_16 failed", ata_op_string(&ata_cmd)); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_cam_send(struct cam_device *device, union ccb *ccb, int quiet) { if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s with timeout of %u msecs", ata_op_string(&(ccb->ataio.cmd)), ccb->ataio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warn("error sending ATA %s", 
ata_op_string(&(ccb->ataio.cmd))); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warnx("ATA %s failed: %d", ata_op_string(&(ccb->ataio.cmd)), quiet); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_do_pass_16(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t ata_flags, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int64_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int quiet) { if (data_ptr != NULL) { ata_flags |= AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT; if (flags & CAM_DIR_OUT) ata_flags |= AP_FLAG_TDIR_TO_DEV; else ata_flags |= AP_FLAG_TDIR_FROM_DEV; } else { ata_flags |= AP_FLAG_TLEN_NO_DATA; } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_ata_pass_16(&ccb->csio, retries, NULL, flags, tag_action, protocol, ata_flags, features, sector_count, lba, command, /*control*/0, data_ptr, dxfer_len, /*sense_len*/SSD_FULL_SIZE, timeout); return scsi_cam_pass_16_send(device, ccb, quiet); } static int ata_try_pass_16(struct cam_device *device) { struct ccb_pathinq cpi; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } if (cpi.protocol == PROTO_SCSI) { /* possibly compatible with pass_16 */ return (1); } /* likely not compatible with pass_16 */ return (0); } static int ata_do_28bit_cmd(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int32_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int quiet) { switch (ata_try_pass_16(device)) { case -1: return (1); case 1: /* Try using SCSI Passthrough */ return ata_do_pass_16(device, ccb, retries, flags, protocol, 0, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout, quiet); } - bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_ataio) - - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->ataio); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); return ata_cam_send(device, ccb, quiet); } static int ata_do_cmd(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t ata_flags, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int64_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int force48bit) { int retval; retval = ata_try_pass_16(device); if (retval == -1) return (1); if (retval == 1) { int error; /* Try using SCSI Passthrough */ error = ata_do_pass_16(device, ccb, retries, flags, protocol, ata_flags, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout, 0); if (ata_flags & AP_FLAG_CHK_COND) { /* Decode ata_res from sense data */ struct ata_res_pass16 *res_pass16; struct ata_res *res; u_int i; u_int16_t *ptr; /* sense_data is 4 byte aligned */ ptr = (uint16_t*)(uintptr_t)&ccb->csio.sense_data; for (i = 0; i < sizeof(*res_pass16) / 2; i++) ptr[i] = le16toh(ptr[i]); /* sense_data is 4 byte aligned */ res_pass16 = (struct ata_res_pass16 *)(uintptr_t) &ccb->csio.sense_data; res = &ccb->ataio.res; 
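/*
 * The loop above converts the 16-bit words of the returned sense data to
 * host byte order; the copy below then transfers the ATA result fields from
 * the ata_res_pass16 overlay into ccb->ataio.res, so callers such as
 * atahpa_proc_resp() can read a normal ata_res whether the command was sent
 * as a native ATA CCB or via ATA PASS-THROUGH(16).
 */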
res->flags = res_pass16->flags; res->status = res_pass16->status; res->error = res_pass16->error; res->lba_low = res_pass16->lba_low; res->lba_mid = res_pass16->lba_mid; res->lba_high = res_pass16->lba_high; res->device = res_pass16->device; res->lba_low_exp = res_pass16->lba_low_exp; res->lba_mid_exp = res_pass16->lba_mid_exp; res->lba_high_exp = res_pass16->lba_high_exp; res->sector_count = res_pass16->sector_count; res->sector_count_exp = res_pass16->sector_count_exp; } return (error); } - bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_ataio) - - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->ataio); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); if (force48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); if (ata_flags & AP_FLAG_CHK_COND) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; return ata_cam_send(device, ccb, 0); } static void dump_data(uint16_t *ptr, uint32_t len) { u_int i; for (i = 0; i < len / 2; i++) { if ((i % 8) == 0) printf(" %3d: ", i); printf("%04hx ", ptr[i]); if ((i % 8) == 7) printf("\n"); } if ((i % 8) != 7) printf("\n"); } static int atahpa_proc_resp(struct cam_device *device, union ccb *ccb, int is48bit, u_int64_t *hpasize) { struct ata_res *res; res = &ccb->ataio.res; if (res->status & ATA_STATUS_ERROR) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); printf("error = 0x%02x, sector_count = 0x%04x, " "device = 0x%02x, status = 0x%02x\n", res->error, res->sector_count, res->device, res->status); } if (res->error & ATA_ERROR_ID_NOT_FOUND) { warnx("Max address has already been set since " "last power-on or hardware reset"); } return (1); } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s%d: Raw native max data:\n", device->device_name, device->dev_unit_num); /* res is 4 byte aligned */ dump_data((uint16_t*)(uintptr_t)res, sizeof(struct ata_res)); printf("error = 0x%02x, sector_count = 0x%04x, device = 0x%02x, " "status = 0x%02x\n", res->error, res->sector_count, res->device, res->status); } if (hpasize != NULL) { if (is48bit) { *hpasize = (((u_int64_t)((res->lba_high_exp << 16) | (res->lba_mid_exp << 8) | res->lba_low_exp) << 24) | ((res->lba_high << 16) | (res->lba_mid << 8) | res->lba_low)) + 1; } else { *hpasize = (((res->device & 0x0f) << 24) | (res->lba_high << 16) | (res->lba_mid << 8) | res->lba_low) + 1; } } return (0); } static int ata_read_native_max(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, struct ata_params *parm, u_int64_t *hpasize) { int error; u_int cmd, is48bit; u_int8_t protocol; is48bit = parm->support.command2 & ATA_SUPPORT_ADDRESS48; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_READ_NATIVE_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_READ_NATIVE_MAX_ADDRESS; } error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? 
timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, hpasize); } static int atahpa_set_max(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, u_int64_t maxsize, int persist) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_SET_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_SET_MAX_ADDRESS; } /* lba's are zero indexed so the max lba is requested max - 1 */ if (maxsize) maxsize--; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_MAX_ADDR, /*lba*/maxsize, /*sector_count*/persist, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_password(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_SET_PWD, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t*)pwd, /*dxfer_len*/sizeof(struct ata_set_max_pwd), timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_lock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_LOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_unlock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_UNLOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t*)pwd, /*dxfer_len*/sizeof(struct ata_set_max_pwd), timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_freeze_lock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_FREEZE, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? 
timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } int ata_do_identify(struct cam_device *device, int retry_count, int timeout, union ccb *ccb, struct ata_params** ident_bufp) { struct ata_params *ident_buf; struct ccb_pathinq cpi; struct ccb_getdev cgd; u_int i, error; int16_t *ptr; u_int8_t command, retry_command; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } /* Neither PROTO_ATAPI or PROTO_SATAPM are used in cpi.protocol */ if (cpi.protocol == PROTO_ATA) { if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (-1); } command = (cgd.protocol == PROTO_ATA) ? ATA_ATA_IDENTIFY : ATA_ATAPI_IDENTIFY; retry_command = 0; } else { /* We don't know which for sure so try both */ command = ATA_ATA_IDENTIFY; retry_command = ATA_ATAPI_IDENTIFY; } ptr = (uint16_t *)calloc(1, sizeof(struct ata_params)); if (ptr == NULL) { warnx("can't calloc memory for identify\n"); return (1); } error = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/command, /*features*/0, /*lba*/0, /*sector_count*/(u_int8_t)sizeof(struct ata_params), /*data_ptr*/(u_int8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); if (error != 0) { if (retry_command == 0) { free(ptr); return (1); } error = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/retry_command, /*features*/0, /*lba*/0, /*sector_count*/(u_int8_t) sizeof(struct ata_params), /*data_ptr*/(u_int8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/0); if (error != 0) { free(ptr); return (1); } } error = 1; for (i = 0; i < sizeof(struct ata_params) / 2; i++) { ptr[i] = le16toh(ptr[i]); if (ptr[i] != 0) error = 0; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s%d: Raw identify data:\n", device->device_name, device->dev_unit_num); dump_data(ptr, sizeof(struct ata_params)); } /* check for invalid (all zero) response */ if (error != 0) { warnx("Invalid identify response detected"); free(ptr); return (error); } ident_buf = (struct ata_params *)ptr; if (strncmp(ident_buf->model, "FX", 2) && strncmp(ident_buf->model, "NEC", 3) && strncmp(ident_buf->model, "Pioneer", 7) && strncmp(ident_buf->model, "SHARP", 5)) { ata_bswap(ident_buf->model, sizeof(ident_buf->model)); ata_bswap(ident_buf->revision, sizeof(ident_buf->revision)); ata_bswap(ident_buf->serial, sizeof(ident_buf->serial)); ata_bswap(ident_buf->media_serial, sizeof(ident_buf->media_serial)); } ata_btrim(ident_buf->model, sizeof(ident_buf->model)); ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model)); ata_btrim(ident_buf->revision, sizeof(ident_buf->revision)); ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision)); ata_btrim(ident_buf->serial, sizeof(ident_buf->serial)); ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial)); ata_btrim(ident_buf->media_serial, sizeof(ident_buf->media_serial)); ata_bpack(ident_buf->media_serial, ident_buf->media_serial, sizeof(ident_buf->media_serial)); *ident_bufp = ident_buf; return (0); } static int ataidentify(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct ata_params *ident_buf; u_int64_t hpasize; if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } if 
(ata_do_identify(device, retry_count, timeout, ccb, &ident_buf) != 0) { cam_freeccb(ccb); return (1); } if (ident_buf->support.command1 & ATA_SUPPORT_PROTECTED) { if (ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize) != 0) { cam_freeccb(ccb); return (1); } } else { hpasize = 0; } printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); atacapprint(ident_buf); atahpa_print(ident_buf, hpasize, 0); free(ident_buf); cam_freeccb(ccb); return (0); } #endif /* MINIMALISTIC */ #ifndef MINIMALISTIC enum { ATA_SECURITY_ACTION_PRINT, ATA_SECURITY_ACTION_FREEZE, ATA_SECURITY_ACTION_UNLOCK, ATA_SECURITY_ACTION_DISABLE, ATA_SECURITY_ACTION_ERASE, ATA_SECURITY_ACTION_ERASE_ENHANCED, ATA_SECURITY_ACTION_SET_PASSWORD }; static void atasecurity_print_time(u_int16_t tw) { if (tw == 0) printf("unspecified"); else if (tw >= 255) printf("> 508 min"); else printf("%i min", 2 * tw); } static u_int32_t atasecurity_erase_timeout_msecs(u_int16_t timeout) { if (timeout == 0) return 2 * 3600 * 1000; /* default: two hours */ else if (timeout > 255) return (508 + 60) * 60 * 1000; /* spec says > 508 minutes */ return ((2 * timeout) + 5) * 60 * 1000; /* add a 5min margin */ } static void atasecurity_notify(u_int8_t command, struct ata_security_password *pwd) { struct ata_cmd cmd; bzero(&cmd, sizeof(cmd)); cmd.command = command; printf("Issuing %s", ata_op_string(&cmd)); if (pwd != NULL) { char pass[sizeof(pwd->password)+1]; /* pwd->password may not be null terminated */ pass[sizeof(pwd->password)] = '\0'; strncpy(pass, pwd->password, sizeof(pwd->password)); printf(" password='%s', user='%s'", pass, (pwd->ctrl & ATA_SECURITY_PASSWORD_MASTER) ? "master" : "user"); if (command == ATA_SECURITY_SET_PASSWORD) { printf(", mode='%s'", (pwd->ctrl & ATA_SECURITY_LEVEL_MAXIMUM) ? 
"maximum" : "high"); } } printf("\n"); } static int atasecurity_freeze(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_FREEZE_LOCK, NULL); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_FREEZE_LOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*quiet*/0); } static int atasecurity_unlock(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_UNLOCK, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_UNLOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static int atasecurity_disable(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_DISABLE_PASSWORD, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_DISABLE_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static int atasecurity_erase_confirm(struct cam_device *device, struct ata_params* ident_buf) { printf("\nYou are about to ERASE ALL DATA from the following" " device:\n%s%d,%s%d: ", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to ERASE ALL DATA? 
(yes/no) "); if (fgets(str, sizeof(str), stdin) != NULL) { if (strncasecmp(str, "yes", 3) == 0) { return (1); } else if (strncasecmp(str, "no", 2) == 0) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atasecurity_erase(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, u_int32_t erase_timeout, struct ata_security_password *pwd, int quiet) { int error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_PREPARE, NULL); error = ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_PREPARE, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*quiet*/0); if (error != 0) return error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_UNIT, pwd); error = ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_UNIT, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/erase_timeout, /*quiet*/0); if (error == 0 && quiet == 0) printf("\nErase Complete\n"); return error; } static int atasecurity_set_password(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_SET_PASSWORD, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_SET_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static void atasecurity_print(struct ata_params *parm) { printf("\nSecurity Option Value\n"); if (arglist & CAM_ARG_VERBOSE) { printf("status %04x\n", parm->security_status); } printf("supported %s\n", parm->security_status & ATA_SECURITY_SUPPORTED ? "yes" : "no"); if (!(parm->security_status & ATA_SECURITY_SUPPORTED)) return; printf("enabled %s\n", parm->security_status & ATA_SECURITY_ENABLED ? "yes" : "no"); printf("drive locked %s\n", parm->security_status & ATA_SECURITY_LOCKED ? "yes" : "no"); printf("security config frozen %s\n", parm->security_status & ATA_SECURITY_FROZEN ? "yes" : "no"); printf("count expired %s\n", parm->security_status & ATA_SECURITY_COUNT_EXP ? "yes" : "no"); printf("security level %s\n", parm->security_status & ATA_SECURITY_LEVEL ? "maximum" : "high"); printf("enhanced erase supported %s\n", parm->security_status & ATA_SECURITY_ENH_SUPP ? "yes" : "no"); printf("erase time "); atasecurity_print_time(parm->erase_time); printf("\n"); printf("enhanced erase time "); atasecurity_print_time(parm->enhanced_erase_time); printf("\n"); printf("master password rev %04x%s\n", parm->master_passwd_revision, parm->master_passwd_revision == 0x0000 || parm->master_passwd_revision == 0xFFFF ? " (unsupported)" : ""); } /* * Validates and copies the password in optarg to the passed buffer. * If the password in optarg is the same length as the buffer then * the data will still be copied but no null termination will occur. 
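 *
 * A hypothetical invocation, mirroring the calls made by the option parsers
 * below:
 *
 *	ata_getpwd(pwd.password, sizeof(pwd.password), 'U');
 *
 * If the argument exactly fills the buffer, pwd.password is left without a
 * trailing NUL, so callers must treat it as a fixed-length field rather than
 * a C string.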
*/ static int ata_getpwd(u_int8_t *passwd, int max, char opt) { int len; len = strlen(optarg); if (len > max) { warnx("-%c password is too long", opt); return (1); } else if (len == 0) { warnx("-%c password is missing", opt); return (1); } else if (optarg[0] == '-'){ warnx("-%c password starts with '-' (generic arg?)", opt); return (1); } else if (strlen(passwd) != 0 && strcmp(passwd, optarg) != 0) { warnx("-%c password conflicts with existing password from -%c", opt, pwd_opt); return (1); } /* Callers pass in a buffer which does NOT need to be terminated */ strncpy(passwd, optarg, max); pwd_opt = opt; return (0); } enum { ATA_HPA_ACTION_PRINT, ATA_HPA_ACTION_SET_MAX, ATA_HPA_ACTION_SET_PWD, ATA_HPA_ACTION_LOCK, ATA_HPA_ACTION_UNLOCK, ATA_HPA_ACTION_FREEZE_LOCK }; static int atahpa_set_confirm(struct cam_device *device, struct ata_params* ident_buf, u_int64_t maxsize, int persist) { printf("\nYou are about to configure HPA to limit the user accessible\n" "sectors to %ju %s on the device:\n%s%d,%s%d: ", maxsize, persist ? "persistently" : "temporarily", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to configure HPA? (yes/no) "); if (NULL != fgets(str, sizeof(str), stdin)) { if (0 == strncasecmp(str, "yes", 3)) { return (1); } else if (0 == strncasecmp(str, "no", 2)) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; struct ccb_getdev cgd; struct ata_set_max_pwd pwd; int error, confirm, quiet, c, action, actions, persist; int security, is48bit, pwdsize; u_int64_t hpasize, maxsize; actions = 0; confirm = 0; quiet = 0; maxsize = 0; persist = 0; security = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print hpa information */ action = ATA_HPA_ACTION_PRINT; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 's': action = ATA_HPA_ACTION_SET_MAX; maxsize = strtoumax(optarg, NULL, 0); actions++; break; case 'p': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_SET_PWD; security = 1; actions++; break; case 'l': action = ATA_HPA_ACTION_LOCK; security = 1; actions++; break; case 'U': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_UNLOCK; security = 1; actions++; break; case 'f': action = ATA_HPA_ACTION_FREEZE_LOCK; security = 1; actions++; break; case 'P': persist = 1; break; case 'y': confirm++; break; case 'q': quiet++; break; } } if (actions > 1) { warnx("too many hpa actions specified"); return (1); } if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (1); } ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_HPA_ACTION_PRINT) { error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) atahpa_print(ident_buf, hpasize, 1); cam_freeccb(ccb); free(ident_buf); return (error); } if (!(ident_buf->support.command1 & ATA_SUPPORT_PROTECTED)) { warnx("HPA is not 
supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } if (security && !(ident_buf->support.command1 & ATA_SUPPORT_MAXSECURITY)) { warnx("HPA Security is not supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } is48bit = ident_buf->support.command2 & ATA_SUPPORT_ADDRESS48; /* * The ATA spec requires: * 1. Read native max addr is called directly before set max addr * 2. Read native max addr is NOT called before any other set max call */ switch(action) { case ATA_HPA_ACTION_SET_MAX: if (confirm == 0 && atahpa_set_confirm(device, ident_buf, maxsize, persist) == 0) { cam_freeccb(ccb); free(ident_buf); return (1); } error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) { error = atahpa_set_max(device, retry_count, timeout, ccb, is48bit, maxsize, persist); if (error == 0) { /* redo identify to get new lba values */ error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); atahpa_print(ident_buf, hpasize, 1); } } break; case ATA_HPA_ACTION_SET_PWD: error = atahpa_password(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0) printf("HPA password has been set\n"); break; case ATA_HPA_ACTION_LOCK: error = atahpa_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0) printf("HPA has been locked\n"); break; case ATA_HPA_ACTION_UNLOCK: error = atahpa_unlock(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0) printf("HPA has been unlocked\n"); break; case ATA_HPA_ACTION_FREEZE_LOCK: error = atahpa_freeze_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0) printf("HPA has been frozen\n"); break; default: errx(1, "Option currently not supported"); } cam_freeccb(ccb); free(ident_buf); return (error); } static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; int error, confirm, quiet, c, action, actions, setpwd; int security_enabled, erase_timeout, pwdsize; struct ata_security_password pwd; actions = 0; setpwd = 0; erase_timeout = 0; confirm = 0; quiet = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print security information */ action = ATA_SECURITY_ACTION_PRINT; /* user is master by default as its safer that way */ pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': action = ATA_SECURITY_ACTION_FREEZE; actions++; break; case 'U': if (strcasecmp(optarg, "user") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_USER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_MASTER; } else if (strcasecmp(optarg, "master") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_USER; } else { warnx("-U argument '%s' is invalid (must be " "'user' or 'master')", optarg); return (1); } break; case 'l': if (strcasecmp(optarg, "high") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_HIGH; pwd.ctrl &= ~ATA_SECURITY_LEVEL_MAXIMUM; } else if (strcasecmp(optarg, "maximum") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_MAXIMUM; pwd.ctrl &= ~ATA_SECURITY_LEVEL_HIGH; } else { warnx("-l argument '%s' is unknown (must be " "'high' or 'maximum')", optarg); return (1); } break; case 'k': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_UNLOCK; actions++; break; case 'd': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_DISABLE; actions++; break; case 'e': if (ata_getpwd(pwd.password, 
pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_ERASE; actions++; break; case 'h': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); pwd.ctrl |= ATA_SECURITY_ERASE_ENHANCED; action = ATA_SECURITY_ACTION_ERASE_ENHANCED; actions++; break; case 's': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); setpwd = 1; if (action == ATA_SECURITY_ACTION_PRINT) action = ATA_SECURITY_ACTION_SET_PASSWORD; /* * Don't increment action as this can be combined * with other actions. */ break; case 'y': confirm++; break; case 'q': quiet++; break; case 'T': erase_timeout = atoi(optarg) * 1000; break; } } if (actions > 1) { warnx("too many security actions specified"); return (1); } if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_SECURITY_ACTION_PRINT) { atasecurity_print(ident_buf); free(ident_buf); cam_freeccb(ccb); return (0); } if ((ident_buf->support.command1 & ATA_SUPPORT_SECURITY) == 0) { warnx("Security not supported"); free(ident_buf); cam_freeccb(ccb); return (1); } /* default timeout 15 seconds the same as linux hdparm */ timeout = timeout ? timeout : 15 * 1000; security_enabled = ident_buf->security_status & ATA_SECURITY_ENABLED; /* first set the password if requested */ if (setpwd == 1) { /* confirm we can erase before setting the password if erasing */ if (confirm == 0 && (action == ATA_SECURITY_ACTION_ERASE_ENHANCED || action == ATA_SECURITY_ACTION_ERASE) && atasecurity_erase_confirm(device, ident_buf) == 0) { cam_freeccb(ccb); free(ident_buf); return (error); } if (pwd.ctrl & ATA_SECURITY_PASSWORD_MASTER) { pwd.revision = ident_buf->master_passwd_revision; if (pwd.revision != 0 && pwd.revision != 0xfff && --pwd.revision == 0) { pwd.revision = 0xfffe; } } error = atasecurity_set_password(device, ccb, retry_count, timeout, &pwd, quiet); if (error != 0) { cam_freeccb(ccb); free(ident_buf); return (error); } security_enabled = 1; } switch(action) { case ATA_SECURITY_ACTION_FREEZE: error = atasecurity_freeze(device, ccb, retry_count, timeout, quiet); break; case ATA_SECURITY_ACTION_UNLOCK: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } else { warnx("Can't unlock, drive is not locked"); error = 1; } } else { warnx("Can't unlock, security is disabled"); error = 1; } break; case ATA_SECURITY_ACTION_DISABLE: if (security_enabled) { /* First unlock the drive if its locked */ if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } if (error == 0) { error = atasecurity_disable(device, ccb, retry_count, timeout, &pwd, quiet); } } else { warnx("Can't disable security (already disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE: if (security_enabled) { if (erase_timeout == 0) { erase_timeout = atasecurity_erase_timeout_msecs( ident_buf->erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Can't secure erase (security is disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE_ENHANCED: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_ENH_SUPP) { if (erase_timeout == 0) { erase_timeout = 
atasecurity_erase_timeout_msecs( ident_buf->enhanced_erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Enhanced erase is not supported"); error = 1; } } else { warnx("Can't secure erase (enhanced), " "(security is disabled)"); error = 1; } break; } cam_freeccb(ccb); free(ident_buf); return (error); } #endif /* MINIMALISTIC */ /* * Parse out a bus, or a bus, target and lun in the following * format: * bus * bus:target * bus:target:lun * * Returns the number of parsed components, or 0. */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst) { char *tmpstr; int convs = 0; while (isspace(*tstr) && (*tstr != '\0')) tstr++; tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *bus = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_BUS; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_TARGET; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_LUN; convs++; } } } return convs; } static int dorescan_or_reset(int argc, char **argv, int rescan) { static const char must[] = "you must specify \"all\", a bus, or a bus:target:lun to %s"; int rv, error = 0; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr; if (argc < 3) { warnx(must, rescan? "rescan" : "reset"); return(1); } tstr = argv[optind]; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncasecmp(tstr, "all", strlen("all")) == 0) arglist |= CAM_ARG_BUS; else { rv = parse_btl(argv[optind], &bus, &target, &lun, &arglist); if (rv != 1 && rv != 3) { warnx(must, rescan? "rescan" : "reset"); return(1); } } if ((arglist & CAM_ARG_BUS) && (arglist & CAM_ARG_TARGET) && (arglist & CAM_ARG_LUN)) error = scanlun_or_reset_dev(bus, target, lun, rescan); else error = rescan_or_reset_bus(bus, rescan); return(error); } static int rescan_or_reset_bus(path_id_t bus, int rescan) { union ccb ccb, matchccb; int fd, retval; int bufsize; retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } if (bus != CAM_BUS_WILDCARD) { ccb.ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return(1); } if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { fprintf(stdout, "%s of bus %d was successful\n", rescan ? "Re-scan" : "Reset", bus); } else { fprintf(stdout, "%s of bus %d returned error %#x\n", rescan ? "Re-scan" : "Reset", bus, ccb.ccb_h.status & CAM_STATUS_MASK); retval = 1; } close(fd); return(retval); } /* * The right way to handle this is to modify the xpt so that it can * handle a wildcarded bus in a rescan or reset CCB. At the moment * that isn't implemented, so instead we enumerate the busses and * send the rescan or reset to those busses in the case where the * given bus is -1 (wildcard). We don't send a rescan or reset * to the xpt bus; sending a rescan to the xpt bus is effectively a * no-op, sending a rescan to the xpt bus would result in a status of * CAM_REQ_INVALID. 
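 *
 * In outline, the code below submits a single DEV_MATCH_BUS pattern with
 * BUS_MATCH_ANY through an XPT_DEV_MATCH CCB, pulls back up to twenty
 * dev_match_result entries per pass while the match status is
 * CAM_DEV_MATCH_MORE, and issues an XPT_SCAN_BUS or XPT_RESET_BUS CCB for
 * every returned path_id except CAM_XPT_PATH_ID.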
*/ - bzero(&(&matchccb.ccb_h)[1], - sizeof(struct ccb_dev_match) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&matchccb.cdm); matchccb.ccb_h.func_code = XPT_DEV_MATCH; matchccb.ccb_h.path_id = CAM_BUS_WILDCARD; bufsize = sizeof(struct dev_match_result) * 20; matchccb.cdm.match_buf_len = bufsize; matchccb.cdm.matches=(struct dev_match_result *)malloc(bufsize); if (matchccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); retval = 1; goto bailout; } matchccb.cdm.num_matches = 0; matchccb.cdm.num_patterns = 1; matchccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern); matchccb.cdm.patterns = (struct dev_match_pattern *)malloc( matchccb.cdm.pattern_buf_len); if (matchccb.cdm.patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } matchccb.cdm.patterns[0].type = DEV_MATCH_BUS; matchccb.cdm.patterns[0].pattern.bus_pattern.flags = BUS_MATCH_ANY; do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &matchccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((matchccb.ccb_h.status != CAM_REQ_CMP) || ((matchccb.cdm.status != CAM_DEV_MATCH_LAST) && (matchccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", matchccb.ccb_h.status, matchccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < matchccb.cdm.num_matches; i++) { struct bus_match_result *bus_result; /* This shouldn't happen. */ if (matchccb.cdm.matches[i].type != DEV_MATCH_BUS) continue; bus_result = &matchccb.cdm.matches[i].result.bus_result; /* * We don't want to rescan or reset the xpt bus. * See above. */ if (bus_result->path_id == CAM_XPT_PATH_ID) continue; ccb.ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb.ccb_h.path_id = bus_result->path_id; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((ccb.ccb_h.status & CAM_STATUS_MASK) ==CAM_REQ_CMP){ fprintf(stdout, "%s of bus %d was successful\n", rescan? "Re-scan" : "Reset", bus_result->path_id); } else { /* * Don't bail out just yet, maybe the other * rescan or reset commands will complete * successfully. */ fprintf(stderr, "%s of bus %d returned error " "%#x\n", rescan? "Re-scan" : "Reset", bus_result->path_id, ccb.ccb_h.status & CAM_STATUS_MASK); retval = 1; } } } while ((matchccb.ccb_h.status == CAM_REQ_CMP) && (matchccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); if (matchccb.cdm.patterns != NULL) free(matchccb.cdm.patterns); if (matchccb.cdm.matches != NULL) free(matchccb.cdm.matches); return(retval); } static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan) { union ccb ccb; struct cam_device *device; int fd; device = NULL; if (bus == CAM_BUS_WILDCARD) { warnx("invalid bus number %d", bus); return(1); } if (target == CAM_TARGET_WILDCARD) { warnx("invalid target number %d", target); return(1); } if (lun == CAM_LUN_WILDCARD) { warnx("invalid lun number %jx", (uintmax_t)lun); return(1); } fd = -1; bzero(&ccb, sizeof(union ccb)); if (scan) { if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s\n", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } } else { device = cam_open_btl(bus, target, lun, O_RDWR, NULL); if (device == NULL) { warnx("%s", cam_errbuf); return(1); } } ccb.ccb_h.func_code = (scan)? 
XPT_SCAN_LUN : XPT_RESET_DEV; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; ccb.ccb_h.timeout = 5000; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (scan) { if (ioctl(fd, CAMIOCOMMAND, &ccb) < 0) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return(1); } } else { if (cam_send_ccb(device, &ccb) < 0) { warn("error sending XPT_RESET_DEV CCB"); cam_close_device(device); return(1); } } if (scan) close(fd); else cam_close_device(device); /* * An error code of CAM_BDR_SENT is normal for a BDR request. */ if (((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) || ((!scan) && ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_BDR_SENT))) { fprintf(stdout, "%s of %d:%d:%jx was successful\n", scan? "Re-scan" : "Reset", bus, target, (uintmax_t)lun); return(0); } else { fprintf(stdout, "%s of %d:%d:%jx returned error %#x\n", scan? "Re-scan" : "Reset", bus, target, (uintmax_t)lun, ccb.ccb_h.status & CAM_STATUS_MASK); return(1); } } #ifndef MINIMALISTIC static struct scsi_nv defect_list_type_map[] = { { "block", SRDD10_BLOCK_FORMAT }, { "extbfi", SRDD10_EXT_BFI_FORMAT }, { "extphys", SRDD10_EXT_PHYS_FORMAT }, { "longblock", SRDD10_LONG_BLOCK_FORMAT }, { "bfi", SRDD10_BYTES_FROM_INDEX_FORMAT }, { "phys", SRDD10_PHYSICAL_SECTOR_FORMAT } }; static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb = NULL; struct scsi_read_defect_data_hdr_10 *hdr10 = NULL; struct scsi_read_defect_data_hdr_12 *hdr12 = NULL; size_t hdr_size = 0, entry_size = 0; int use_12byte = 0; int hex_format = 0; u_int8_t *defect_list = NULL; u_int8_t list_format = 0; int list_type_set = 0; u_int32_t dlist_length = 0; u_int32_t returned_length = 0, valid_len = 0; u_int32_t num_returned = 0, num_valid = 0; u_int32_t max_possible_size = 0, hdr_max = 0; u_int32_t starting_offset = 0; u_int8_t returned_format, returned_type; unsigned int i; int summary = 0, quiet = 0; int c, error = 0; int lists_specified = 0; int get_length = 1, first_pass = 1; int mads = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': { scsi_nv_status status; int entry_num = 0; status = scsi_get_nv(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) { list_format = defect_list_type_map[ entry_num].value; list_type_set = 1; } else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", "defect list type", optarg); error = 1; goto defect_bailout; } break; } case 'G': arglist |= CAM_ARG_GLIST; break; case 'P': arglist |= CAM_ARG_PLIST; break; case 'q': quiet = 1; break; case 's': summary = 1; break; case 'S': { char *endptr; starting_offset = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { error = 1; warnx("invalid starting offset %s", optarg); goto defect_bailout; } break; } case 'X': hex_format = 1; break; default: break; } } if (list_type_set == 0) { error = 1; warnx("no defect list format specified"); goto defect_bailout; } if (arglist & CAM_ARG_PLIST) { list_format |= SRDD10_PLIST; lists_specified++; } if (arglist & CAM_ARG_GLIST) { list_format |= SRDD10_GLIST; lists_specified++; } /* * This implies a summary, and was the previous behavior. */ if (lists_specified == 0) summary = 1; ccb = cam_getccb(device); retry_12byte: /* * We start off asking for just the header to determine how much * defect data is available. 
Some Hitachi drives return an error * if you ask for more data than the drive has. Once we know the * length, we retry the command with the returned length. */ if (use_12byte == 0) dlist_length = sizeof(*hdr10); else dlist_length = sizeof(*hdr12); retry: if (defect_list != NULL) { free(defect_list); defect_list = NULL; } defect_list = malloc(dlist_length); if (defect_list == NULL) { warnx("can't malloc memory for defect list"); error = 1; goto defect_bailout; } next_batch: bzero(defect_list, dlist_length); /* * cam_getccb() zeros the CCB header only. So we need to zero the * payload portion of the ccb. */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_read_defects(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*list_format*/ list_format, /*addr_desc_index*/ starting_offset, /*data_ptr*/ defect_list, /*dxfer_len*/ dlist_length, /*minimum_cmd_size*/ use_12byte ? 12 : 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(device, ccb) < 0) { perror("error reading defect list"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto defect_bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (use_12byte == 0) { hdr10 = (struct scsi_read_defect_data_hdr_10 *)defect_list; hdr_size = sizeof(*hdr10); hdr_max = SRDDH10_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_2btoul(hdr10->length); returned_format = hdr10->format; } else { returned_length = 0; returned_format = 0; } } else { hdr12 = (struct scsi_read_defect_data_hdr_12 *)defect_list; hdr_size = sizeof(*hdr12); hdr_max = SRDDH12_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_4btoul(hdr12->length); returned_format = hdr12->format; } else { returned_length = 0; returned_format = 0; } } returned_type = returned_format & SRDDH10_DLIST_FORMAT_MASK; switch (returned_type) { case SRDD10_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_block); break; case SRDD10_LONG_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_long_block); break; case SRDD10_EXT_PHYS_FORMAT: case SRDD10_PHYSICAL_SECTOR_FORMAT: entry_size = sizeof(struct scsi_defect_desc_phys_sector); break; case SRDD10_EXT_BFI_FORMAT: case SRDD10_BYTES_FROM_INDEX_FORMAT: entry_size = sizeof(struct scsi_defect_desc_bytes_from_index); break; default: warnx("Unknown defect format 0x%x\n", returned_type); error = 1; goto defect_bailout; break; } max_possible_size = (hdr_max / entry_size) * entry_size; num_returned = returned_length / entry_size; num_valid = min(returned_length, valid_len - hdr_size); num_valid /= entry_size; if (get_length != 0) { get_length = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * If the drive is reporting that it just doesn't * support the defect list format, go ahead and use * the length it reported. Otherwise, the length * may not be valid, so use the maximum. 
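 *
 * The branches below implement that choice: a RECOVERED ERROR with ASC 0x1c
 * (defect list not found, i.e. the requested format is not supported) keeps
 * the reported length, a partial defect list transfer (ASC 0x1f) or INVALID
 * FIELD IN CDB (ASC 0x24) reply to the 10-byte command restarts with the
 * 12-byte opcode via retry_12byte, and any reported length too large for the
 * 10-byte command escalates to the 12-byte form as well.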
*/ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c) && (ascq == 0x00) && (returned_length > 0)) { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1f) && (ascq == 0x00) && (returned_length > 0)) { /* Partial defect list transfer */ /* * Hitachi drives return this error * along with a partial defect list if they * have more defects than the 10 byte * command can support. Retry with the 12 * byte command. */ if (use_12byte == 0) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_ILLEGAL_REQUEST) && (asc == 0x24) && (ascq == 0x00)) { /* Invalid field in CDB */ /* * SBC-3 says that if the drive has more * defects than can be reported with the * 10 byte command, it should return this * error and no data. Retry with the 12 * byte command. */ if (use_12byte == 0) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else { /* * If we got a SCSI error and no valid length, * just use the 10 byte maximum. The 12 * byte maximum is too large. */ if (returned_length == 0) dlist_length = SRDD10_MAX_LENGTH; else { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP){ error = 1; warnx("Error reading defect header"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } else { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } if (summary != 0) { fprintf(stdout, "%u", num_returned); if (quiet == 0) { fprintf(stdout, " defect%s", (num_returned != 1) ? "s" : ""); } fprintf(stdout, "\n"); goto defect_bailout; } /* * We always limit the list length to the 10-byte maximum * length (0xffff). The reason is that some controllers * can't handle larger I/Os, and we can transfer the entire * 10 byte list in one shot. For drives that support the 12 * byte read defects command, we'll step through the list * by specifying a starting offset. For drives that don't * support the 12 byte command's starting offset, we'll * just display the first 64K. */ dlist_length = min(dlist_length, SRDD10_MAX_LENGTH); goto retry; } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI spec, if the disk doesn't support * the requested format, it will generally return a sense * key of RECOVERED ERROR, and an additional sense code * of "DEFECT LIST NOT FOUND". HGST drives also return * Primary/Grown defect list not found errors. So just * check for an ASC of 0x1c. 
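 */
/*
 * For orientation, the sense combinations handled by this length-probe
 * pass are, in the order tested below:
 *
 *   RECOVERED ERROR / asc 0x1c   defect list (format) not found: trust the
 *                                length the drive returned
 *   RECOVERED ERROR / asc 0x1f   partial defect list transfer: retry with
 *                                the 12-byte command
 *   ILLEGAL REQUEST / asc 0x24   invalid field in CDB, i.e. the list does
 *                                not fit the 10-byte command: retry with
 *                                the 12-byte command
 *
 * A minimal sketch of that triage, with the retry bookkeeping reduced to a
 * hypothetical want_12byte flag:
 */
#if 0
	int want_12byte = 0;

	if (sense_key == SSD_KEY_RECOVERED_ERROR && asc == 0x1c)
		dlist_length = returned_length + hdr_size;
	else if (sense_key == SSD_KEY_RECOVERED_ERROR && asc == 0x1f)
		want_12byte = 1;	/* only part of the list came back  */
	else if (sense_key == SSD_KEY_ILLEGAL_REQUEST && asc == 0x24)
		want_12byte = 1;	/* list too big for the 10-byte CDB */
#endif
/*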
*/ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c)) { const char *format_str; format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), list_format & SRDD10_DLIST_FORMAT_MASK); warnx("requested defect format %s not available", format_str ? format_str : "unknown"); format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), returned_type); if (format_str != NULL) { warnx("Device returned %s format", format_str); } else { error = 1; warnx("Device returned unknown defect" " data format %#x", returned_type); goto defect_bailout; } } else { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } if (first_pass != 0) { fprintf(stderr, "Got %d defect", num_returned); if ((lists_specified == 0) || (num_returned == 0)) { fprintf(stderr, "s.\n"); goto defect_bailout; } else if (num_returned == 1) fprintf(stderr, ":\n"); else fprintf(stderr, "s:\n"); first_pass = 0; } /* * XXX KDM I should probably clean up the printout format for the * disk defects. */ switch (returned_type) { case SRDD10_PHYSICAL_SECTOR_FORMAT: case SRDD10_EXT_PHYS_FORMAT: { struct scsi_defect_desc_phys_sector *dlist; dlist = (struct scsi_defect_desc_phys_sector *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t sector; sector = scsi_4btoul(dlist[i].sector); if (returned_type == SRDD10_EXT_PHYS_FORMAT) { mads = (sector & SDD_EXT_PHYS_MADS) ? 0 : 1; sector &= ~SDD_EXT_PHYS_FLAG_MASK; } if (hex_format == 0) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].sector), mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].sector), mads ? " - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_BYTES_FROM_INDEX_FORMAT: case SRDD10_EXT_BFI_FORMAT: { struct scsi_defect_desc_bytes_from_index *dlist; dlist = (struct scsi_defect_desc_bytes_from_index *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t bfi; bfi = scsi_4btoul(dlist[i].bytes_from_index); if (returned_type == SRDD10_EXT_BFI_FORMAT) { mads = (bfi & SDD_EXT_BFI_MADS) ? 1 : 0; bfi &= ~SDD_EXT_BFI_FLAG_MASK; } if (hex_format == 0) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].bytes_from_index), mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].bytes_from_index), mads ? 
" - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDDH10_BLOCK_FORMAT: { struct scsi_defect_desc_block *dlist; dlist = (struct scsi_defect_desc_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (hex_format == 0) fprintf(stdout, "%u\n", scsi_4btoul(dlist[i].address)); else fprintf(stdout, "0x%x\n", scsi_4btoul(dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_LONG_BLOCK_FORMAT: { struct scsi_defect_desc_long_block *dlist; dlist = (struct scsi_defect_desc_long_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (hex_format == 0) fprintf(stdout, "%ju\n", (uintmax_t)scsi_8btou64( dlist[i].address)); else fprintf(stdout, "0x%jx\n", (uintmax_t)scsi_8btou64( dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } default: fprintf(stderr, "Unknown defect format 0x%x\n", returned_type); error = 1; break; } defect_bailout: if (defect_list != NULL) free(defect_list); if (ccb != NULL) cam_freeccb(ccb); return(error); } #endif /* MINIMALISTIC */ #if 0 void reassignblocks(struct cam_device *device, u_int32_t *blocks, int num_blocks) { union ccb *ccb; ccb = cam_getccb(device); cam_freeccb(ccb); } #endif #ifndef MINIMALISTIC void mode_sense(struct cam_device *device, int mode_page, int page_control, int dbd, int retry_count, int timeout, u_int8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_sense: couldn't allocate CCB"); - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_mode_sense(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* page_code */ page_control << 6, /* page */ mode_page, /* param_buf */ data, /* param_len */ datalen, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode sense command"); else errx(1, "error sending mode sense command"); } cam_freeccb(ccb); } void mode_select(struct cam_device *device, int save_pages, int retry_count, int timeout, u_int8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_select: couldn't allocate CCB"); - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_mode_select(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ save_pages, /* param_buf */ data, /* param_len */ datalen, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode select command"); else errx(1, "error sending mode select command"); } cam_freeccb(ccb); } void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, mode_page = -1, page_control = 0; int binary = 0, list = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': binary = 1; break; case 'd': arglist |= CAM_ARG_DBD; break; case 'e': arglist |= CAM_ARG_MODE_EDIT; break; case 'l': list = 1; break; case 'm': mode_page = strtol(optarg, NULL, 0); if (mode_page < 0) errx(1, "invalid mode page %d", mode_page); break; case 'P': page_control = strtol(optarg, NULL, 0); if ((page_control < 0) || (page_control > 3)) errx(1, "invalid page control field %d", page_control); arglist |= CAM_ARG_PAGE_CNTL; break; default: break; } } if (mode_page == -1 && list == 0) errx(1, "you must specify a mode page!"); if (list) { mode_list(device, page_control, arglist & CAM_ARG_DBD, retry_count, timeout); } else { mode_edit(device, mode_page, page_control, arglist & CAM_ARG_DBD, arglist & CAM_ARG_MODE_EDIT, binary, retry_count, timeout); } } static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; u_int32_t flags = CAM_DIR_NONE; u_int8_t *data_ptr = NULL; u_int8_t cdb[20]; u_int8_t atacmd[12]; struct get_hook hook; int c, data_bytes = 0; int cdb_len = 0; int atacmd_len = 0; int dmacmd = 0; int fpdmacmd = 0; int need_res = 0; char *datastr = NULL, *tstr, *resstr = NULL; int error = 0; int fd_data = 0, fd_res = 0; int retval; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsicmd: error allocating ccb"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(ccb); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; atacmd_len = buff_encode_visit(atacmd, sizeof(atacmd), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. */ optind += hook.got; break; case 'c': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; cdb_len = buff_encode_visit(cdb, sizeof(cdb), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. 
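 */
/*
 * An aside on the CCB clearing near the top of this function: unlike most
 * call sites in this change, scsicmd() clears the entire union ccb rather
 * than one member, because it may go on to build either a SCSI request
 * (csio) or an ATA request (ataio) in the same CCB.  Roughly:
 */
#if 0
	CCB_CLEAR_ALL_EXCEPT_HDR(ccb);		/* whole union ccb           */
	CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);	/* just the ccb_scsiio part  */
#endif
/*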
Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. */ optind += hook.got; break; case 'd': dmacmd = 1; break; case 'f': fpdmacmd = 1; break; case 'i': if (arglist & CAM_ARG_CMD_OUT) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_IN; flags = CAM_DIR_IN; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of input bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; data_ptr = (u_int8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } break; case 'o': if (arglist & CAM_ARG_CMD_IN) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_OUT; flags = CAM_DIR_OUT; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of output bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); data_ptr = (u_int8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } bzero(data_ptr, data_bytes); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; else buff_encode_visit(data_ptr, data_bytes, datastr, iget, &hook); optind += hook.got; break; case 'r': need_res = 1; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; resstr = cget(&hook, NULL); if ((resstr != NULL) && (resstr[0] == '-')) fd_res = 1; optind += hook.got; break; default: break; } } /* * If fd_data is set, and we're writing to the device, we need to * read the data the user wants written from stdin. */ if ((fd_data == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = data_bytes; u_int8_t *buf_ptr = data_ptr; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto scsicmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (arglist & CAM_ARG_ERR_RECOVER) flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ flags |= CAM_DEV_QFRZDIS; if (cdb_len) { /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. * The next 5 bits are the command code. * Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((cdb[0] >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* computed by buff_encode_visit */ break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } /* * We should probably use csio_build_visit or something like that * here, but it's easier to encode arguments as you go. 
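 */
/*
 * The group-code decode above, restated as a table (lengths from the same
 * SCSI-3 draft cited there; 0 means "keep whatever length the encoder
 * produced", used for the reserved and vendor-specific groups):
 */
#if 0
	static const int cdb_group_len[8] = {
		6,	/* group 0: six byte commands     */
		10,	/* group 1: ten byte commands     */
		10,	/* group 2: ten byte commands     */
		0,	/* group 3: reserved              */
		16,	/* group 4: sixteen byte commands */
		12,	/* group 5: twelve byte commands  */
		0,	/* group 6: vendor specific       */
		0,	/* group 7: vendor specific       */
	};
	int glen = cdb_group_len[(cdb[0] >> 5) & 0x7];

	if (glen != 0)
		cdb_len = glen;
#endif
/*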
The * alternative would be skipping the CDB argument and then encoding * it here, since we've got the data buffer argument by now. */ bcopy(cdb, &ccb->csio.cdb_io.cdb_bytes, cdb_len); cam_fill_csio(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*sense_len*/ SSD_FULL_SIZE, /*cdb_len*/ cdb_len, /*timeout*/ timeout ? timeout : 5000); } else { atacmd_len = 12; bcopy(atacmd, &ccb->ataio.cmd.command, atacmd_len); if (need_res) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; if (dmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; if (fpdmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*timeout*/ timeout ? timeout : 5000); } if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsicmd_bailout; } if (atacmd_len && need_res) { if (fd_res == 0) { buff_decode_visit(&ccb->ataio.res.status, 11, resstr, arg_put, NULL); fprintf(stdout, "\n"); } else { fprintf(stdout, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", ccb->ataio.res.status, ccb->ataio.res.error, ccb->ataio.res.lba_low, ccb->ataio.res.lba_mid, ccb->ataio.res.lba_high, ccb->ataio.res.device, ccb->ataio.res.lba_low_exp, ccb->ataio.res.lba_mid_exp, ccb->ataio.res.lba_high_exp, ccb->ataio.res.sector_count, ccb->ataio.res.sector_count_exp); fflush(stdout); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (arglist & CAM_ARG_CMD_IN) && (data_bytes > 0)) { if (fd_data == 0) { buff_decode_visit(data_ptr, data_bytes, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = data_bytes; u_int8_t *buf_ptr = data_ptr; for (amt_written = 0; (amt_to_write > 0) && (amt_written =write(1, buf_ptr,amt_to_write))> 0;){ amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto scsicmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", data_bytes - amt_to_write, data_bytes); } } } scsicmd_bailout: if ((data_bytes > 0) && (data_ptr != NULL)) free(data_ptr); cam_freeccb(ccb); return(error); } static int camdebug(int argc, char **argv, char *combinedopt) { int c, fd; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr, *tmpstr = NULL; union ccb ccb; int error = 0; bzero(&ccb, sizeof(union ccb)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'I': arglist |= CAM_ARG_DEBUG_INFO; ccb.cdbg.flags |= CAM_DEBUG_INFO; break; case 'P': arglist |= CAM_ARG_DEBUG_PERIPH; ccb.cdbg.flags |= CAM_DEBUG_PERIPH; break; case 'S': arglist |= CAM_ARG_DEBUG_SUBTRACE; ccb.cdbg.flags |= CAM_DEBUG_SUBTRACE; break; case 'T': arglist |= CAM_ARG_DEBUG_TRACE; ccb.cdbg.flags |= CAM_DEBUG_TRACE; break; case 'X': arglist |= CAM_ARG_DEBUG_XPT; ccb.cdbg.flags |= CAM_DEBUG_XPT; break; case 'c': arglist |= CAM_ARG_DEBUG_CDB; ccb.cdbg.flags |= CAM_DEBUG_CDB; break; case 'p': arglist |= CAM_ARG_DEBUG_PROBE; ccb.cdbg.flags |= CAM_DEBUG_PROBE; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) 
{ warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } argc -= optind; argv += optind; if (argc <= 0) { warnx("you must specify \"off\", \"all\" or a bus,"); warnx("bus:target, or bus:target:lun"); close(fd); return(1); } tstr = *argv; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncmp(tstr, "off", 3) == 0) { ccb.cdbg.flags = CAM_DEBUG_NONE; arglist &= ~(CAM_ARG_DEBUG_INFO|CAM_ARG_DEBUG_PERIPH| CAM_ARG_DEBUG_TRACE|CAM_ARG_DEBUG_SUBTRACE| CAM_ARG_DEBUG_XPT|CAM_ARG_DEBUG_PROBE); } else if (strncmp(tstr, "all", 3) != 0) { tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ bus = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_BUS; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ target = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_TARGET; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ lun = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_LUN; } } } else { error = 1; warnx("you must specify \"all\", \"off\", or a bus,"); warnx("bus:target, or bus:target:lun to debug"); } } if (error == 0) { ccb.ccb_h.func_code = XPT_DEBUG; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); error = 1; } if (error == 0) { if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_FUNC_NOTAVAIL) { warnx("CAM debugging not available"); warnx("you need to put options CAMDEBUG in" " your kernel config file!"); error = 1; } else if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_DEBUG CCB failed with status %#x", ccb.ccb_h.status); error = 1; } else { if (ccb.cdbg.flags == CAM_DEBUG_NONE) { fprintf(stderr, "Debugging turned off\n"); } else { fprintf(stderr, "Debugging enabled for " "%d:%d:%jx\n", bus, target, (uintmax_t)lun); } } } close(fd); } return(error); } static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int numtags = -1; int retval = 0; int quiet = 0; char pathstr[1024]; ccb = cam_getccb(device); if (ccb == NULL) { warnx("tagcontrol: error allocating ccb"); return(1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'N': numtags = strtol(optarg, NULL, 0); if (numtags < 0) { warnx("tag count %d is < 0", numtags); retval = 1; goto tagcontrol_bailout; } break; case 'q': quiet++; break; default: break; } } cam_path_string(device, pathstr, sizeof(pathstr)); if (numtags >= 0) { - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_relsim) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->crs); ccb->ccb_h.func_code = XPT_REL_SIMQ; ccb->ccb_h.flags = CAM_DEV_QFREEZE; ccb->crs.release_flags = RELSIM_ADJUST_OPENINGS; ccb->crs.openings = numtags; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_REL_SIMQ CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_REL_SIMQ CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (quiet == 0) fprintf(stdout, "%stagged openings now %d\n", pathstr, ccb->crs.openings); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_getdevstats) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cgds); ccb->ccb_h.func_code = XPT_GDEV_STATS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_GDEV_STATS CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != 
CAM_REQ_CMP) { warnx("XPT_GDEV_STATS CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s", pathstr); fprintf(stdout, "dev_openings %d\n", ccb->cgds.dev_openings); fprintf(stdout, "%s", pathstr); fprintf(stdout, "dev_active %d\n", ccb->cgds.dev_active); fprintf(stdout, "%s", pathstr); fprintf(stdout, "allocated %d\n", ccb->cgds.allocated); fprintf(stdout, "%s", pathstr); fprintf(stdout, "queued %d\n", ccb->cgds.queued); fprintf(stdout, "%s", pathstr); fprintf(stdout, "held %d\n", ccb->cgds.held); fprintf(stdout, "%s", pathstr); fprintf(stdout, "mintags %d\n", ccb->cgds.mintags); fprintf(stdout, "%s", pathstr); fprintf(stdout, "maxtags %d\n", ccb->cgds.maxtags); } else { if (quiet == 0) { fprintf(stdout, "%s", pathstr); fprintf(stdout, "device openings: "); } fprintf(stdout, "%d\n", ccb->cgds.dev_openings + ccb->cgds.dev_active); } tagcontrol_bailout: cam_freeccb(ccb); return(retval); } static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts) { char pathstr[1024]; cam_path_string(device, pathstr, sizeof(pathstr)); if (cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { fprintf(stdout, "%ssync parameter: %d\n", pathstr, spi->sync_period); if (spi->sync_offset != 0) { u_int freq; freq = scsi_calc_syncsrate(spi->sync_period); fprintf(stdout, "%sfrequency: %d.%03dMHz\n", pathstr, freq / 1000, freq % 1000); } } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { fprintf(stdout, "%soffset: %d\n", pathstr, spi->sync_offset); } if (spi->valid & CTS_SPI_VALID_BUS_WIDTH) { fprintf(stdout, "%sbus width: %d bits\n", pathstr, (0x01 << spi->bus_width) * 8); } if (spi->valid & CTS_SPI_VALID_DISC) { fprintf(stdout, "%sdisconnection is %s\n", pathstr, (spi->flags & CTS_SPI_FLAGS_DISC_ENB) ? 
"enabled" : "disabled"); } } if (cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) fprintf(stdout, "%sWWNN: 0x%llx\n", pathstr, (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) fprintf(stdout, "%sWWPN: 0x%llx\n", pathstr, (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) fprintf(stdout, "%sPortID: 0x%x\n", pathstr, fc->port); if (fc->valid & CTS_FC_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, fc->bitrate / 1000, fc->bitrate % 1000); } if (cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, sas->bitrate / 1000, sas->bitrate % 1000); } if (cts->transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts->xport_specific.ata; if ((pata->valid & CTS_ATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(pata->mode)); } if ((pata->valid & CTS_ATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, pata->atapi); } if ((pata->valid & CTS_ATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO transaction length: %d\n", pathstr, pata->bytecount); } } if (cts->transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts->xport_specific.sata; if ((sata->valid & CTS_SATA_VALID_REVISION) != 0) { fprintf(stdout, "%sSATA revision: %d.x\n", pathstr, sata->revision); } if ((sata->valid & CTS_SATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(sata->mode)); } if ((sata->valid & CTS_SATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, sata->atapi); } if ((sata->valid & CTS_SATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO transaction length: %d\n", pathstr, sata->bytecount); } if ((sata->valid & CTS_SATA_VALID_PM) != 0) { fprintf(stdout, "%sPMP presence: %d\n", pathstr, sata->pm_present); } if ((sata->valid & CTS_SATA_VALID_TAGS) != 0) { fprintf(stdout, "%sNumber of tags: %d\n", pathstr, sata->tags); } if ((sata->valid & CTS_SATA_VALID_CAPS) != 0) { fprintf(stdout, "%sSATA capabilities: %08x\n", pathstr, sata->caps); } } if (cts->protocol == PROTO_ATA) { struct ccb_trans_settings_ata *ata= &cts->proto_specific.ata; if (ata->valid & CTS_ATA_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (ata->flags & CTS_ATA_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } if (cts->protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi= &cts->proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } } /* * Get a path inquiry CCB for the specified device. 
*/ static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cpi: couldn't allocate CCB"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cpi); ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { warn("get_cpi: error sending Path Inquiry CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } bcopy(&ccb->cpi, cpi, sizeof(struct ccb_pathinq)); get_cpi_bailout: cam_freeccb(ccb); return(retval); } /* * Get a get device CCB for the specified device. */ static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cgd: couldn't allocate CCB"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cgd); ccb->ccb_h.func_code = XPT_GDEV_TYPE; if (cam_send_ccb(device, ccb) < 0) { warn("get_cgd: error sending Path Inquiry CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } bcopy(&ccb->cgd, cgd, sizeof(struct ccb_getdev)); get_cgd_bailout: cam_freeccb(ccb); return(retval); } /* * Returns 1 if the device has the VPD page, 0 if it does not, and -1 on an * error. */ int dev_has_vpd_page(struct cam_device *dev, uint8_t page_id, int retry_count, int timeout, int verbosemode) { union ccb *ccb = NULL; struct scsi_vpd_supported_page_list sup_pages; int i; int retval = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warn("Unable to allocate CCB"); retval = -1; goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); bzero(&sup_pages, sizeof(sup_pages)); scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)&sup_pages, /* inq_len */ sizeof(sup_pages), /* evpd */ 1, /* page_code */ SVPD_SUPPORTED_PAGE_LIST, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(dev, ccb) < 0) { cam_freeccb(ccb); ccb = NULL; retval = -1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = -1; goto bailout; } for (i = 0; i < sup_pages.length; i++) { if (sup_pages.list[i] == page_id) { retval = 1; goto bailout; } } bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * devtype is filled in with the type of device. * Returns 0 for success, non-zero for failure. 
*/ int get_device_type(struct cam_device *dev, int retry_count, int timeout, int verbosemode, camcontrol_devtype *devtype) { struct ccb_getdev cgd; int retval = 0; retval = get_cgd(dev, &cgd); if (retval != 0) goto bailout; switch (cgd.protocol) { case PROTO_SCSI: break; case PROTO_ATA: case PROTO_ATAPI: case PROTO_SATAPM: *devtype = CC_DT_ATA; goto bailout; break; /*NOTREACHED*/ default: *devtype = CC_DT_UNKNOWN; goto bailout; break; /*NOTREACHED*/ } /* * Check for the ATA Information VPD page (0x89). If this is an * ATA device behind a SCSI to ATA translation layer, this VPD page * should be present. * * If that VPD page isn't present, or we get an error back from the * INQUIRY command, we'll just treat it as a normal SCSI device. */ retval = dev_has_vpd_page(dev, SVPD_ATA_INFORMATION, retry_count, timeout, verbosemode); if (retval == 1) *devtype = CC_DT_ATA_BEHIND_SCSI; else *devtype = CC_DT_SCSI; retval = 0; bailout: return (retval); } int build_ata_cmd(union ccb *ccb, uint32_t retry_count, uint32_t flags, uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, uint16_t features, uint16_t sector_count, uint64_t lba, uint8_t command, uint32_t auxiliary, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout, int is48bit, camcontrol_devtype devtype) { int retval = 0; if (devtype == CC_DT_ATA) { cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ timeout); if (is48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); if (auxiliary != 0) { ccb->ataio.ata_flags |= ATA_FLAG_AUX; ccb->ataio.aux = auxiliary; } if (ata_flags & AP_FLAG_CHK_COND) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; if ((protocol & AP_PROTO_MASK) == AP_PROTO_DMA) ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; else if ((protocol & AP_PROTO_MASK) == AP_PROTO_FPDMA) ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; } else { if (is48bit || lba > ATA_MAX_28BIT_LBA) protocol |= AP_EXTEND; retval = scsi_ata_pass(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features, /*sector_count*/ sector_count, /*lba*/ lba, /*command*/ command, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ sense_len, /*timeout*/ timeout); } return (retval); } int get_ata_status(struct cam_device *dev, union ccb *ccb, uint8_t *error, uint16_t *count, uint64_t *lba, uint8_t *device, uint8_t *status) { int retval = 0; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { uint8_t opcode; int error_code = 0, sense_key = 0, asc = 0, ascq = 0; /* * In this case, we have SCSI ATA PASS-THROUGH command, 12 * or 16 byte, and need to see what */ if (ccb->ccb_h.flags & CAM_CDB_POINTER) opcode = ccb->csio.cdb_io.cdb_ptr[0]; else opcode = ccb->csio.cdb_io.cdb_bytes[0]; if ((opcode != ATA_PASS_12) && (opcode != ATA_PASS_16)) { retval = 1; warnx("%s: unsupported opcode %02x", __func__, opcode); goto bailout; } retval = scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq); /* Note: the _ccb() variant returns 0 for an error */ if (retval == 0) { retval = 1; goto bailout; } else retval = 0; switch 
(error_code) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: { struct scsi_sense_data_desc *sense; struct scsi_sense_ata_ret_desc *desc; uint8_t *desc_ptr; sense = (struct scsi_sense_data_desc *) &ccb->csio.sense_data; desc_ptr = scsi_find_desc(sense, ccb->csio.sense_len - ccb->csio.sense_resid, SSD_DESC_ATA); if (desc_ptr == NULL) { cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } desc = (struct scsi_sense_ata_ret_desc *)desc_ptr; *error = desc->error; *count = (desc->count_15_8 << 8) | desc->count_7_0; *lba = ((uint64_t)desc->lba_47_40 << 40) | ((uint64_t)desc->lba_39_32 << 32) | (desc->lba_31_24 << 24) | (desc->lba_23_16 << 16) | (desc->lba_15_8 << 8) | desc->lba_7_0; *device = desc->device; *status = desc->status; /* * If the extend bit isn't set, the result is for a * 12-byte ATA PASS-THROUGH command or a 16 or 32 byte * command without the extend bit set. This means * that the device is supposed to return 28-bit * status. The count field is only 8 bits, and the * LBA field is only 8 bits. */ if ((desc->flags & SSD_DESC_ATA_FLAG_EXTEND) == 0){ *count &= 0xff; *lba &= 0x0fffffff; } break; } case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: { #if 0 struct scsi_sense_data_fixed *sense; #endif /* * XXX KDM need to support fixed sense data. */ warnx("%s: Fixed sense data not supported yet", __func__); retval = 1; goto bailout; break; /*NOTREACHED*/ } default: retval = 1; goto bailout; break; } break; } case XPT_ATA_IO: { struct ata_res *res; /* * In this case, we have an ATA command, and we need to * fill in the requested values from the result register * set. */ res = &ccb->ataio.res; *error = res->error; *status = res->status; *device = res->device; *count = res->sector_count; *lba = (res->lba_high << 16) | (res->lba_mid << 8) | (res->lba_low); if (res->flags & CAM_ATAIO_48BIT) { *count |= (res->sector_count_exp << 8); *lba |= (res->lba_low_exp << 24) | ((uint64_t)res->lba_mid_exp << 32) | ((uint64_t)res->lba_high_exp << 40); } else { *lba |= (res->device & 0xf) << 24; } break; } default: retval = 1; break; } bailout: return (retval); } static void cpi_print(struct ccb_pathinq *cpi) { char adapter_str[1024]; int i; snprintf(adapter_str, sizeof(adapter_str), "%s%d:", cpi->dev_name, cpi->unit_number); fprintf(stdout, "%s SIM/HBA version: %d\n", adapter_str, cpi->version_num); for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->hba_inquiry) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PI_MDP_ABLE: str = "MDP message"; break; case PI_WIDE_32: str = "32 bit wide SCSI"; break; case PI_WIDE_16: str = "16 bit wide SCSI"; break; case PI_SDTR_ABLE: str = "SDTR message"; break; case PI_LINKED_CDB: str = "linked CDBs"; break; case PI_TAG_ABLE: str = "tag queue messages"; break; case PI_SOFT_RST: str = "soft reset alternative"; break; case PI_SATAPM: str = "SATA Port Multiplier"; break; default: str = "unknown PI bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->hba_misc) == 0) continue; fprintf(stdout, "%s ", adapter_str); switch(i) { case PIM_SCANHILO: str = "bus scans from high ID to low ID"; break; case PIM_NOREMOVE: str = "removable devices not included in scan"; break; case PIM_NOINITIATOR: str = "initiator role not supported"; break; case PIM_NOBUSRESET: str = "user has disabled initial BUS RESET or" " controller is in target/mixed mode"; break; case PIM_NO_6_BYTE: str = "do not send 6-byte commands"; break; case PIM_SEQSCAN: 
str = "scan bus sequentially"; break; default: str = "unknown PIM bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->target_sprt) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PIT_PROCESSOR: str = "target mode processor mode"; break; case PIT_PHASE: str = "target mode phase cog. mode"; break; case PIT_DISCONNECT: str = "disconnects in target mode"; break; case PIT_TERM_IO: str = "terminate I/O message in target mode"; break; case PIT_GRP_6: str = "group 6 commands in target mode"; break; case PIT_GRP_7: str = "group 7 commands in target mode"; break; default: str = "unknown PIT bit set"; break; } fprintf(stdout, "%s\n", str); } fprintf(stdout, "%s HBA engine count: %d\n", adapter_str, cpi->hba_eng_cnt); fprintf(stdout, "%s maximum target: %d\n", adapter_str, cpi->max_target); fprintf(stdout, "%s maximum LUN: %d\n", adapter_str, cpi->max_lun); fprintf(stdout, "%s highest path ID in subsystem: %d\n", adapter_str, cpi->hpath_id); fprintf(stdout, "%s initiator ID: %d\n", adapter_str, cpi->initiator_id); fprintf(stdout, "%s SIM vendor: %s\n", adapter_str, cpi->sim_vid); fprintf(stdout, "%s HBA vendor: %s\n", adapter_str, cpi->hba_vid); fprintf(stdout, "%s HBA vendor ID: 0x%04x\n", adapter_str, cpi->hba_vendor); fprintf(stdout, "%s HBA device ID: 0x%04x\n", adapter_str, cpi->hba_device); fprintf(stdout, "%s HBA subvendor ID: 0x%04x\n", adapter_str, cpi->hba_subvendor); fprintf(stdout, "%s HBA subdevice ID: 0x%04x\n", adapter_str, cpi->hba_subdevice); fprintf(stdout, "%s bus ID: %d\n", adapter_str, cpi->bus_id); fprintf(stdout, "%s base transfer speed: ", adapter_str); if (cpi->base_transfer_speed > 1000) fprintf(stdout, "%d.%03dMB/sec\n", cpi->base_transfer_speed / 1000, cpi->base_transfer_speed % 1000); else fprintf(stdout, "%dKB/sec\n", (cpi->base_transfer_speed % 1000) * 1000); fprintf(stdout, "%s maximum transfer size: %u bytes\n", adapter_str, cpi->maxio); } static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts) { int retval; union ccb *ccb; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_print_cts: error allocating ccb"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cts); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; if (user_settings == 0) ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; else ccb->cts.type = CTS_TYPE_USER_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_GET_TRAN_SETTINGS CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if (quiet == 0) cts_print(device, &ccb->cts); if (cts != NULL) bcopy(&ccb->cts, cts, sizeof(struct ccb_trans_settings)); get_print_cts_bailout: cam_freeccb(ccb); return(retval); } static int ratecontrol(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int user_settings = 0; int retval = 0; int disc_enable = -1, tag_enable = -1; int mode = -1; int offset = -1; double syncrate = -1; int bus_width = -1; int quiet = 0; int change_settings = 0, send_tur = 0; struct ccb_pathinq 
cpi; ccb = cam_getccb(device); if (ccb == NULL) { warnx("ratecontrol: error allocating ccb"); return(1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'a': send_tur = 1; break; case 'c': user_settings = 0; break; case 'D': if (strncasecmp(optarg, "enable", 6) == 0) disc_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) disc_enable = 0; else { warnx("-D argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'M': mode = ata_string2mode(optarg); if (mode < 0) { warnx("unknown mode '%s'", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'O': offset = strtol(optarg, NULL, 0); if (offset < 0) { warnx("offset value %d is < 0", offset); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'q': quiet++; break; case 'R': syncrate = atof(optarg); if (syncrate < 0) { warnx("sync rate %f is < 0", syncrate); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'T': if (strncasecmp(optarg, "enable", 6) == 0) tag_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) tag_enable = 0; else { warnx("-T argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'U': user_settings = 1; break; case 'W': bus_width = strtol(optarg, NULL, 0); if (bus_width < 0) { warnx("bus width %d is < 0", bus_width); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; default: break; } } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cpi); /* * Grab path inquiry information, so we can determine whether * or not the initiator is capable of the things that the user * requests. */ ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_PATH_INQ CCB"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_PATH_INQ CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } bcopy(&ccb->cpi, &cpi, sizeof(struct ccb_pathinq)); - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cts); if (quiet == 0) { fprintf(stdout, "%s parameters:\n", user_settings ? 
"User" : "Current"); } retval = get_print_cts(device, user_settings, quiet, &ccb->cts); if (retval != 0) goto ratecontrol_bailout; if (arglist & CAM_ARG_VERBOSE) cpi_print(&cpi); if (change_settings) { int didsettings = 0; struct ccb_trans_settings_spi *spi = NULL; struct ccb_trans_settings_pata *pata = NULL; struct ccb_trans_settings_sata *sata = NULL; struct ccb_trans_settings_ata *ata = NULL; struct ccb_trans_settings_scsi *scsi = NULL; if (ccb->cts.transport == XPORT_SPI) spi = &ccb->cts.xport_specific.spi; if (ccb->cts.transport == XPORT_ATA) pata = &ccb->cts.xport_specific.ata; if (ccb->cts.transport == XPORT_SATA) sata = &ccb->cts.xport_specific.sata; if (ccb->cts.protocol == PROTO_ATA) ata = &ccb->cts.proto_specific.ata; if (ccb->cts.protocol == PROTO_SCSI) scsi = &ccb->cts.proto_specific.scsi; ccb->cts.xport_specific.valid = 0; ccb->cts.proto_specific.valid = 0; if (spi && disc_enable != -1) { spi->valid |= CTS_SPI_VALID_DISC; if (disc_enable == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; else spi->flags |= CTS_SPI_FLAGS_DISC_ENB; didsettings++; } if (tag_enable != -1) { if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0) { warnx("HBA does not support tagged queueing, " "so you cannot modify tag settings"); retval = 1; goto ratecontrol_bailout; } if (ata) { ata->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) ata->flags &= ~CTS_ATA_FLAGS_TAG_ENB; else ata->flags |= CTS_ATA_FLAGS_TAG_ENB; didsettings++; } else if (scsi) { scsi->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; didsettings++; } } if (spi && offset != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing offset"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_offset = offset; didsettings++; } if (spi && syncrate != -1) { int prelim_sync_period; if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_RATE; /* * The sync rate the user gives us is in MHz. * We need to translate it into KHz for this * calculation. */ syncrate *= 1000; /* * Next, we calculate a "preliminary" sync period * in tenths of a nanosecond. 
*/ if (syncrate == 0) prelim_sync_period = 0; else prelim_sync_period = 10000000 / syncrate; spi->sync_period = scsi_calc_syncparam(prelim_sync_period); didsettings++; } if (sata && syncrate != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user rate " "settings for SATA"); retval = 1; goto ratecontrol_bailout; } sata->revision = ata_speed2revision(syncrate * 100); if (sata->revision < 0) { warnx("Invalid rate %f", syncrate); retval = 1; goto ratecontrol_bailout; } sata->valid |= CTS_SATA_VALID_REVISION; didsettings++; } if ((pata || sata) && mode != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user mode " "settings for ATA/SATA"); retval = 1; goto ratecontrol_bailout; } if (pata) { pata->mode = mode; pata->valid |= CTS_ATA_VALID_MODE; } else { sata->mode = mode; sata->valid |= CTS_SATA_VALID_MODE; } didsettings++; } /* * The bus_width argument goes like this: * 0 == 8 bit * 1 == 16 bit * 2 == 32 bit * Therefore, if you shift the number of bits given on the * command line right by 4, you should get the correct * number. */ if (spi && bus_width != -1) { /* * We might as well validate things here with a * decipherable error message, rather than what * will probably be an indecipherable error message * by the time it gets back to us. */ if ((bus_width == 16) && ((cpi.hba_inquiry & PI_WIDE_16) == 0)) { warnx("HBA does not support 16 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width == 32) && ((cpi.hba_inquiry & PI_WIDE_32) == 0)) { warnx("HBA does not support 32 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width != 8) && (bus_width != 16) && (bus_width != 32)) { warnx("Invalid bus width %d", bus_width); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width >> 4; didsettings++; } if (didsettings == 0) { goto ratecontrol_bailout; } ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_SET_TRAN_SETTINGS CCB"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_SET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } } if (send_tur) { retval = testunitready(device, retry_count, timeout, (arglist & CAM_ARG_VERBOSE) ? 0 : 1); /* * If the TUR didn't succeed, just bail. 
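 */
/*
 * An aside on the bus_width handling above: the CTS bus width field is an
 * exponent code (0 = 8 bit, 1 = 16 bit, 2 = 32 bit, as cts_print() shows
 * with its "(0x01 << spi->bus_width) * 8"), and since the only accepted
 * inputs are 8, 16 and 32, shifting the bit count right by four happens to
 * yield exactly that code:
 */
#if 0
	int w8 = 8 >> 4;	/* 0 -> 8-bit wide  */
	int w16 = 16 >> 4;	/* 1 -> 16-bit wide */
	int w32 = 32 >> 4;	/* 2 -> 32-bit wide */
#endif
/*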
*/ if (retval != 0) { if (quiet == 0) fprintf(stderr, "Test Unit Ready failed\n"); goto ratecontrol_bailout; } } if ((change_settings || send_tur) && !quiet && (ccb->cts.transport == XPORT_ATA || ccb->cts.transport == XPORT_SATA || send_tur)) { fprintf(stdout, "New parameters:\n"); retval = get_print_cts(device, user_settings, 0, NULL); } ratecontrol_bailout: cam_freeccb(ccb); return(retval); } static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; struct format_defect_list_header fh; u_int8_t *data_ptr = NULL; u_int32_t dxfer_len = 0; u_int8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsiformat: error allocating ccb"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'q': quiet++; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (quiet == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, retry_count, timeout); if (error != 0) { warnx("scsiformat: error sending inquiry"); goto scsiformat_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsiformat_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current format timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. */ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } /* * Keep this outside the if block below to silence any unused * variable warnings. 
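 */
/*
 * A condensed sketch of the timeout prompt above: the user answers in
 * seconds, internal timeouts are in milliseconds, and an empty or zero
 * reply keeps the default (10800 seconds, i.e. three hours, for a full
 * format).  The local names here are illustrative.
 */
#if 0
	char answer[32];
	int secs;

	if (fgets(answer, sizeof(answer), stdin) != NULL &&
	    (secs = atoi(answer)) > 0)
		use_timeout = secs * 1000;
#endif
/*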
*/ bzero(&fh, sizeof(fh)); /* * If we're in immediate mode, we've got to include the format * header */ if (immediate != 0) { fh.byte2 = FU_DLH_IMMED; data_ptr = (u_int8_t *)&fh; dxfer_len = sizeof(fh); byte2 = FU_FMT_DATA; } else if (quiet == 0) { fprintf(stdout, "Formatting..."); fflush(stdout); } scsi_format_unit(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte2 */ byte2, /* ileave */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((immediate == 0) && ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP))) { const char errstr[] = "error sending format command"; if (retval < 0) warn(errstr); else warnx(errstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Format Complete\n"); } goto scsiformat_bailout; } doreport: do { cam_status status; - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish formatting. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending CAMIOCOMMAND ioctl"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-2 and SCSI-3 specs, a * drive that is in the middle of a format should * return NOT READY with an ASC of "logical unit * not ready, format in progress". The sense key * specific bytes will then be a progress indicator. 
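 */
/*
 * Worked example of the progress arithmetic below: the two sense-key-
 * specific bytes form a fraction of 65536, so a reported value of 0x8000
 * (32768) prints as 32768/65536 = 50.00 %.
 */
#if 0
	int val = 0x8000;			/* as from scsi_2btoul(&sks[1]) */
	uint64_t scaled = (uint64_t)10000 * val;
	unsigned whole = scaled / (0x10000 * 100);	/* 50 */
	unsigned frac = (scaled / 0x10000) % 100;	/* 00 */
#endif
/*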
*/ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x04)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { int val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000 * val; fprintf(stdout, "\rFormatting: %ju.%02u %% " "(%d/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during format:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but format will " "proceed."); warnx("will exit when format is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during format"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nFormat Complete\n"); scsiformat_bailout: cam_freeccb(ccb); return(error); } static int scsisanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; u_int8_t action = 0; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; int invert = 0; int passes = 0; int ause = 0; int fd = -1; const char *pattern = NULL; u_int8_t *data_ptr = NULL; u_int32_t dxfer_len = 0; u_int8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsisanitize: error allocating ccb"); return(1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': if (strcasecmp(optarg, "overwrite") == 0) action = SSZ_SERVICE_ACTION_OVERWRITE; else if (strcasecmp(optarg, "block") == 0) action = SSZ_SERVICE_ACTION_BLOCK_ERASE; else if (strcasecmp(optarg, "crypto") == 0) action = SSZ_SERVICE_ACTION_CRYPTO_ERASE; else if (strcasecmp(optarg, "exitfailure") == 0) action = SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE; else { warnx("invalid service operation \"%s\"", optarg); error = 1; goto scsisanitize_bailout; } break; case 'c': passes = strtol(optarg, NULL, 0); if (passes < 1 || passes > 31) { warnx("invalid passes value %d", passes); error = 1; goto scsisanitize_bailout; } break; case 'I': invert = 1; break; case 'P': pattern = optarg; break; case 'q': quiet++; break; case 'U': ause = 1; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (action == 0) { warnx("an action is required"); error = 1; goto scsisanitize_bailout; } else if (action == SSZ_SERVICE_ACTION_OVERWRITE) { struct scsi_sanitize_parameter_list *pl; struct stat sb; ssize_t sz, amt; if (pattern == NULL) { warnx("overwrite action requires -P argument"); error = 1; goto scsisanitize_bailout; } fd = open(pattern, O_RDONLY); if (fd < 0) { warn("cannot open pattern file %s", pattern); error = 1; goto scsisanitize_bailout; } if (fstat(fd, &sb) < 0) { warn("cannot stat pattern file %s", pattern); error = 1; goto scsisanitize_bailout; } sz = sb.st_size; if (sz > 
SSZPL_MAX_PATTERN_LENGTH) { warnx("pattern file size exceeds maximum value %d", SSZPL_MAX_PATTERN_LENGTH); error = 1; goto scsisanitize_bailout; } dxfer_len = sizeof(*pl) + sz; data_ptr = calloc(1, dxfer_len); if (data_ptr == NULL) { warnx("cannot allocate parameter list buffer"); error = 1; goto scsisanitize_bailout; } amt = read(fd, data_ptr + sizeof(*pl), sz); if (amt < 0) { warn("cannot read pattern file"); error = 1; goto scsisanitize_bailout; } else if (amt != sz) { warnx("short pattern file read"); error = 1; goto scsisanitize_bailout; } pl = (struct scsi_sanitize_parameter_list *)data_ptr; if (passes == 0) pl->byte1 = 1; else pl->byte1 = passes; if (invert != 0) pl->byte1 |= SSZPL_INVERT; scsi_ulto2b(sz, pl->length); } else { const char *arg; if (passes != 0) arg = "-c"; else if (invert != 0) arg = "-I"; else if (pattern != NULL) arg = "-P"; else arg = NULL; if (arg != NULL) { warnx("%s argument only valid with overwrite " "operation", arg); error = 1; goto scsisanitize_bailout; } } if (quiet == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, retry_count, timeout); if (error != 0) { warnx("scsisanitize: error sending inquiry"); goto scsisanitize_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsisanitize_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current sanitize timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. */ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } byte2 = action; if (ause != 0) byte2 |= SSZ_UNRESTRICTED_EXIT; if (immediate != 0) byte2 |= SSZ_IMMED; scsi_sanitize(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte2 */ byte2, /* control */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending sanitize command"); error = 1; goto scsisanitize_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); if (sense_key == SSD_KEY_ILLEGAL_REQUEST && asc == 0x20 && ascq == 0x00) warnx("sanitize is not supported by " "this device"); else warnx("error sanitizing this device"); } else warnx("error sanitizing this device"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsisanitize_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. 
If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Sanitize Complete\n"); } goto scsisanitize_bailout; } doreport: do { cam_status status; - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish sanitizing. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending CAMIOCOMMAND ioctl"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsisanitize_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-3 spec, a drive that is in the * middle of a sanitize should return NOT READY with an * ASC of "logical unit not ready, sanitize in * progress". The sense key specific bytes will then * be a progress indicator. */ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x1b)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { int val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000 * val; fprintf(stdout, "\rSanitizing: %ju.%02u %% " "(%d/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during sanitize:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but sanitize will " "proceed."); warnx("will exit when sanitize is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during sanitize"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsisanitize_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsisanitize_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nSanitize Complete\n"); scsisanitize_bailout: if (fd >= 0) close(fd); if (data_ptr != NULL) free(data_ptr); cam_freeccb(ccb); return(error); } static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int c, countonly, lunsonly; struct scsi_report_luns_data *lundata; int alloc_len; uint8_t report_type; uint32_t list_len, i, j; int retval; retval = 0; lundata = NULL; report_type = RPL_REPORT_DEFAULT; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } - 
bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); countonly = 0; lunsonly = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': countonly++; break; case 'l': lunsonly++; break; case 'r': if (strcasecmp(optarg, "default") == 0) report_type = RPL_REPORT_DEFAULT; else if (strcasecmp(optarg, "wellknown") == 0) report_type = RPL_REPORT_WELLKNOWN; else if (strcasecmp(optarg, "all") == 0) report_type = RPL_REPORT_ALL; else { warnx("%s: invalid report type \"%s\"", __func__, optarg); retval = 1; goto bailout; } break; default: break; } } if ((countonly != 0) && (lunsonly != 0)) { warnx("%s: you can only specify one of -c or -l", __func__); retval = 1; goto bailout; } /* * According to SPC-4, the allocation length must be at least 16 * bytes -- enough for the header and one LUN. */ alloc_len = sizeof(*lundata) + 8; retry: lundata = malloc(alloc_len); if (lundata == NULL) { warn("%s: error mallocing %d bytes", __func__, alloc_len); retval = 1; goto bailout; } scsi_report_luns(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*select_report*/ report_type, /*rpl_buf*/ lundata, /*alloc_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending REPORT LUNS command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } list_len = scsi_4btoul(lundata->length); /* * If we need to list the LUNs, and our allocation * length was too short, reallocate and retry. */ if ((countonly == 0) && (list_len > (alloc_len - sizeof(*lundata)))) { alloc_len = list_len + sizeof(*lundata); free(lundata); goto retry; } if (lunsonly == 0) fprintf(stdout, "%u LUN%s found\n", list_len / 8, ((list_len / 8) > 1) ? 
"s" : ""); if (countonly != 0) goto bailout; for (i = 0; i < (list_len / 8); i++) { int no_more; no_more = 0; for (j = 0; j < sizeof(lundata->luns[i].lundata); j += 2) { if (j != 0) fprintf(stdout, ","); switch (lundata->luns[i].lundata[j] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: if ((lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK) != 0) fprintf(stdout, "%d:", lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK); else if ((j == 0) && ((lundata->luns[i].lundata[j+2] & RPL_LUNDATA_PERIPH_BUS_MASK) == 0)) no_more = 1; fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); break; case RPL_LUNDATA_ATYP_FLAT: { uint8_t tmplun[2]; tmplun[0] = lundata->luns[i].lundata[j] & RPL_LUNDATA_FLAT_LUN_MASK; tmplun[1] = lundata->luns[i].lundata[j+1]; fprintf(stdout, "%d", scsi_2btoul(tmplun)); no_more = 1; break; } case RPL_LUNDATA_ATYP_LUN: fprintf(stdout, "%d:%d:%d", (lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_BUS_MASK) >> 5, lundata->luns[i].lundata[j] & RPL_LUNDATA_LUN_TARG_MASK, lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_LUN_MASK); break; case RPL_LUNDATA_ATYP_EXTLUN: { int field_len_code, eam_code; eam_code = lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_EAM_MASK; field_len_code = (lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_LEN_MASK) >> 4; if ((eam_code == RPL_LUNDATA_EXT_EAM_WK) && (field_len_code == 0x00)) { fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); } else if ((eam_code == RPL_LUNDATA_EXT_EAM_NOT_SPEC) && (field_len_code == 0x03)) { uint8_t tmp_lun[8]; /* * This format takes up all 8 bytes. * If we aren't starting at offset 0, * that's a bug. */ if (j != 0) { fprintf(stdout, "Invalid " "offset %d for " "Extended LUN not " "specified format", j); no_more = 1; break; } bzero(tmp_lun, sizeof(tmp_lun)); bcopy(&lundata->luns[i].lundata[j+1], &tmp_lun[1], sizeof(tmp_lun) - 1); fprintf(stdout, "%#jx", (intmax_t)scsi_8btou64(tmp_lun)); no_more = 1; } else { fprintf(stderr, "Unknown Extended LUN" "Address method %#x, length " "code %#x", eam_code, field_len_code); no_more = 1; } break; } default: fprintf(stderr, "Unknown LUN address method " "%#x\n", lundata->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); break; } /* * For the flat addressing method, there are no * other levels after it. 
*/ if (no_more != 0) break; } fprintf(stdout, "\n"); } bailout: cam_freeccb(ccb); free(lundata); return (retval); } static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int blocksizeonly, humanize, numblocks, quiet, sizeonly, baseten; struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; uint64_t maxsector; uint32_t block_len; int retval; int c; blocksizeonly = 0; humanize = 0; numblocks = 0; quiet = 0; sizeonly = 0; baseten = 0; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': blocksizeonly++; break; case 'h': humanize++; baseten = 0; break; case 'H': humanize++; baseten++; break; case 'N': numblocks++; break; case 'q': quiet++; break; case 's': sizeonly++; break; default: break; } } if ((blocksizeonly != 0) && (numblocks != 0)) { warnx("%s: you can only specify one of -b or -N", __func__); retval = 1; goto bailout; } if ((blocksizeonly != 0) && (sizeonly != 0)) { warnx("%s: you can only specify one of -b or -s", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (quiet != 0)) { warnx("%s: you can only specify one of -h/-H or -q", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (blocksizeonly != 0)) { warnx("%s: you can only specify one of -h/-H or -b", __func__); retval = 1; goto bailout; } scsi_read_capacity(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, &rcap, SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_4btoul(rcap.addr); block_len = scsi_4btoul(rcap.length); /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (maxsector != 0xffffffff) goto do_print; scsi_read_capacity_16(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladdr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)&rcaplong, /*rcap_buf_len*/ sizeof(rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY (16) command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_8btou64(rcaplong.addr); block_len = scsi_4btoul(rcaplong.length); do_print: if (blocksizeonly == 0) { /* * Humanize implies !quiet, and also implies numblocks. */ if (humanize != 0) { char tmpstr[6]; int64_t tmpbytes; int ret; tmpbytes = (maxsector + 1) * block_len; ret = humanize_number(tmpstr, sizeof(tmpstr), tmpbytes, "", HN_AUTOSCALE, HN_B | HN_DECIMAL | ((baseten != 0) ? HN_DIVISOR_1000 : 0)); if (ret == -1) { warnx("%s: humanize_number failed!", __func__); retval = 1; goto bailout; } fprintf(stdout, "Device Size: %s%s", tmpstr, (sizeonly == 0) ? ", " : "\n"); } else if (numblocks != 0) { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Blocks: " : "", (uintmax_t)maxsector + 1, (sizeonly == 0) ? ", " : "\n"); } else { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Last Block: " : "", (uintmax_t)maxsector, (sizeonly == 0) ? ", " : "\n"); } } if (sizeonly == 0) fprintf(stdout, "%s%u%s\n", (quiet == 0) ? "Block Length: " : "", block_len, (quiet == 0) ? " bytes" : ""); bailout: cam_freeccb(ccb); return (retval); } static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, error = 0; union ccb *ccb; uint8_t *smp_request = NULL, *smp_response = NULL; int request_size = 0, response_size = 0; int fd_request = 0, fd_response = 0; char *datastr = NULL; struct get_hook hook; int retval; int flags = 0; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'R': arglist |= CAM_ARG_CMD_IN; response_size = strtol(optarg, NULL, 0); if (response_size <= 0) { warnx("invalid number of response bytes %d", response_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_response = 1; smp_response = (u_int8_t *)malloc(response_size); if (smp_response == NULL) { warn("can't malloc memory for SMP response"); error = 1; goto smpcmd_bailout; } break; case 'r': arglist |= CAM_ARG_CMD_OUT; request_size = strtol(optarg, NULL, 0); if (request_size <= 0) { warnx("invalid number of request bytes %d", request_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); smp_request = (u_int8_t *)malloc(request_size); if (smp_request == NULL) { warn("can't malloc memory for SMP request"); error = 1; goto smpcmd_bailout; } bzero(smp_request, request_size); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. 
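 * In that case the raw request bytes are read from stdin in the loop further down rather than encoded from the format string via buff_encode_visit().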
*/ if ((datastr != NULL) && (datastr[0] == '-')) fd_request = 1; else buff_encode_visit(smp_request, request_size, datastr, iget, &hook); optind += hook.got; break; default: break; } } /* * If fd_data is set, and we're writing to the device, we need to * read the data the user wants written from stdin. */ if ((fd_request == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = request_size; u_int8_t *buf_ptr = smp_request; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto smpcmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (((arglist & CAM_ARG_CMD_IN) == 0) || ((arglist & CAM_ARG_CMD_OUT) == 0)) { warnx("%s: need both the request (-r) and response (-R) " "arguments", __func__); error = 1; goto smpcmd_bailout; } flags |= CAM_DEV_QFRZDIS; cam_fill_smpio(&ccb->smpio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*smp_request*/ smp_request, /*smp_request_len*/ request_size, /*smp_response*/ smp_response, /*smp_response_len*/ response_size, /*timeout*/ timeout ? timeout : 5000); ccb->smpio.flags = SMP_FLAG_NONE; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (response_size > 0)) { if (fd_response == 0) { buff_decode_visit(smp_response, response_size, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = response_size; u_int8_t *buf_ptr = smp_response; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(STDOUT_FILENO, buf_ptr, amt_to_write)) > 0;){ amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto smpcmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", response_size - amt_to_write, response_size); } } } smpcmd_bailout: if (ccb != NULL) cam_freeccb(ccb); if (smp_request != NULL) free(smp_request); if (smp_response != NULL) free(smp_response); return (error); } static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_general_request *request = NULL; struct smp_report_general_response *response = NULL; struct sbuf *sb = NULL; int error = 0; int c, long_response = 0; int retval; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); error = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*response)); error = 1; goto bailout; } try_long: smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, /*request_len*/ sizeof(*request), (uint8_t *)response, /*response_len*/ sizeof(*response), /*long_response*/ long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } /* * If the device supports the long response bit, try again and see * if we can get all of the data. */ if ((response->long_response & SMP_RG_LONG_RESPONSE) && (long_response == 0)) { ccb->ccb_h.status = CAM_REQ_INPROG; - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); long_response = 1; goto try_long; } /* * XXX KDM detect and decode SMP errors here. */ sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_general_sbuf(response, sizeof(*response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); if (sb != NULL) sbuf_delete(sb); return (error); } static struct camcontrol_opts phy_ops[] = { {"nop", SMP_PC_PHY_OP_NOP, CAM_ARG_NONE, NULL}, {"linkreset", SMP_PC_PHY_OP_LINK_RESET, CAM_ARG_NONE, NULL}, {"hardreset", SMP_PC_PHY_OP_HARD_RESET, CAM_ARG_NONE, NULL}, {"disable", SMP_PC_PHY_OP_DISABLE, CAM_ARG_NONE, NULL}, {"clearerrlog", SMP_PC_PHY_OP_CLEAR_ERR_LOG, CAM_ARG_NONE, NULL}, {"clearaffiliation", SMP_PC_PHY_OP_CLEAR_AFFILIATON, CAM_ARG_NONE,NULL}, {"sataportsel", SMP_PC_PHY_OP_TRANS_SATA_PSS, CAM_ARG_NONE, NULL}, {"clearitnl", SMP_PC_PHY_OP_CLEAR_STP_ITN_LS, CAM_ARG_NONE, NULL}, {"setdevname", SMP_PC_PHY_OP_SET_ATT_DEV_NAME, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_phy_control_request *request = NULL; struct smp_phy_control_response *response = NULL; int long_response = 0; int retval = 0; int phy = -1; uint32_t phy_operation = SMP_PC_PHY_OP_NOP; int phy_op_set = 0; uint64_t attached_dev_name = 0; int dev_name_set = 0; uint32_t min_plr = 0, max_plr = 0; uint32_t pp_timeout_val = 0; int slumber_partial = 0; int set_pp_timeout_val = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': case 'A': case 's': case 'S': { int enable = -1; if (strcasecmp(optarg, "enable") == 0) enable = 1; else if (strcasecmp(optarg, "disable") == 0) enable = 2; else { warnx("%s: Invalid argument %s", __func__, optarg); retval = 1; goto bailout; } switch (c) { case 's': slumber_partial |= enable << SMP_PC_SAS_SLUMBER_SHIFT; break; case 'S': slumber_partial |= enable << SMP_PC_SAS_PARTIAL_SHIFT; break; case 'a': slumber_partial |= enable << SMP_PC_SATA_SLUMBER_SHIFT; break; case 'A': slumber_partial |= enable << SMP_PC_SATA_PARTIAL_SHIFT; break; default: warnx("%s: programmer error", __func__); retval = 1; goto bailout; break; /*NOTREACHED*/ } break; } case 'd': attached_dev_name = (uintmax_t)strtoumax(optarg, NULL,0); dev_name_set = 1; break; case 'l': long_response = 1; break; case 'm': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ min_plr = strtoul(optarg, NULL, 0); if ((min_plr == 0) || (min_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, min_plr); retval = 1; goto bailout; } break; case 'M': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ max_plr = strtoul(optarg, NULL, 0); if ((max_plr == 0) || (max_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, max_plr); retval = 1; goto bailout; } break; case 'o': { camcontrol_optret optreturn; cam_argmask argnums; const char *subopt; if (phy_op_set != 0) { warnx("%s: only one phy operation argument " "(-o) allowed", __func__); retval = 1; goto bailout; } phy_op_set = 1; /* * Allow the user to specify the phy operation * numerically, as well as with a name. This will * future-proof it a bit, so options that are added * in future specs can be used. 
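 * Numeric values are only checked to fit in the one-byte PHY OPERATION field (1 through 0xff); no attempt is made to verify that the value is actually defined by the spec.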
*/ if (isdigit(optarg[0])) { phy_operation = strtoul(optarg, NULL, 0); if ((phy_operation == 0) || (phy_operation > 0xff)) { warnx("%s: invalid phy operation %#x", __func__, phy_operation); retval = 1; goto bailout; } break; } optreturn = getoption(phy_ops, optarg, &phy_operation, &argnums, &subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous option %s", __func__, optarg); usage(0); retval = 1; goto bailout; } else if (optreturn == CC_OR_NOT_FOUND) { warnx("%s: option %s not found", __func__, optarg); usage(0); retval = 1; goto bailout; } break; } case 'p': phy = atoi(optarg); break; case 'T': pp_timeout_val = strtoul(optarg, NULL, 0); if (pp_timeout_val > 15) { warnx("%s: invalid partial pathway timeout " "value %u, need a value less than 16", __func__, pp_timeout_val); retval = 1; goto bailout; } set_pp_timeout_val = 1; break; default: break; } } if (phy == -1) { warnx("%s: a PHY (-p phy) argument is required",__func__); retval = 1; goto bailout; } if (((dev_name_set != 0) && (phy_operation != SMP_PC_PHY_OP_SET_ATT_DEV_NAME)) || ((phy_operation == SMP_PC_PHY_OP_SET_ATT_DEV_NAME) && (dev_name_set == 0))) { warnx("%s: -d name and -o setdevname arguments both " "required to set device name", __func__); retval = 1; goto bailout; } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); retval = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); retval = 1; goto bailout; } smp_phy_control(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, sizeof(*request), (uint8_t *)response, sizeof(*response), long_response, /*expected_exp_change_count*/ 0, phy, phy_operation, (set_pp_timeout_val != 0) ? 1 : 0, attached_dev_name, min_plr, max_plr, slumber_partial, pp_timeout_val, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { /* * Use CAM_EPF_NORMAL so we only get one line of * SMP command decoding. */ cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_NORMAL, stderr); } retval = 1; goto bailout; } /* XXX KDM print out something here for success? */ bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); return (retval); } static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_manuf_info_request request; struct smp_report_manuf_info_response response; struct sbuf *sb = NULL; int long_response = 0; int retval = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } bzero(&request, sizeof(request)); bzero(&response, sizeof(response)); smp_report_manuf_info(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, &request, sizeof(request), (uint8_t *)&response, sizeof(response), long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_manuf_info_sbuf(&response, sizeof(response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (sb != NULL) sbuf_delete(sb); return (retval); } static int getdevid(struct cam_devitem *item) { int retval = 0; union ccb *ccb = NULL; struct cam_device *dev; dev = cam_open_btl(item->dev_match.path_id, item->dev_match.target_id, item->dev_match.target_lun, O_RDWR, NULL); if (dev == NULL) { warnx("%s", cam_errbuf); retval = 1; goto bailout; } item->device_id_len = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); retval = 1; goto bailout; } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cdai); /* * On the first try, we just probe for the size of the data, and * then allocate that much memory and try again. */ retry: ccb->ccb_h.func_code = XPT_DEV_ADVINFO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->cdai.flags = CDAI_FLAG_NONE; ccb->cdai.buftype = CDAI_TYPE_SCSI_DEVID; ccb->cdai.bufsiz = item->device_id_len; if (item->device_id_len != 0) ccb->cdai.buf = (uint8_t *)item->device_id; if (cam_send_ccb(dev, ccb) < 0) { warn("%s: error sending XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } if (ccb->ccb_h.status != CAM_REQ_CMP) { warnx("%s: CAM status %#x", __func__, ccb->ccb_h.status); retval = 1; goto bailout; } if (item->device_id_len == 0) { /* * This is our first time through. Allocate the buffer, * and then go back to get the data. */ if (ccb->cdai.provsiz == 0) { warnx("%s: invalid .provsiz field returned with " "XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } item->device_id_len = ccb->cdai.provsiz; item->device_id = malloc(item->device_id_len); if (item->device_id == NULL) { warn("%s: unable to allocate %d bytes", __func__, item->device_id_len); retval = 1; goto bailout; } ccb->ccb_h.status = CAM_REQ_INPROG; goto retry; } bailout: if (dev != NULL) cam_close_device(dev); if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * XXX KDM merge this code with getdevtree()? 
*/ static int buildbusdevlist(struct cam_devlist *devlist) { union ccb ccb; int bufsize, fd = -1; struct dev_match_pattern *patterns; struct cam_devitem *item = NULL; int skip_device = 0; int retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return(1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return(1); } ccb.cdm.num_matches = 0; ccb.cdm.num_patterns = 2; ccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern) * ccb.cdm.num_patterns; patterns = (struct dev_match_pattern *)malloc(ccb.cdm.pattern_buf_len); if (patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } ccb.cdm.patterns = patterns; bzero(patterns, ccb.cdm.pattern_buf_len); patterns[0].type = DEV_MATCH_DEVICE; patterns[0].pattern.device_pattern.flags = DEV_MATCH_PATH; patterns[0].pattern.device_pattern.path_id = devlist->path_id; patterns[1].type = DEV_MATCH_PERIPH; patterns[1].pattern.periph_pattern.flags = PERIPH_MATCH_PATH; patterns[1].pattern.periph_pattern.path_id = devlist->path_id; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); retval = 1; goto bailout; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; dev_result = &ccb.cdm.matches[i].result.device_result; if (dev_result->flags & DEV_RESULT_UNCONFIGURED) { skip_device = 1; break; } else skip_device = 0; item = malloc(sizeof(*item)); if (item == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*item)); retval = 1; goto bailout; } bzero(item, sizeof(*item)); bcopy(dev_result, &item->dev_match, sizeof(*dev_result)); STAILQ_INSERT_TAIL(&devlist->dev_queue, item, links); if (getdevid(item) != 0) { retval = 1; goto bailout; } break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (skip_device != 0) break; item->num_periphs++; item->periph_matches = realloc( item->periph_matches, item->num_periphs * sizeof(struct periph_match_result)); if (item->periph_matches == NULL) { warn("%s: error allocating periph " "list", __func__); retval = 1; goto bailout; } bcopy(periph_result, &item->periph_matches[ item->num_periphs - 1], sizeof(*periph_result)); break; } default: fprintf(stderr, "%s: unexpected match " "type %d\n", __func__, ccb.cdm.matches[i].type); retval = 1; goto bailout; break; /*NOTREACHED*/ } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); free(patterns); free(ccb.cdm.matches); if (retval != 0) freebusdevlist(devlist); return (retval); } static void freebusdevlist(struct cam_devlist *devlist) { struct cam_devitem *item, *item2; STAILQ_FOREACH_SAFE(item, 
&devlist->dev_queue, links, item2) { STAILQ_REMOVE(&devlist->dev_queue, item, cam_devitem, links); free(item->device_id); free(item->periph_matches); free(item); } } static struct cam_devitem * findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr) { struct cam_devitem *item; STAILQ_FOREACH(item, &devlist->dev_queue, links) { struct scsi_vpd_id_descriptor *idd; /* * XXX KDM look for LUN IDs as well? */ idd = scsi_get_devid(item->device_id, item->device_id_len, scsi_devid_is_sas_target); if (idd == NULL) continue; if (scsi_8btou64(idd->identifier) == sasaddr) return (item); } return (NULL); } static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { struct smp_report_general_request *rgrequest = NULL; struct smp_report_general_response *rgresponse = NULL; struct smp_discover_request *disrequest = NULL; struct smp_discover_response *disresponse = NULL; struct cam_devlist devlist; union ccb *ccb; int long_response = 0; int num_phys = 0; int quiet = 0; int retval; int i, c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); STAILQ_INIT(&devlist.dev_queue); rgrequest = malloc(sizeof(*rgrequest)); if (rgrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgrequest)); retval = 1; goto bailout; } rgresponse = malloc(sizeof(*rgresponse)); if (rgresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgresponse)); retval = 1; goto bailout; } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; case 'q': quiet = 1; break; default: break; } } smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, rgrequest, /*request_len*/ sizeof(*rgrequest), (uint8_t *)rgresponse, /*response_len*/ sizeof(*rgresponse), /*long_response*/ long_response, timeout); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } num_phys = rgresponse->num_phys; if (num_phys == 0) { if (quiet == 0) fprintf(stdout, "%s: No Phys reported\n", __func__); retval = 1; goto bailout; } devlist.path_id = device->path_id; retval = buildbusdevlist(&devlist); if (retval != 0) goto bailout; if (quiet == 0) { fprintf(stdout, "%d PHYs:\n", num_phys); fprintf(stdout, "PHY Attached SAS Address\n"); } disrequest = malloc(sizeof(*disrequest)); if (disrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disrequest)); retval = 1; goto bailout; } disresponse = malloc(sizeof(*disresponse)); if (disresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disresponse)); retval = 1; goto bailout; } for (i = 0; i < num_phys; i++) { struct cam_devitem *item; struct device_match_result *dev_match; char vendor[16], product[48], revision[16]; char tmpstr[256]; int j; - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->smpio); ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 
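/* * Send an SMP DISCOVER to this phy; the attached SAS address in the response is matched against the device list built above so the attached peripheral names can be printed next to the phy number. */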
smp_discover(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, disrequest, sizeof(*disrequest), (uint8_t *)disresponse, sizeof(*disresponse), long_response, /*ignore_zone_group*/ 0, /*phy*/ i, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (disresponse->function_result != SMP_FR_PHY_VACANT))) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } if (disresponse->function_result == SMP_FR_PHY_VACANT) { if (quiet == 0) fprintf(stdout, "%3d \n", i); continue; } if (disresponse->attached_device == SMP_DIS_AD_TYPE_NONE) { item = NULL; } else { item = findsasdevice(&devlist, scsi_8btou64(disresponse->attached_sas_address)); } if ((quiet == 0) || (item != NULL)) { fprintf(stdout, "%3d 0x%016jx", i, (uintmax_t)scsi_8btou64( disresponse->attached_sas_address)); if (item == NULL) { fprintf(stdout, "\n"); continue; } } else if (quiet != 0) continue; dev_match = &item->dev_match; if (dev_match->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_match->inq_data.vendor, sizeof(dev_match->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_match->inq_data.product, sizeof(dev_match->inq_data.product), sizeof(product)); cam_strvis(revision, dev_match->inq_data.revision, sizeof(dev_match->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if ((dev_match->protocol == PROTO_ATA) || (dev_match->protocol == PROTO_SATAPM)) { cam_strvis(product, dev_match->ident_data.model, sizeof(dev_match->ident_data.model), sizeof(product)); cam_strvis(revision, dev_match->ident_data.revision, sizeof(dev_match->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else { sprintf(tmpstr, "<>"); } fprintf(stdout, " %-33s ", tmpstr); /* * If we have 0 periphs, that's a bug... 
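 * Every configured device should have at least one peripheral driver attached, so just terminate the output line and move on.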
*/ if (item->num_periphs == 0) { fprintf(stdout, "\n"); continue; } fprintf(stdout, "("); for (j = 0; j < item->num_periphs; j++) { if (j > 0) fprintf(stdout, ","); fprintf(stdout, "%s%d", item->periph_matches[j].periph_name, item->periph_matches[j].unit_number); } fprintf(stdout, ")\n"); } bailout: if (ccb != NULL) cam_freeccb(ccb); free(rgrequest); free(rgresponse); free(disrequest); free(disresponse); freebusdevlist(&devlist); return (retval); } static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int t = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 't': t = atoi(optarg); break; default: break; } } if (strcmp(argv[1], "idle") == 0) { if (t == -1) cmd = ATA_IDLE_IMMEDIATE; else cmd = ATA_IDLE_CMD; } else if (strcmp(argv[1], "standby") == 0) { if (t == -1) cmd = ATA_STANDBY_IMMEDIATE; else cmd = ATA_STANDBY_CMD; } else { cmd = ATA_SLEEP; t = -1; } if (t < 0) sc = 0; else if (t <= (240 * 5)) sc = (t + 4) / 5; else if (t <= (252 * 5)) /* special encoding for 21 minutes */ sc = 252; else if (t <= (11 * 30 * 60)) sc = (t - 1) / (30 * 60) + 241; else sc = 253; retval = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); cam_freeccb(ccb); return (retval); } static int ataaxm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int l = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': l = atoi(optarg); break; default: break; } } sc = 0; if (strcmp(argv[1], "apm") == 0) { if (l == -1) cmd = 0x85; else { cmd = 0x05; sc = l; } } else /* aam */ { if (l == -1) cmd = 0xC2; else { cmd = 0x42; sc = l; } } retval = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SETFEATURES, /*features*/cmd, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); cam_freeccb(ccb); return (retval); } int scsigetopcodes(struct cam_device *device, int opcode_set, int opcode, int show_sa_errors, int sa_set, int service_action, int timeout_desc, int retry_count, int timeout, int verbosemode, uint32_t *fill_len, uint8_t **data_ptr) { union ccb *ccb = NULL; uint8_t *buf = NULL; uint32_t alloc_len = 0, num_opcodes; uint32_t valid_len = 0; uint32_t avail_len = 0; struct scsi_report_supported_opcodes_all *all_hdr; struct scsi_report_supported_opcodes_one *one; int options = 0; int retval = 0; /* * Make it clear that we haven't yet allocated or filled anything. 
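 * On success, *data_ptr points to memory allocated here and the caller is responsible for freeing it.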
*/ *fill_len = 0; *data_ptr = NULL; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); retval = 1; goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); if (opcode_set != 0) { options |= RSO_OPTIONS_OC; num_opcodes = 1; alloc_len = sizeof(*one) + CAM_MAX_CDBLEN; } else { num_opcodes = 256; alloc_len = sizeof(*all_hdr) + (num_opcodes * sizeof(struct scsi_report_supported_opcodes_descr)); } if (timeout_desc != 0) { options |= RSO_RCTD; alloc_len += num_opcodes * sizeof(struct scsi_report_supported_opcodes_timeout); } if (sa_set != 0) { options |= RSO_OPTIONS_OC_SA; if (show_sa_errors != 0) options &= ~RSO_OPTIONS_OC; } retry_alloc: if (buf != NULL) { free(buf); buf = NULL; } buf = malloc(alloc_len); if (buf == NULL) { warn("Unable to allocate %u bytes", alloc_len); retval = 1; goto bailout; } bzero(buf, alloc_len); scsi_report_supported_opcodes(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*options*/ options, /*req_opcode*/ opcode, /*req_service_action*/ service_action, /*data_ptr*/ buf, /*dxfer_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 10000); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending REPORT SUPPORTED OPERATION CODES"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (((options & RSO_OPTIONS_MASK) == RSO_OPTIONS_ALL) && (valid_len >= sizeof(*all_hdr))) { all_hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(all_hdr->length) + sizeof(*all_hdr); } else if (((options & RSO_OPTIONS_MASK) != RSO_OPTIONS_ALL) && (valid_len >= sizeof(*one))) { uint32_t cdb_length; one = (struct scsi_report_supported_opcodes_one *)buf; cdb_length = scsi_2btoul(one->cdb_length); avail_len = sizeof(*one) + cdb_length; if (one->support & RSO_ONE_CTDP) { struct scsi_report_supported_opcodes_timeout *td; td = (struct scsi_report_supported_opcodes_timeout *) &buf[avail_len]; if (valid_len >= (avail_len + sizeof(td->length))) { avail_len += scsi_2btoul(td->length) + sizeof(td->length); } else { avail_len += sizeof(*td); } } } /* * avail_len could be zero if we didn't get enough data back from * the target to determine how much data is actually available. */ if ((avail_len != 0) && (avail_len > valid_len)) { alloc_len = avail_len; goto retry_alloc; } *fill_len = valid_len; *data_ptr = buf; bailout: if (retval != 0) free(buf); cam_freeccb(ccb); return (retval); } static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_one *one; struct scsi_report_supported_opcodes_timeout *td; uint32_t cdb_len = 0, td_len = 0; const char *op_desc = NULL; unsigned int i; int retval = 0; one = (struct scsi_report_supported_opcodes_one *)buf; /* * If we don't have the full single opcode descriptor, no point in * continuing. 
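 * At a minimum the fields up through cdb_length are needed to tell whether the command is supported at all.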
*/ if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one, cdb_length)) { warnx("Only %u bytes returned, not enough to verify support", valid_len); retval = 1; goto bailout; } op_desc = scsi_op_desc(req_opcode, &device->inq_data); printf("%s (0x%02x)", op_desc != NULL ? op_desc : "UNKNOWN", req_opcode); if (sa_set != 0) printf(", SA 0x%x", req_sa); printf(": "); switch (one->support & RSO_ONE_SUP_MASK) { case RSO_ONE_SUP_UNAVAIL: printf("No command support information currently available\n"); break; case RSO_ONE_SUP_NOT_SUP: printf("Command not supported\n"); retval = 1; goto bailout; break; /*NOTREACHED*/ case RSO_ONE_SUP_AVAIL: printf("Command is supported, complies with a SCSI standard\n"); break; case RSO_ONE_SUP_VENDOR: printf("Command is supported, vendor-specific " "implementation\n"); break; default: printf("Unknown command support flags 0x%#x\n", one->support & RSO_ONE_SUP_MASK); break; } /* * If we don't have the CDB length, it isn't exactly an error, the * command probably isn't supported. */ if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one, cdb_usage)) goto bailout; cdb_len = scsi_2btoul(one->cdb_length); /* * If our valid data doesn't include the full reported length, * return. The caller should have detected this and adjusted his * allocation length to get all of the available data. */ if (valid_len < sizeof(*one) + cdb_len) { retval = 1; goto bailout; } /* * If all we have is the opcode, there is no point in printing out * the usage bitmap. */ if (cdb_len <= 1) { retval = 1; goto bailout; } printf("CDB usage bitmap:"); for (i = 0; i < cdb_len; i++) { printf(" %02x", one->cdb_usage[i]); } printf("\n"); /* * If we don't have a timeout descriptor, we're done. */ if ((one->support & RSO_ONE_CTDP) == 0) goto bailout; /* * If we don't have enough valid length to include the timeout * descriptor length, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + sizeof(td->length))) goto bailout; td = (struct scsi_report_supported_opcodes_timeout *) &buf[sizeof(*one) + cdb_len]; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); /* * If we don't have the full timeout descriptor, we're done. */ if (td_len < sizeof(*td)) goto bailout; /* * If we don't have enough valid length to contain the full timeout * descriptor, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + td_len)) goto bailout; printf("Timeout information:\n"); printf("Command-specific: 0x%02x\n", td->cmd_specific); printf("Nominal timeout: %u seconds\n", scsi_4btoul(td->nominal_time)); printf("Recommended timeout: %u seconds\n", scsi_4btoul(td->recommended_time)); bailout: return (retval); } static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_all *hdr; struct scsi_report_supported_opcodes_descr *desc; uint32_t avail_len = 0, used_len = 0; uint8_t *cur_ptr; int retval = 0; if (valid_len < sizeof(*hdr)) { warnx("%s: not enough returned data (%u bytes) opcode list", __func__, valid_len); retval = 1; goto bailout; } hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(hdr->length); avail_len += sizeof(hdr->length); /* * Take the lesser of the amount of data the drive claims is * available, and the amount of data the HBA says was returned. 
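 * This keeps the descriptor walk below from running past the end of the returned buffer if the device claims more data than was actually transferred.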
*/ avail_len = MIN(avail_len, valid_len); used_len = sizeof(hdr->length); printf("%-6s %4s %8s ", "Opcode", "SA", "CDB len" ); if (td_req != 0) printf("%5s %6s %6s ", "CS", "Nom", "Rec"); printf(" Description\n"); while ((avail_len - used_len) > sizeof(*desc)) { struct scsi_report_supported_opcodes_timeout *td; uint32_t td_len; const char *op_desc = NULL; cur_ptr = &buf[used_len]; desc = (struct scsi_report_supported_opcodes_descr *)cur_ptr; op_desc = scsi_op_desc(desc->opcode, &device->inq_data); if (op_desc == NULL) op_desc = "UNKNOWN"; printf("0x%02x %#4x %8u ", desc->opcode, scsi_2btoul(desc->service_action), scsi_2btoul(desc->cdb_length)); used_len += sizeof(*desc); if ((desc->flags & RSO_CTDP) == 0) { printf(" %s\n", op_desc); continue; } /* * If we don't have enough space to fit a timeout * descriptor, then we're done. */ if (avail_len - used_len < sizeof(*td)) { used_len = avail_len; printf(" %s\n", op_desc); continue; } cur_ptr = &buf[used_len]; td = (struct scsi_report_supported_opcodes_timeout *)cur_ptr; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); used_len += td_len; /* * If the given timeout descriptor length is less than what * we understand, skip it. */ if (td_len < sizeof(*td)) { printf(" %s\n", op_desc); continue; } printf(" 0x%02x %6u %6u %s\n", td->cmd_specific, scsi_4btoul(td->nominal_time), scsi_4btoul(td->recommended_time), op_desc); } bailout: return (retval); } static int scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbosemode) { int c; uint32_t opcode = 0, service_action = 0; int td_set = 0, opcode_set = 0, sa_set = 0; int show_sa_errors = 1; uint32_t valid_len = 0; uint8_t *buf = NULL; char *endptr; int retval = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'N': show_sa_errors = 0; break; case 'o': opcode = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("Invalid opcode \"%s\", must be a number", optarg); retval = 1; goto bailout; } if (opcode > 0xff) { warnx("Invalid opcode %#x, must be between " "0 and 0xff inclusive", opcode); retval = 1; goto bailout; } opcode_set = 1; break; case 's': service_action = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("Invalid service action \"%s\", must " "be a number", optarg); retval = 1; goto bailout; } if (service_action > 0xffff) { warnx("Invalid service action %#x, must " "be between 0 and 0xffff inclusive", service_action); retval = 1; goto bailout; } sa_set = 1; break; case 'T': td_set = 1; break; default: break; } } if ((sa_set != 0) && (opcode_set == 0)) { warnx("You must specify an opcode with -o if a service " "action is given"); retval = 1; goto bailout; } retval = scsigetopcodes(device, opcode_set, opcode, show_sa_errors, sa_set, service_action, td_set, retry_count, timeout, verbosemode, &valid_len, &buf); if (retval != 0) goto bailout; if ((opcode_set != 0) || (sa_set != 0)) { retval = scsiprintoneopcode(device, opcode, sa_set, service_action, buf, valid_len); } else { retval = scsiprintopcodes(device, td_set, buf, valid_len); } bailout: free(buf); return (retval); } #endif /* MINIMALISTIC */ static int scsireprobe(struct cam_device *device) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); ccb->ccb_h.func_code = XPT_REPROBE_LUN; if (cam_send_ccb(device, ccb) < 0) { warn("error 
sending XPT_REPROBE_LUN CCB"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } bailout: cam_freeccb(ccb); return (retval); } void usage(int printlong) { fprintf(printlong ? stdout : stderr, "usage: camcontrol [device id][generic args][command args]\n" " camcontrol devlist [-b] [-v]\n" #ifndef MINIMALISTIC " camcontrol periphlist [dev_id][-n dev_name] [-u unit]\n" " camcontrol tur [dev_id][generic args]\n" " camcontrol inquiry [dev_id][generic args] [-D] [-S] [-R]\n" " camcontrol identify [dev_id][generic args] [-v]\n" " camcontrol reportluns [dev_id][generic args] [-c] [-l] [-r report]\n" " camcontrol readcap [dev_id][generic args] [-b] [-h] [-H] [-N]\n" " [-q] [-s]\n" " camcontrol start [dev_id][generic args]\n" " camcontrol stop [dev_id][generic args]\n" " camcontrol load [dev_id][generic args]\n" " camcontrol eject [dev_id][generic args]\n" " camcontrol reprobe [dev_id][generic args]\n" #endif /* MINIMALISTIC */ " camcontrol rescan \n" " camcontrol reset \n" #ifndef MINIMALISTIC " camcontrol defects [dev_id][generic args] <-f format> [-P][-G]\n" " [-q][-s][-S offset][-X]\n" " camcontrol modepage [dev_id][generic args] <-m page | -l>\n" " [-P pagectl][-e | -b][-d]\n" " camcontrol cmd [dev_id][generic args]\n" " <-a cmd [args] | -c cmd [args]>\n" " [-d] [-f] [-i len fmt|-o len fmt [args]] [-r fmt]\n" " camcontrol smpcmd [dev_id][generic args]\n" " <-r len fmt [args]> <-R len fmt [args]>\n" " camcontrol smprg [dev_id][generic args][-l]\n" " camcontrol smppc [dev_id][generic args] <-p phy> [-l]\n" " [-o operation][-d name][-m rate][-M rate]\n" " [-T pp_timeout][-a enable|disable]\n" " [-A enable|disable][-s enable|disable]\n" " [-S enable|disable]\n" " camcontrol smpphylist [dev_id][generic args][-l][-q]\n" " camcontrol smpmaninfo [dev_id][generic args][-l]\n" " camcontrol debug [-I][-P][-T][-S][-X][-c]\n" " \n" " camcontrol tags [dev_id][generic args] [-N tags] [-q] [-v]\n" " camcontrol negotiate [dev_id][generic args] [-a][-c]\n" " [-D ][-M mode][-O offset]\n" " [-q][-R syncrate][-v][-T ]\n" " [-U][-W bus_width]\n" " camcontrol format [dev_id][generic args][-q][-r][-w][-y]\n" " camcontrol sanitize [dev_id][generic args]\n" " [-a overwrite|block|crypto|exitfailure]\n" " [-c passes][-I][-P pattern][-q][-U][-r][-w]\n" " [-y]\n" " camcontrol idle [dev_id][generic args][-t time]\n" " camcontrol standby [dev_id][generic args][-t time]\n" " camcontrol sleep [dev_id][generic args]\n" " camcontrol apm [dev_id][generic args][-l level]\n" " camcontrol aam [dev_id][generic args][-l level]\n" " camcontrol fwdownload [dev_id][generic args] <-f fw_image> [-q]\n" " [-s][-y]\n" " camcontrol security [dev_id][generic args]\n" " <-d pwd | -e pwd | -f | -h pwd | -k pwd>\n" " [-l ] [-q] [-s pwd] [-T timeout]\n" " [-U ] [-y]\n" " camcontrol hpa [dev_id][generic args] [-f] [-l] [-P] [-p pwd]\n" " [-q] [-s max_sectors] [-U pwd] [-y]\n" " camcontrol persist [dev_id][generic args] <-i action|-o action>\n" " [-a][-I tid][-k key][-K sa_key][-p][-R rtp]\n" " [-s scope][-S][-T type][-U]\n" " camcontrol attrib [dev_id][generic args] <-r action|-w attr>\n" " [-a attr_num][-c][-e elem][-F form1,form1]\n" " [-p part][-s start][-T type][-V vol]\n" " camcontrol opcodes [dev_id][generic args][-o opcode][-s SA]\n" " [-N][-T]\n" " camcontrol zone [dev_id][generic args]<-c cmd> [-a] [-l LBA]\n" " [-o rep_opts] [-P print_opts]\n" " camcontrol epc [dev_id][generic_args]<-c cmd> [-d] [-D] [-e]\n" " 
[-H] [-p power_cond] [-P] [-r rst_src] [-s]\n" " [-S power_src] [-T timer]\n" #endif /* MINIMALISTIC */ " camcontrol help\n"); if (!printlong) return; #ifndef MINIMALISTIC fprintf(stdout, "Specify one of the following options:\n" "devlist list all CAM devices\n" "periphlist list all CAM peripheral drivers attached to a device\n" "tur send a test unit ready to the named device\n" "inquiry send a SCSI inquiry command to the named device\n" "identify send a ATA identify command to the named device\n" "reportluns send a SCSI report luns command to the device\n" "readcap send a SCSI read capacity command to the device\n" "start send a Start Unit command to the device\n" "stop send a Stop Unit command to the device\n" "load send a Start Unit command to the device with the load bit set\n" "eject send a Stop Unit command to the device with the eject bit set\n" "reprobe update capacity information of the given device\n" "rescan rescan all busses, the given bus, or bus:target:lun\n" "reset reset all busses, the given bus, or bus:target:lun\n" "defects read the defect list of the specified device\n" "modepage display or edit (-e) the given mode page\n" "cmd send the given SCSI command, may need -i or -o as well\n" "smpcmd send the given SMP command, requires -o and -i\n" "smprg send the SMP Report General command\n" "smppc send the SMP PHY Control command, requires -p\n" "smpphylist display phys attached to a SAS expander\n" "smpmaninfo send the SMP Report Manufacturer Info command\n" "debug turn debugging on/off for a bus, target, or lun, or all devices\n" "tags report or set the number of transaction slots for a device\n" "negotiate report or set device negotiation parameters\n" "format send the SCSI FORMAT UNIT command to the named device\n" "sanitize send the SCSI SANITIZE command to the named device\n" "idle send the ATA IDLE command to the named device\n" "standby send the ATA STANDBY command to the named device\n" "sleep send the ATA SLEEP command to the named device\n" "fwdownload program firmware of the named device with the given image\n" "security report or send ATA security commands to the named device\n" "persist send the SCSI PERSISTENT RESERVE IN or OUT commands\n" "attrib send the SCSI READ or WRITE ATTRIBUTE commands\n" "opcodes send the SCSI REPORT SUPPORTED OPCODES command\n" "zone manage Zoned Block (Shingled) devices\n" "epc send ATA Extended Power Conditions commands\n" "help this message\n" "Device Identifiers:\n" "bus:target specify the bus and target, lun defaults to 0\n" "bus:target:lun specify the bus, target and lun\n" "deviceUNIT specify the device name, like \"da4\" or \"cd2\"\n" "Generic arguments:\n" "-v be verbose, print out sense information\n" "-t timeout command timeout in seconds, overrides default timeout\n" "-n dev_name specify device name, e.g. \"da\", \"cd\"\n" "-u unit specify unit number, e.g. 
\"0\", \"5\"\n" "-E have the kernel attempt to perform SCSI error recovery\n" "-C count specify the SCSI command retry count (needs -E to work)\n" "modepage arguments:\n" "-l list all available mode pages\n" "-m page specify the mode page to view or edit\n" "-e edit the specified mode page\n" "-b force view to binary mode\n" "-d disable block descriptors for mode sense\n" "-P pgctl page control field 0-3\n" "defects arguments:\n" "-f format specify defect list format (block, bfi or phys)\n" "-G get the grown defect list\n" "-P get the permanent defect list\n" "inquiry arguments:\n" "-D get the standard inquiry data\n" "-S get the serial number\n" "-R get the transfer rate, etc.\n" "reportluns arguments:\n" "-c only report a count of available LUNs\n" "-l only print out luns, and not a count\n" "-r specify \"default\", \"wellknown\" or \"all\"\n" "readcap arguments\n" "-b only report the blocksize\n" "-h human readable device size, base 2\n" "-H human readable device size, base 10\n" "-N print the number of blocks instead of last block\n" "-q quiet, print numbers only\n" "-s only report the last block/device size\n" "cmd arguments:\n" "-c cdb [args] specify the SCSI CDB\n" "-i len fmt specify input data and input data format\n" "-o len fmt [args] specify output data and output data fmt\n" "smpcmd arguments:\n" "-r len fmt [args] specify the SMP command to be sent\n" "-R len fmt [args] specify SMP response format\n" "smprg arguments:\n" "-l specify the long response format\n" "smppc arguments:\n" "-p phy specify the PHY to operate on\n" "-l specify the long request/response format\n" "-o operation specify the phy control operation\n" "-d name set the attached device name\n" "-m rate set the minimum physical link rate\n" "-M rate set the maximum physical link rate\n" "-T pp_timeout set the partial pathway timeout value\n" "-a enable|disable enable or disable SATA slumber\n" "-A enable|disable enable or disable SATA partial phy power\n" "-s enable|disable enable or disable SAS slumber\n" "-S enable|disable enable or disable SAS partial phy power\n" "smpphylist arguments:\n" "-l specify the long response format\n" "-q only print phys with attached devices\n" "smpmaninfo arguments:\n" "-l specify the long response format\n" "debug arguments:\n" "-I CAM_DEBUG_INFO -- scsi commands, errors, data\n" "-T CAM_DEBUG_TRACE -- routine flow tracking\n" "-S CAM_DEBUG_SUBTRACE -- internal routine command flow\n" "-c CAM_DEBUG_CDB -- print out SCSI CDBs only\n" "tags arguments:\n" "-N tags specify the number of tags to use for this device\n" "-q be quiet, don't report the number of tags\n" "-v report a number of tag-related parameters\n" "negotiate arguments:\n" "-a send a test unit ready after negotiation\n" "-c report/set current negotiation settings\n" "-D \"enable\" or \"disable\" disconnection\n" "-M mode set ATA mode\n" "-O offset set command delay offset\n" "-q be quiet, don't report anything\n" "-R syncrate synchronization rate in MHz\n" "-T \"enable\" or \"disable\" tagged queueing\n" "-U report/set user negotiation settings\n" "-W bus_width set the bus width in bits (8, 16 or 32)\n" "-v also print a Path Inquiry CCB for the controller\n" "format arguments:\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-w don't send immediate format command\n" "-y don't ask any questions\n" "sanitize arguments:\n" "-a operation operation mode: overwrite, block, crypto or exitfailure\n" "-c passes overwrite passes to perform (1 to 31)\n" "-I invert overwrite pattern after each 
pass\n" "-P pattern path to overwrite pattern file\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-U run operation in unrestricted completion exit mode\n" "-w don't send immediate sanitize command\n" "-y don't ask any questions\n" "idle/standby arguments:\n" "-t number of seconds before respective state.\n" "fwdownload arguments:\n" "-f fw_image path to firmware image file\n" "-q don't print informational messages, only errors\n" "-s run in simulation mode\n" "-v print info for every firmware segment sent to device\n" "-y don't ask any questions\n" "security arguments:\n" "-d pwd disable security using the given password for the selected\n" " user\n" "-e pwd erase the device using the given pwd for the selected user\n" "-f freeze the security configuration of the specified device\n" "-h pwd enhanced erase the device using the given pwd for the\n" " selected user\n" "-k pwd unlock the device using the given pwd for the selected\n" " user\n" "-l specifies which security level to set: high or maximum\n" "-q be quiet, do not print any status messages\n" "-s pwd password the device (enable security) using the given\n" " pwd for the selected user\n" "-T timeout overrides the timeout (seconds) used for erase operation\n" "-U specifies which user to set: user or master\n" "-y don't ask any questions\n" "hpa arguments:\n" "-f freeze the HPA configuration of the device\n" "-l lock the HPA configuration of the device\n" "-P make the HPA max sectors persist\n" "-p pwd Set the HPA configuration password required for unlock\n" " calls\n" "-q be quiet, do not print any status messages\n" "-s sectors configures the maximum user accessible sectors of the\n" " device\n" "-U pwd unlock the HPA configuration of the device\n" "-y don't ask any questions\n" "persist arguments:\n" "-i action specify read_keys, read_reservation, report_cap, or\n" " read_full_status\n" "-o action specify register, register_ignore, reserve, release,\n" " clear, preempt, preempt_abort, register_move, replace_lost\n" "-a set the All Target Ports (ALL_TG_PT) bit\n" "-I tid specify a Transport ID, e.g.: sas,0x1234567812345678\n" "-k key specify the Reservation Key\n" "-K sa_key specify the Service Action Reservation Key\n" "-p set the Activate Persist Through Power Loss bit\n" "-R rtp specify the Relative Target Port\n" "-s scope specify the scope: lun, extent, element or a number\n" "-S specify Transport ID for register, requires -I\n" "-T res_type specify the reservation type: read_shared, wr_ex, rd_ex,\n" " ex_ac, wr_ex_ro, ex_ac_ro, wr_ex_ar, ex_ac_ar\n" "-U unregister the current initiator for register_move\n" "attrib arguments:\n" "-r action specify attr_values, attr_list, lv_list, part_list, or\n" " supp_attr\n" "-w attr specify an attribute to write, one -w argument per attr\n" "-a attr_num only display this attribute number\n" "-c get cached attributes\n" "-e elem_addr request attributes for the given element in a changer\n" "-F form1,form2 output format, comma separated list: text_esc, text_raw,\n" " nonascii_esc, nonascii_trim, nonascii_raw, field_all,\n" " field_none, field_desc, field_num, field_size, field_rw\n" "-p partition request attributes for the given partition\n" "-s start_attr request attributes starting at the given number\n" "-T elem_type specify the element type (used with -e)\n" "-V logical_vol specify the logical volume ID\n" "opcodes arguments:\n" "-o opcode specify the individual opcode to list\n" "-s service_action specify the service action for the opcode\n" "-N do not 
return SCSI error for unsupported SA\n" "-T request nominal and recommended timeout values\n" "zone arguments:\n" "-c cmd required: rz, open, close, finish, or rwp\n" "-a apply the action to all zones\n" "-l LBA specify the zone starting LBA\n" "-o rep_opts report zones options: all, empty, imp_open, exp_open,\n" " closed, full, ro, offline, reset, nonseq, nonwp\n" "-P print_opt report zones printing: normal, summary, script\n" "epc arguments:\n" "-c cmd required: restore, goto, timer, state, enable, disable,\n" " source, status, list\n" "-d disable power mode (timer, state)\n" "-D delayed entry (goto)\n" "-e enable power mode (timer, state)\n" "-H hold power mode (goto)\n" "-p power_cond Idle_a, Idle_b, Idle_c, Standby_y, Standby_z (timer,\n" " state, goto)\n" "-P only display power mode (status)\n" "-r rst_src restore settings from: default, saved (restore)\n" "-s save mode (timer, state, restore)\n" "-S power_src set power source: battery, nonbattery (source)\n" "-T timer set timer, seconds, .1 sec resolution (timer)\n" ); #endif /* MINIMALISTIC */ } int main(int argc, char **argv) { int c; char *device = NULL; int unit = 0; struct cam_device *cam_dev = NULL; int timeout = 0, retry_count = 1; camcontrol_optret optreturn; char *tstr; const char *mainopt = "C:En:t:u:v"; const char *subopt = NULL; char combinedopt[256]; int error = 0, optstart = 2; int devopen = 1; #ifndef MINIMALISTIC path_id_t bus; target_id_t target; lun_id_t lun; #endif /* MINIMALISTIC */ cmdlist = CAM_CMD_NONE; arglist = CAM_ARG_NONE; if (argc < 2) { usage(0); exit(1); } /* * Get the base option. */ optreturn = getoption(option_table,argv[1], &cmdlist, &arglist,&subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("ambiguous option %s", argv[1]); usage(0); exit(1); } else if (optreturn == CC_OR_NOT_FOUND) { warnx("option %s not found", argv[1]); usage(0); exit(1); } /* * Ahh, getopt(3) is a pain. * * This is a gross hack. There really aren't many other good * options (excuse the pun) for parsing options in a situation like * this. getopt is kinda braindead, so you end up having to run * through the options twice, and give each invocation of getopt * the option string for the other invocation. * * You would think that you could just have two groups of options. * The first group would get parsed by the first invocation of * getopt, and the second group would get parsed by the second * invocation of getopt. It doesn't quite work out that way. When * the first invocation of getopt finishes, it leaves optind pointing * to the argument _after_ the first argument in the second group. * So when the second invocation of getopt comes around, it doesn't * recognize the first argument it gets and then bails out. * * A nice alternative would be to have a flag for getopt that says * "just keep parsing arguments even when you encounter an unknown * argument", but there isn't one. So there's no real clean way to * easily parse two sets of arguments without having one invocation * of getopt know about the other. * * Without this hack, the first invocation of getopt would work as * long as the generic arguments are first, but the second invocation * (in the subfunction) would fail in one of two ways. In the case * where you don't set optreset, it would fail because optind may be * pointing to the argument after the one it should be pointing at. * In the case where you do set optreset, and reset optind, it would * fail because getopt would run into the first set of options, which * it doesn't understand. 
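The subcommand lookup above goes through getoption(), whose body is outside this hunk; the "ambiguous option" diagnostic suggests it accepts unique prefixes of the command names. The stand-alone sketch below is only a stand-in with that inferred behavior, not the real camcontrol routine: the table entries, the CC_OR_FOUND name and the simplified signature are all illustrative.

#include <stdio.h>
#include <string.h>

typedef enum { CC_OR_FOUND, CC_OR_AMBIGUOUS, CC_OR_NOT_FOUND } lookup_ret;

struct cmd_entry {
	const char *name;
	const char *subopt;	/* getopt string handed back as "subopt" */
};

/* Example entries only; the real table lives elsewhere in camcontrol.c. */
static const struct cmd_entry cmd_table[] = {
	{ "devlist", "bv" },
	{ "defects", "f:GPqsS:X" },
	{ "debug",   "IPTSXc" },
};

static lookup_ret
lookup_cmd(const char *arg, const struct cmd_entry **hit)
{
	size_t i, len = strlen(arg);
	int matches = 0;

	*hit = NULL;
	for (i = 0; i < sizeof(cmd_table) / sizeof(cmd_table[0]); i++) {
		if (strncmp(arg, cmd_table[i].name, len) != 0)
			continue;
		if (len == strlen(cmd_table[i].name)) {
			*hit = &cmd_table[i];	/* exact match wins outright */
			return (CC_OR_FOUND);
		}
		*hit = &cmd_table[i];
		matches++;
	}
	if (matches == 0)
		return (CC_OR_NOT_FOUND);
	return (matches == 1 ? CC_OR_FOUND : CC_OR_AMBIGUOUS);
}

int
main(int argc, char **argv)
{
	const struct cmd_entry *ce;

	if (argc < 2)
		return (1);
	switch (lookup_cmd(argv[1], &ce)) {
	case CC_OR_AMBIGUOUS:
		fprintf(stderr, "ambiguous option %s\n", argv[1]);
		return (1);
	case CC_OR_NOT_FOUND:
		fprintf(stderr, "option %s not found\n", argv[1]);
		return (1);
	default:
		printf("command %s, subopts \"%s\"\n", ce->name, ce->subopt);
		return (0);
	}
}

With these example entries, "de" is ambiguous, "dev" resolves to devlist, and "tur" is not found, which matches the three outcomes main() handles.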
* * All of this would "sort of" work if you could somehow figure out * whether optind had been incremented one option too far. The * mechanics of that, however, are more daunting than just giving * both invocations all of the expect options for either invocation. * * Needless to say, I wouldn't mind if someone invented a better * (non-GPL!) command line parsing interface than getopt. I * wouldn't mind if someone added more knobs to getopt to make it * work better. Who knows, I may talk myself into doing it someday, * if the standards weenies let me. As it is, it just leads to * hackery like this and causes people to avoid it in some cases. * * KDM, September 8th, 1998 */ if (subopt != NULL) sprintf(combinedopt, "%s%s", mainopt, subopt); else sprintf(combinedopt, "%s", mainopt); /* * For these options we do not parse optional device arguments and * we do not open a passthrough device. */ if ((cmdlist == CAM_CMD_RESCAN) || (cmdlist == CAM_CMD_RESET) || (cmdlist == CAM_CMD_DEVTREE) || (cmdlist == CAM_CMD_USAGE) || (cmdlist == CAM_CMD_DEBUG)) devopen = 0; #ifndef MINIMALISTIC if ((devopen == 1) && (argc > 2 && argv[2][0] != '-')) { char name[30]; int rv; if (isdigit(argv[2][0])) { /* device specified as bus:target[:lun] */ rv = parse_btl(argv[2], &bus, &target, &lun, &arglist); if (rv < 2) errx(1, "numeric device specification must " "be either bus:target, or " "bus:target:lun"); /* default to 0 if lun was not specified */ if ((arglist & CAM_ARG_LUN) == 0) { lun = 0; arglist |= CAM_ARG_LUN; } optstart++; } else { if (cam_get_device(argv[2], name, sizeof name, &unit) == -1) errx(1, "%s", cam_errbuf); device = strdup(name); arglist |= CAM_ARG_DEVICE | CAM_ARG_UNIT; optstart++; } } #endif /* MINIMALISTIC */ /* * Start getopt processing at argv[2/3], since we've already * accepted argv[1..2] as the command name, and as a possible * device name. */ optind = optstart; /* * Now we run through the argument list looking for generic * options, and ignoring options that possibly belong to * subfunctions. */ while ((c = getopt(argc, argv, combinedopt))!= -1){ switch(c) { case 'C': retry_count = strtol(optarg, NULL, 0); if (retry_count < 0) errx(1, "retry count %d is < 0", retry_count); arglist |= CAM_ARG_RETRIES; break; case 'E': arglist |= CAM_ARG_ERR_RECOVER; break; case 'n': arglist |= CAM_ARG_DEVICE; tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; device = (char *)strdup(tstr); break; case 't': timeout = strtol(optarg, NULL, 0); if (timeout < 0) errx(1, "invalid timeout %d", timeout); /* Convert the timeout from seconds to ms */ timeout *= 1000; arglist |= CAM_ARG_TIMEOUT; break; case 'u': arglist |= CAM_ARG_UNIT; unit = strtol(optarg, NULL, 0); break; case 'v': arglist |= CAM_ARG_VERBOSE; break; default: break; } } #ifndef MINIMALISTIC /* * For most commands we'll want to open the passthrough device * associated with the specified device. In the case of the rescan * commands, we don't use a passthrough device at all, just the * transport layer device. */ if (devopen == 1) { if (((arglist & (CAM_ARG_BUS|CAM_ARG_TARGET)) == 0) && (((arglist & CAM_ARG_DEVICE) == 0) || ((arglist & CAM_ARG_UNIT) == 0))) { errx(1, "subcommand \"%s\" requires a valid device " "identifier", argv[1]); } if ((cam_dev = ((arglist & (CAM_ARG_BUS | CAM_ARG_TARGET))? cam_open_btl(bus, target, lun, O_RDWR, NULL) : cam_open_spec_device(device,unit,O_RDWR,NULL))) == NULL) errx(1,"%s", cam_errbuf); } #endif /* MINIMALISTIC */ /* * Reset optind to 2, and reset getopt, so these routines can parse * the arguments again. 
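Here is a stand-alone illustration of the two-pass getopt(3) arrangement described in the comment above and wired up just below: both passes are handed the combined option string, so neither treats the other's letters as errors, and optind/optreset rewind the parser between them. The "f:qsy" subcommand string is purely illustrative, and optreset is a BSD extension (it does not exist on glibc).

#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	const char *mainopt = "C:En:t:u:v";
	const char *subopt = "f:qsy";	/* hypothetical subcommand options */
	char combinedopt[256];
	int c, optstart = 2;		/* argv[1] is the subcommand name */

	snprintf(combinedopt, sizeof(combinedopt), "%s%s", mainopt, subopt);

	/* Pass 1: handle only the generic options, skip the rest. */
	optind = optstart;
	while ((c = getopt(argc, argv, combinedopt)) != -1) {
		switch (c) {
		case 't':
			printf("generic: timeout %s\n", optarg);
			break;
		case 'v':
			printf("generic: verbose\n");
			break;
		default:
			break;		/* belongs to the subcommand */
		}
	}

	/* Pass 2: rewind and let the subcommand handler do the same dance. */
	optind = optstart;
	optreset = 1;
	while ((c = getopt(argc, argv, combinedopt)) != -1) {
		switch (c) {
		case 'f':
			printf("sub: image %s\n", optarg);
			break;
		case 'q':
			printf("sub: quiet\n");
			break;
		default:
			break;		/* generic option, already handled */
		}
	}
	return (0);
}

Run as "./a.out somecmd -v -f fw.img -q", the first pass reports only the verbose flag and the second only -f and -q, which is exactly the split that main() and the subcommand handlers rely on.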
*/ optind = optstart; optreset = 1; switch(cmdlist) { #ifndef MINIMALISTIC case CAM_CMD_DEVLIST: error = getdevlist(cam_dev); break; case CAM_CMD_HPA: error = atahpa(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; #endif /* MINIMALISTIC */ case CAM_CMD_DEVTREE: error = getdevtree(argc, argv, combinedopt); break; #ifndef MINIMALISTIC case CAM_CMD_TUR: error = testunitready(cam_dev, retry_count, timeout, 0); break; case CAM_CMD_INQUIRY: error = scsidoinquiry(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_IDENTIFY: error = ataidentify(cam_dev, retry_count, timeout); break; case CAM_CMD_STARTSTOP: error = scsistart(cam_dev, arglist & CAM_ARG_START_UNIT, arglist & CAM_ARG_EJECT, retry_count, timeout); break; #endif /* MINIMALISTIC */ case CAM_CMD_RESCAN: error = dorescan_or_reset(argc, argv, 1); break; case CAM_CMD_RESET: error = dorescan_or_reset(argc, argv, 0); break; #ifndef MINIMALISTIC case CAM_CMD_READ_DEFECTS: error = readdefects(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_MODE_PAGE: modepage(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SCSI_CMD: error = scsicmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_CMD: error = smpcmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_RG: error = smpreportgeneral(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PC: error = smpphycontrol(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PHYLIST: error = smpphylist(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_MANINFO: error = smpmaninfo(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_DEBUG: error = camdebug(argc, argv, combinedopt); break; case CAM_CMD_TAG: error = tagcontrol(cam_dev, argc, argv, combinedopt); break; case CAM_CMD_RATE: error = ratecontrol(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_FORMAT: error = scsiformat(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_REPORTLUNS: error = scsireportluns(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_READCAP: error = scsireadcapacity(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_IDLE: case CAM_CMD_STANDBY: case CAM_CMD_SLEEP: error = atapm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_APM: case CAM_CMD_AAM: error = ataaxm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SECURITY: error = atasecurity(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_DOWNLOAD_FW: error = fwdownload(cam_dev, argc, argv, combinedopt, arglist & CAM_ARG_VERBOSE, retry_count, timeout); break; case CAM_CMD_SANITIZE: error = scsisanitize(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_PERSIST: error = scsipersist(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_ATTRIB: error = scsiattrib(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_OPCODES: error = scsiopcodes(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_REPROBE: error = scsireprobe(cam_dev); break; case CAM_CMD_ZONE: error = zone(cam_dev, argc, argv, 
combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_EPC: error = epc(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; #endif /* MINIMALISTIC */ case CAM_CMD_USAGE: usage(1); break; default: usage(0); error = 1; break; } if (cam_dev != NULL) cam_close_device(cam_dev); exit(error); } Index: head/sbin/camcontrol/fwdownload.c =================================================================== --- head/sbin/camcontrol/fwdownload.c (revision 300546) +++ head/sbin/camcontrol/fwdownload.c (revision 300547) @@ -1,1056 +1,1052 @@ /*- * Copyright (c) 2011 Sandvine Incorporated. All rights reserved. * Copyright (c) 2002-2011 Andre Albsmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This software is derived from Andre Albsmeier's fwprog.c which contained * the following note: * * Many thanks goes to Marc Frajola from * TeraSolutions for the initial idea and his programme for upgrading * the firmware of I*M DDYS drives. */ /* * BEWARE: * * The fact that you see your favorite vendor listed below does not * imply that your equipment won't break when you use this software * with it. It only means that the firmware of at least one device type * of each vendor listed has been programmed successfully using this code. * * The -s option simulates a download but does nothing apart from that. * It can be used to check what chunk sizes would have been used with the * specified device. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "progress.h" #include "camcontrol.h" #define WB_TIMEOUT 50000 /* 50 seconds */ typedef enum { VENDOR_HGST, VENDOR_HITACHI, VENDOR_HP, VENDOR_IBM, VENDOR_PLEXTOR, VENDOR_QUALSTAR, VENDOR_QUANTUM, VENDOR_SAMSUNG, VENDOR_SEAGATE, VENDOR_SMART, VENDOR_ATA, VENDOR_UNKNOWN } fw_vendor_t; /* * FW_TUR_READY: The drive must return good status for a test unit ready. * * FW_TUR_NOT_READY: The drive must return not ready status for a test unit * ready. You may want this in a removable media drive. * * FW_TUR_NA: It doesn't matter whether the drive is ready or not. * This may be the case for a removable media drive. 
*/ typedef enum { FW_TUR_NONE, FW_TUR_READY, FW_TUR_NOT_READY, FW_TUR_NA } fw_tur_status; /* * FW_TIMEOUT_DEFAULT: Attempt to probe for a WRITE BUFFER timeout * value from the drive. If we get an answer, * use the Recommended timeout. Otherwise, * use the default value from the table. * * FW_TIMEOUT_DEV_REPORTED: The timeout value was probed directly from * the device. * * FW_TIMEOUT_NO_PROBE: Do not ask the device for a WRITE BUFFER * timeout value. Use the device-specific * value. * * FW_TIMEOUT_USER_SPEC: The user specified a timeout on the command * line with the -t option. This overrides any * probe or default timeout. */ typedef enum { FW_TIMEOUT_DEFAULT, FW_TIMEOUT_DEV_REPORTED, FW_TIMEOUT_NO_PROBE, FW_TIMEOUT_USER_SPEC } fw_timeout_type; /* * type: Enumeration for the particular vendor. * * pattern: Pattern to match for the Vendor ID from the SCSI * Inquiry data. * * dev_type: SCSI device type to match, or T_ANY to match any * device from the given vendor. Note that if there * is a specific device type listed for a particular * vendor, it must be listed before a T_ANY entry. * * max_pkt_size: Maximum packet size when talking to a device. Note * that although large data sizes may be supported by * the target device, they may not be supported by the * OS or the controller. * * cdb_byte2: This specifies byte 2 (byte 1 when counting from 0) * of the CDB. This is generally the WRITE BUFFER mode. * * cdb_byte2_last: This specifies byte 2 for the last chunk of the * download. * * inc_cdb_buffer_id: Increment the buffer ID by 1 for each chunk sent * down to the drive. * * inc_cdb_offset: Increment the offset field in the CDB with the byte * offset into the firmware file. * * tur_status: Pay attention to whether the device is ready before * upgrading the firmware, or not. See above for the * values. */ struct fw_vendor { fw_vendor_t type; const char *pattern; int dev_type; int max_pkt_size; u_int8_t cdb_byte2; u_int8_t cdb_byte2_last; int inc_cdb_buffer_id; int inc_cdb_offset; fw_tur_status tur_status; int timeout_ms; fw_timeout_type timeout_type; }; /* * Vendor notes: * * HGST: The packets need to be sent in multiples of 4K. * * IBM: For LTO and TS drives, the buffer ID is ignored in mode 7 (and * some other modes). It treats the request as a firmware download. * The offset (and therefore the length of each chunk sent) needs * to be a multiple of the offset boundary specified for firmware * (buffer ID 4) in the read buffer command. At least for LTO-6, * that seems to be 0, but using a 32K chunk size should satisfy * most any alignment requirement. * * SmrtStor: Mode 5 is also supported, but since the firmware is 400KB or * so, we can't fit it in a single request in most cases. 
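As a dry run of how the struct fw_vendor fields documented above drive the per-chunk WRITE BUFFER parameters in fw_download_img() further down (and what -s effectively previews), here is a stand-alone sketch; the policy values and the 80 KiB image size are made up for illustration.

#include <stdio.h>

struct chunk_policy {			/* just the fw_vendor fields used here */
	int max_pkt_size;
	unsigned char cdb_byte2;	/* WRITE BUFFER mode, normal chunks */
	unsigned char cdb_byte2_last;	/* WRITE BUFFER mode, last chunk */
	int inc_cdb_buffer_id;
	int inc_cdb_offset;
};

int
main(void)
{
	/* Hypothetical entry: 32K chunks, mode 0x07, increment the offset. */
	struct chunk_policy p = { 0x8000, 0x07, 0x07, 0, 1 };
	int img_size = 0x14000;		/* 80 KiB image */
	int offset, pkt_size, pkt_count = 0, last;

	for (offset = 0; offset < img_size; offset += pkt_size, pkt_count++) {
		pkt_size = img_size - offset;
		last = (pkt_size <= p.max_pkt_size);
		if (!last)
			pkt_size = p.max_pkt_size;
		printf("chunk %d: mode %#04x buffer_id %d offset %#x "
		    "length %#x%s\n", pkt_count,
		    last ? p.cdb_byte2_last : p.cdb_byte2,
		    p.inc_cdb_buffer_id ? pkt_count : 0,
		    p.inc_cdb_offset ? offset : 0, pkt_size,
		    last ? " (last)" : "");
	}
	return (0);
}

A vendor entry with inc_cdb_buffer_id set would count the buffer ID up instead, and one with inc_cdb_offset clear would leave the offset at zero; those two knobs plus the mode bytes are the differences the table below encodes.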
*/ static struct fw_vendor vendors_list[] = { {VENDOR_HGST, "HGST", T_DIRECT, 0x1000, 0x07, 0x07, 1, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_HITACHI, "HITACHI", T_ANY, 0x8000, 0x05, 0x05, 1, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_HP, "HP", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_IBM, "IBM", T_SEQUENTIAL, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_NA, 300 * 1000, FW_TIMEOUT_DEFAULT}, {VENDOR_IBM, "IBM", T_ANY, 0x8000, 0x05, 0x05, 1, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_PLEXTOR, "PLEXTOR", T_ANY, 0x2000, 0x04, 0x05, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_QUALSTAR, "QUALSTAR", T_ANY, 0x2030, 0x05, 0x05, 0, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_QUANTUM, "QUANTUM", T_ANY, 0x2000, 0x04, 0x05, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_SAMSUNG, "SAMSUNG", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_SEAGATE, "SEAGATE", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_SMART, "SmrtStor", T_DIRECT, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, /* * We match any ATA device. This is really just a placeholder, * since we won't actually send a WRITE BUFFER with any of the * listed parameters. If a SATA device is behind a SAS controller, * the SCSI to ATA translation code (at least for LSI) doesn't * generally translate a SCSI WRITE BUFFER into an ATA DOWNLOAD * MICROCODE command. So, we use the SCSI ATA PASS_THROUGH command * to send the ATA DOWNLOAD MICROCODE command instead. */ {VENDOR_ATA, "ATA", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_NO_PROBE}, {VENDOR_UNKNOWN, NULL, T_ANY, 0x0000, 0x00, 0x00, 0, 0, FW_TUR_NONE, WB_TIMEOUT, FW_TIMEOUT_DEFAULT} }; struct fw_timeout_desc { fw_timeout_type timeout_type; const char *timeout_desc; }; static const struct fw_timeout_desc fw_timeout_desc_table[] = { { FW_TIMEOUT_DEFAULT, "the default" }, { FW_TIMEOUT_DEV_REPORTED, "recommended by this particular device" }, { FW_TIMEOUT_NO_PROBE, "the default" }, { FW_TIMEOUT_USER_SPEC, "what was specified on the command line" } }; #ifndef ATA_DOWNLOAD_MICROCODE #define ATA_DOWNLOAD_MICROCODE 0x92 #endif #define USE_OFFSETS_FEATURE 0x3 #ifndef LOW_SECTOR_SIZE #define LOW_SECTOR_SIZE 512 #endif #define ATA_MAKE_LBA(o, p) \ ((((((o) / LOW_SECTOR_SIZE) >> 8) & 0xff) << 16) | \ ((((o) / LOW_SECTOR_SIZE) & 0xff) << 8) | \ ((((p) / LOW_SECTOR_SIZE) >> 8) & 0xff)) #define ATA_MAKE_SECTORS(p) (((p) / 512) & 0xff) #ifndef UNKNOWN_MAX_PKT_SIZE #define UNKNOWN_MAX_PKT_SIZE 0x8000 #endif static struct fw_vendor *fw_get_vendor(struct cam_device *cam_dev, struct ata_params *ident_buf); static int fw_get_timeout(struct cam_device *cam_dev, struct fw_vendor *vp, int retry_count, int timeout); static int fw_validate_ibm(struct cam_device *dev, int retry_count, int timeout, int fd, char *buf, const char *fw_img_path, int quiet); static char *fw_read_img(struct cam_device *dev, int retry_count, int timeout, int quiet, const char *fw_img_path, struct fw_vendor *vp, int *num_bytes); static int fw_check_device_ready(struct cam_device *dev, camcontrol_devtype devtype, struct fw_vendor *vp, int printerrors, int timeout); static int fw_download_img(struct cam_device *cam_dev, struct fw_vendor *vp, char *buf, int img_size, int sim_mode, int printerrors, int quiet, int retry_count, int timeout, const char */*name*/, camcontrol_devtype devtype); /* * Find entry 
in vendors list that belongs to * the vendor of given cam device. */ static struct fw_vendor * fw_get_vendor(struct cam_device *cam_dev, struct ata_params *ident_buf) { char vendor[42]; struct fw_vendor *vp; if (cam_dev == NULL) return (NULL); if (ident_buf != NULL) { cam_strvis((u_char *)vendor, ident_buf->model, sizeof(ident_buf->model), sizeof(vendor)); for (vp = vendors_list; vp->pattern != NULL; vp++) { if (vp->type == VENDOR_ATA) return (vp); } } else { cam_strvis((u_char *)vendor, (u_char *)cam_dev->inq_data.vendor, sizeof(cam_dev->inq_data.vendor), sizeof(vendor)); } for (vp = vendors_list; vp->pattern != NULL; vp++) { if (!cam_strmatch((const u_char *)vendor, (const u_char *)vp->pattern, strlen(vendor))) { if ((vp->dev_type == T_ANY) || (vp->dev_type == SID_TYPE(&cam_dev->inq_data))) break; } } return (vp); } static int fw_get_timeout(struct cam_device *cam_dev, struct fw_vendor *vp, int retry_count, int timeout) { struct scsi_report_supported_opcodes_one *one; struct scsi_report_supported_opcodes_timeout *td; uint8_t *buf = NULL; uint32_t fill_len = 0, cdb_len = 0, rec_timeout = 0; int retval = 0; /* * If the user has specified a timeout on the command line, we let * him override any default or probed value. */ if (timeout != 0) { vp->timeout_type = FW_TIMEOUT_USER_SPEC; vp->timeout_ms = timeout; goto bailout; } /* * Check to see whether we should probe for a timeout for this * device. */ if (vp->timeout_type == FW_TIMEOUT_NO_PROBE) goto bailout; retval = scsigetopcodes(/*device*/ cam_dev, /*opcode_set*/ 1, /*opcode*/ WRITE_BUFFER, /*show_sa_errors*/ 1, /*sa_set*/ 0, /*service_action*/ 0, /*timeout_desc*/ 1, /*retry_count*/ retry_count, /*timeout*/ 10000, /*verbose*/ 0, /*fill_len*/ &fill_len, /*data_ptr*/ &buf); /* * It isn't an error if we can't get a timeout descriptor. We just * continue on with the default timeout. */ if (retval != 0) { retval = 0; goto bailout; } /* * Even if the drive didn't return a SCSI error, if we don't have * enough data to contain the one opcode descriptor, the CDB * structure and a timeout descriptor, we don't have the timeout * value we're looking for. So we'll just fall back to the * default value. */ if (fill_len < (sizeof(*one) + sizeof(struct scsi_write_buffer) + sizeof(*td))) goto bailout; one = (struct scsi_report_supported_opcodes_one *)buf; /* * If the drive claims to not support the WRITE BUFFER command... * fall back to the default timeout value and let things fail on * the actual firmware download. */ if ((one->support & RSO_ONE_SUP_MASK) == RSO_ONE_SUP_NOT_SUP) goto bailout; cdb_len = scsi_2btoul(one->cdb_length); td = (struct scsi_report_supported_opcodes_timeout *) &buf[sizeof(*one) + cdb_len]; rec_timeout = scsi_4btoul(td->recommended_time); /* * If the recommended timeout is 0, then the device has probably * returned a bogus value. */ if (rec_timeout == 0) goto bailout; /* CAM timeouts are in ms */ rec_timeout *= 1000; vp->timeout_ms = rec_timeout; vp->timeout_type = FW_TIMEOUT_DEV_REPORTED; bailout: return (retval); } #define SVPD_IBM_FW_DESIGNATION 0x03 /* * IBM LTO and TS tape drives have an INQUIRY VPD page 0x3 with the following * format: */ struct fw_ibm_tape_fw_designation { uint8_t device; uint8_t page_code; uint8_t reserved; uint8_t length; uint8_t ascii_length; uint8_t reserved2[3]; uint8_t load_id[4]; uint8_t fw_rev[4]; uint8_t ptf_number[4]; uint8_t patch_number[4]; uint8_t ru_name[8]; uint8_t lib_seq_num[5]; }; /* * The firmware for IBM tape drives has the following header format. 
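Reduced to its decision ladder, the timeout selection performed by fw_get_timeout() above looks like the stand-alone sketch below: an explicit -t wins, otherwise a successfully probed recommendation (reported in seconds) is converted to milliseconds, otherwise the vendor table default stands; the FW_TIMEOUT_NO_PROBE case simply never produces a probed value. The function and parameter names here are local to the sketch.

#include <stdio.h>

static int
pick_fw_timeout_ms(int user_ms, int probed_sec, int default_ms)
{
	if (user_ms != 0)
		return (user_ms);		/* -t always wins */
	if (probed_sec > 0)
		return (probed_sec * 1000);	/* CAM timeouts are in ms */
	return (default_ms);			/* e.g. WB_TIMEOUT */
}

int
main(void)
{
	/* A drive recommending a 120 second WRITE BUFFER timeout. */
	printf("%d ms\n", pick_fw_timeout_ms(0, 120, 50000));
	return (0);
}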
The * load_id and ru_name in the header file should match what is returned in * VPD page 0x3. */ struct fw_ibm_tape_fw_header { uint8_t unspec[4]; uint8_t length[4]; /* Firmware and header! */ uint8_t load_id[4]; uint8_t fw_rev[4]; uint8_t reserved[8]; uint8_t ru_name[8]; }; static int fw_validate_ibm(struct cam_device *dev, int retry_count, int timeout, int fd, char *buf, const char *fw_img_path, int quiet) { union ccb *ccb; struct fw_ibm_tape_fw_designation vpd_page; struct fw_ibm_tape_fw_header *header; char drive_rev[sizeof(vpd_page.fw_rev) + 1]; char file_rev[sizeof(vpd_page.fw_rev) + 1]; int retval = 1; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("couldn't allocate CCB"); goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); bzero(&vpd_page, sizeof(vpd_page)); scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)&vpd_page, /* inq_len */ sizeof(vpd_page), /* evpd */ 1, /* page_code */ SVPD_IBM_FW_DESIGNATION, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(dev, ccb) < 0) { warn("error getting firmware designation page"); cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); cam_freeccb(ccb); ccb = NULL; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } /* * Read the firmware header only. */ if (read(fd, buf, sizeof(*header)) != sizeof(*header)) { warn("unable to read %zu bytes from %s", sizeof(*header), fw_img_path); goto bailout; } /* Rewind the file back to 0 for the full file read. */ if (lseek(fd, 0, SEEK_SET) == -1) { warn("Unable to lseek"); goto bailout; } header = (struct fw_ibm_tape_fw_header *)buf; bzero(drive_rev, sizeof(drive_rev)); bcopy(vpd_page.fw_rev, drive_rev, sizeof(vpd_page.fw_rev)); bzero(file_rev, sizeof(file_rev)); bcopy(header->fw_rev, file_rev, sizeof(header->fw_rev)); if (quiet == 0) { fprintf(stdout, "Current Drive Firmware version: %s\n", drive_rev); fprintf(stdout, "Firmware File version: %s\n", file_rev); } /* * For IBM tape drives the load ID and RU name reported by the * drive should match what is in the firmware file. */ if (bcmp(vpd_page.load_id, header->load_id, MIN(sizeof(vpd_page.load_id), sizeof(header->load_id))) != 0) { warnx("Drive Firmware load ID 0x%x does not match firmware " "file load ID 0x%x", scsi_4btoul(vpd_page.load_id), scsi_4btoul(header->load_id)); goto bailout; } if (bcmp(vpd_page.ru_name, header->ru_name, MIN(sizeof(vpd_page.ru_name), sizeof(header->ru_name))) != 0) { warnx("Drive Firmware RU name 0x%jx does not match firmware " "file RU name 0x%jx", (uintmax_t)scsi_8btou64(vpd_page.ru_name), (uintmax_t)scsi_8btou64(header->ru_name)); goto bailout; } if (quiet == 0) fprintf(stdout, "Firmware file is valid for this drive.\n"); retval = 0; bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * Allocate a buffer and read fw image file into it * from given path. Number of bytes read is stored * in num_bytes. 
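The CCB_CLEAR_ALL_EXCEPT_HDR() calls introduced throughout this revision replace the open-coded bzero of everything past the CCB header. The macro's real definition is not part of this diff (it presumably lives with the other CCB definitions in <cam/cam_ccb.h>); the sketch below only shows the shape such a macro can take, and why the call sites get simpler: judging from the hunks, the amount to clear is derived from the object actually passed in, instead of being spelled out as a sizeof difference at every caller.

/*
 * Hypothetical equivalent, for illustration only; the real macro may be
 * written differently.  It assumes bzero() and struct ccb_hdr, as the
 * surrounding code does.
 */
#define	EXAMPLE_CLEAR_ALL_EXCEPT_HDR(ccbp)				\
	bzero((char *)(ccbp) + sizeof(struct ccb_hdr),			\
	    sizeof(*(ccbp)) - sizeof(struct ccb_hdr))

/*
 * With that shape, both kinds of call seen in this diff clear exactly the
 * payload that follows the header of the object they are handed:
 *
 *	EXAMPLE_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);   clears a struct ccb_scsiio
 *	EXAMPLE_CLEAR_ALL_EXCEPT_HDR(ccb);          clears a whole union ccb
 */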
*/ static char * fw_read_img(struct cam_device *dev, int retry_count, int timeout, int quiet, const char *fw_img_path, struct fw_vendor *vp, int *num_bytes) { int fd; struct stat stbuf; char *buf; off_t img_size; int skip_bytes = 0; if ((fd = open(fw_img_path, O_RDONLY)) < 0) { warn("Could not open image file %s", fw_img_path); return (NULL); } if (fstat(fd, &stbuf) < 0) { warn("Could not stat image file %s", fw_img_path); goto bailout1; } if ((img_size = stbuf.st_size) == 0) { warnx("Zero length image file %s", fw_img_path); goto bailout1; } if ((buf = malloc(img_size)) == NULL) { warnx("Could not allocate buffer to read image file %s", fw_img_path); goto bailout1; } /* Skip headers if applicable. */ switch (vp->type) { case VENDOR_SEAGATE: if (read(fd, buf, 16) != 16) { warn("Could not read image file %s", fw_img_path); goto bailout; } if (lseek(fd, 0, SEEK_SET) == -1) { warn("Unable to lseek"); goto bailout; } if ((strncmp(buf, "SEAGATE,SEAGATE ", 16) == 0) || (img_size % 512 == 80)) skip_bytes = 80; break; case VENDOR_QUALSTAR: skip_bytes = img_size % 1030; break; case VENDOR_IBM: { if (vp->dev_type != T_SEQUENTIAL) break; if (fw_validate_ibm(dev, retry_count, timeout, fd, buf, fw_img_path, quiet) != 0) goto bailout; break; } default: break; } if (skip_bytes != 0) { fprintf(stdout, "Skipping %d byte header.\n", skip_bytes); if (lseek(fd, skip_bytes, SEEK_SET) == -1) { warn("Could not lseek"); goto bailout; } img_size -= skip_bytes; } /* Read image into a buffer. */ if (read(fd, buf, img_size) != img_size) { warn("Could not read image file %s", fw_img_path); goto bailout; } *num_bytes = img_size; close(fd); return (buf); bailout: free(buf); bailout1: close(fd); *num_bytes = 0; return (NULL); } /* * Returns 0 for "success", where success means that the device has met the * requirement in the vendor structure for being ready or not ready when * firmware is downloaded. * * Returns 1 for a failure to be ready to accept a firmware download. * (e.g., a drive needs to be ready, but returns not ready) * * Returns -1 for any other failure. */ static int fw_check_device_ready(struct cam_device *dev, camcontrol_devtype devtype, struct fw_vendor *vp, int printerrors, int timeout) { union ccb *ccb; int retval = 0; int16_t *ptr = NULL; size_t dxfer_len = 0; if ((ccb = cam_getccb(dev)) == NULL) { warnx("Could not allocate CCB"); retval = -1; goto bailout; } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(ccb); if (devtype != CC_DT_SCSI) { dxfer_len = sizeof(struct ata_params); ptr = (uint16_t *)malloc(dxfer_len); if (ptr == NULL) { warnx("can't malloc memory for identify"); retval = -1; goto bailout; } bzero(ptr, dxfer_len); } switch (devtype) { case CC_DT_SCSI: scsi_test_unit_ready(&ccb->csio, /*retries*/ 0, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 5000); break; case CC_DT_ATA_BEHIND_SCSI: case CC_DT_ATA: { retval = build_ata_cmd(ccb, /*retries*/ 1, /*flags*/ CAM_DIR_IN, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*protocol*/ AP_PROTO_PIO_IN, /*ata_flags*/ AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_FROM_DEV, /*features*/ 0, /*sector_count*/ (uint8_t) dxfer_len, /*lba*/ 0, /*command*/ ATA_ATA_IDENTIFY, /*auxiliary*/ 0, /*data_ptr*/ (uint8_t *)ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? 
timeout : 30 * 1000, /*is48bit*/ 0, /*devtype*/ devtype); if (retval != 0) { retval = -1; warnx("%s: build_ata_cmd() failed, likely " "programmer error", __func__); goto bailout; } break; } default: warnx("Unknown disk type %d", devtype); retval = -1; goto bailout; break; /*NOTREACHED*/ } ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(dev, ccb); if (retval != 0) { warn("error sending %s CCB", (devtype == CC_DT_SCSI) ? "Test Unit Ready" : "Identify"); retval = -1; goto bailout; } if (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (vp->tur_status == FW_TUR_READY)) { warnx("Device is not ready"); if (printerrors) cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } else if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (vp->tur_status == FW_TUR_NOT_READY)) { warnx("Device cannot have media loaded when firmware is " "downloaded"); retval = 1; goto bailout; } bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * Download firmware stored in buf to cam_dev. If simulation mode * is enabled, only show what packet sizes would be sent to the * device but do not sent any actual packets */ static int fw_download_img(struct cam_device *cam_dev, struct fw_vendor *vp, char *buf, int img_size, int sim_mode, int printerrors, int quiet, int retry_count, int timeout, const char *imgname, camcontrol_devtype devtype) { struct scsi_write_buffer cdb; progress_t progress; int size = 0; union ccb *ccb = NULL; int pkt_count = 0; int max_pkt_size; u_int32_t pkt_size = 0; char *pkt_ptr = buf; u_int32_t offset; int last_pkt = 0; int retval = 0; /* * Check to see whether the device is ready to accept a firmware * download. */ retval = fw_check_device_ready(cam_dev, devtype, vp, printerrors, timeout); if (retval != 0) goto bailout; if ((ccb = cam_getccb(cam_dev)) == NULL) { warnx("Could not allocate CCB"); retval = 1; goto bailout; } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(ccb); max_pkt_size = vp->max_pkt_size; if (max_pkt_size == 0) max_pkt_size = UNKNOWN_MAX_PKT_SIZE; pkt_size = max_pkt_size; progress_init(&progress, imgname, size = img_size); /* Download single fw packets. */ do { if (img_size <= max_pkt_size) { last_pkt = 1; pkt_size = img_size; } progress_update(&progress, size - img_size); if (((sim_mode == 0) && (quiet == 0)) || ((sim_mode != 0) && (printerrors == 0))) progress_draw(&progress); bzero(&cdb, sizeof(cdb)); switch (devtype) { case CC_DT_SCSI: cdb.opcode = WRITE_BUFFER; cdb.control = 0; /* Parameter list length. */ scsi_ulto3b(pkt_size, &cdb.length[0]); offset = vp->inc_cdb_offset ? (pkt_ptr - buf) : 0; scsi_ulto3b(offset, &cdb.offset[0]); cdb.byte2 = last_pkt ? vp->cdb_byte2_last : vp->cdb_byte2; cdb.buffer_id = vp->inc_cdb_buffer_id ? pkt_count : 0; /* Zero out payload of ccb union after ccb header. */ - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); /* * Copy previously constructed cdb into ccb_scsiio * struct. */ bcopy(&cdb, &ccb->csio.cdb_io.cdb_bytes[0], sizeof(struct scsi_write_buffer)); /* Fill rest of ccb_scsiio struct. */ cam_fill_csio(&ccb->csio, /* ccb_scsiio*/ retry_count, /* retries*/ NULL, /* cbfcnp*/ CAM_DIR_OUT | CAM_DEV_QFRZDIS, /* flags*/ CAM_TAG_ACTION_NONE, /* tag_action*/ (u_char *)pkt_ptr, /* data_ptr*/ pkt_size, /* dxfer_len*/ SSD_FULL_SIZE, /* sense_len*/ sizeof(struct scsi_write_buffer), /* cdb_len*/ timeout ? 
timeout : WB_TIMEOUT); /* timeout*/ break; case CC_DT_ATA: case CC_DT_ATA_BEHIND_SCSI: { uint32_t off; off = (uint32_t)(pkt_ptr - buf); retval = build_ata_cmd(ccb, /*retry_count*/ retry_count, /*flags*/ CAM_DIR_OUT | CAM_DEV_QFRZDIS, /*tag_action*/ CAM_TAG_ACTION_NONE, /*protocol*/ AP_PROTO_PIO_OUT, /*ata_flags*/ AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV, /*features*/ USE_OFFSETS_FEATURE, /*sector_count*/ ATA_MAKE_SECTORS(pkt_size), /*lba*/ ATA_MAKE_LBA(off, pkt_size), /*command*/ ATA_DOWNLOAD_MICROCODE, /*auxiliary*/ 0, /*data_ptr*/ (uint8_t *)pkt_ptr, /*dxfer_len*/ pkt_size, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : WB_TIMEOUT, /*is48bit*/ 0, /*devtype*/ devtype); if (retval != 0) { warnx("%s: build_ata_cmd() failed, likely " "programmer error", __func__); goto bailout; } break; } default: warnx("Unknown device type %d", devtype); retval = 1; goto bailout; break; /*NOTREACHED*/ } if (!sim_mode) { /* Execute the command. */ if (cam_send_ccb(cam_dev, ccb) < 0 || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("Error writing image to device"); if (printerrors) cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } } else if (printerrors) { cam_error_print(cam_dev, ccb, CAM_ESF_COMMAND, 0, stdout); } /* Prepare next round. */ pkt_count++; pkt_ptr += pkt_size; img_size -= pkt_size; } while(!last_pkt); bailout: if (quiet == 0) progress_complete(&progress, size - img_size); if (ccb != NULL) cam_freeccb(ccb); return (retval); } int fwdownload(struct cam_device *device, int argc, char **argv, char *combinedopt, int printerrors, int retry_count, int timeout) { struct fw_vendor *vp; char *fw_img_path = NULL; struct ata_params *ident_buf = NULL; camcontrol_devtype devtype; char *buf = NULL; int img_size; int c; int sim_mode = 0; int confirmed = 0; int quiet = 0; int retval = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'f': fw_img_path = optarg; break; case 'q': quiet = 1; break; case 's': sim_mode = 1; break; case 'y': confirmed = 1; break; default: break; } } if (fw_img_path == NULL) errx(1, "you must specify a firmware image file using -f " "option"); retval = get_device_type(device, retry_count, timeout, printerrors, &devtype); if (retval != 0) errx(1, "Unable to determine device type"); if ((devtype == CC_DT_ATA) || (devtype == CC_DT_ATA_BEHIND_SCSI)) { union ccb *ccb; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); retval = 1; goto bailout; } if (ata_do_identify(device, retry_count, timeout, ccb, &ident_buf) != 0) { cam_freeccb(ccb); retval = 1; goto bailout; } } else if (devtype != CC_DT_SCSI) errx(1, "Unsupported device type %d", devtype); vp = fw_get_vendor(device, ident_buf); /* * Bail out if we have an unknown vendor and this isn't an ATA * disk. For a SCSI disk, we have no chance of working properly * with the default values in the VENDOR_UNKNOWN case. For an ATA * disk connected via an ATA transport, we may work for drives that * support the ATA_DOWNLOAD_MICROCODE command. 
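Because ATA_MAKE_LBA() and ATA_MAKE_SECTORS() pack 512-byte block counts rather than byte counts into the LBA and sector count fields, a worked example helps. The program below is a stand-alone check using the macro definitions given earlier in this file; the offset and chunk size are arbitrary sample values.

#include <stdint.h>
#include <stdio.h>

#define	LOW_SECTOR_SIZE		512
#define	ATA_MAKE_LBA(o, p) \
	((((((o) / LOW_SECTOR_SIZE) >> 8) & 0xff) << 16) | \
	((((o) / LOW_SECTOR_SIZE) & 0xff) << 8) | \
	((((p) / LOW_SECTOR_SIZE) >> 8) & 0xff))
#define	ATA_MAKE_SECTORS(p)	(((p) / 512) & 0xff)

int
main(void)
{
	uint32_t off = 0x30000;		/* chunk starts 192 KiB into the image */
	uint32_t pkt_size = 0x8000;	/* 32 KiB chunk */

	/*
	 * 0x30000 bytes = 0x180 blocks and 0x8000 bytes = 0x40 blocks, so
	 * the 16-bit block offset lands in the upper LBA bytes, the high
	 * byte of the block count in the low LBA byte, and the low byte of
	 * the block count in the sector count register.
	 */
	printf("lba %#08x sectors %#04x\n",
	    ATA_MAKE_LBA(off, pkt_size), ATA_MAKE_SECTORS(pkt_size));
	/* Expected output: lba 0x018000 sectors 0x40 */
	return (0);
}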
*/ if (((vp == NULL) || (vp->type == VENDOR_UNKNOWN)) && (devtype == CC_DT_SCSI)) errx(1, "Unsupported device"); retval = fw_get_timeout(device, vp, retry_count, timeout); if (retval != 0) { warnx("Unable to get a firmware download timeout value"); goto bailout; } buf = fw_read_img(device, retry_count, timeout, quiet, fw_img_path, vp, &img_size); if (buf == NULL) { retval = 1; goto bailout; } if (!confirmed) { fprintf(stdout, "You are about to download firmware image (%s)" " into the following device:\n", fw_img_path); if (devtype == CC_DT_SCSI) { if (scsidoinquiry(device, argc, argv, combinedopt, 0, 5000) != 0) { warnx("Error sending inquiry"); retval = 1; goto bailout; } } else { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); free(ident_buf); } fprintf(stdout, "Using a timeout of %u ms, which is %s.\n", vp->timeout_ms, fw_timeout_desc_table[vp->timeout_type].timeout_desc); fprintf(stdout, "\nIt may damage your drive. "); if (!get_confirmation()) { retval = 1; goto bailout; } } if ((sim_mode != 0) && (quiet == 0)) fprintf(stdout, "Running in simulation mode\n"); if (fw_download_img(device, vp, buf, img_size, sim_mode, printerrors, quiet, retry_count, vp->timeout_ms, fw_img_path, devtype) != 0) { fprintf(stderr, "Firmware download failed\n"); retval = 1; goto bailout; } else if (quiet == 0) fprintf(stdout, "Firmware download successful\n"); bailout: free(buf); return (retval); } Index: head/sbin/camcontrol/persist.c =================================================================== --- head/sbin/camcontrol/persist.c (revision 300546) +++ head/sbin/camcontrol/persist.c (revision 300547) @@ -1,964 +1,963 @@ /*- * Copyright (c) 2013 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Ken Merry (Spectra Logic Corporation) */ /* * SCSI Persistent Reservation support for camcontrol(8). 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "camcontrol.h" struct persist_transport_id { struct scsi_transportid_header *hdr; unsigned int alloc_len; STAILQ_ENTRY(persist_transport_id) links; }; /* * Service Actions for PERSISTENT RESERVE IN. */ static struct scsi_nv persist_in_actions[] = { { "read_keys", SPRI_RK }, { "read_reservation", SPRI_RR }, { "report_capabilities", SPRI_RC }, { "read_full_status", SPRI_RS } }; /* * Service Actions for PERSISTENT RESERVE OUT. */ static struct scsi_nv persist_out_actions[] = { { "register", SPRO_REGISTER }, { "reserve", SPRO_RESERVE }, { "release" , SPRO_RELEASE }, { "clear", SPRO_CLEAR }, { "preempt", SPRO_PREEMPT }, { "preempt_abort", SPRO_PRE_ABO }, { "register_ignore", SPRO_REG_IGNO }, { "register_move", SPRO_REG_MOVE }, { "replace_lost", SPRO_REPL_LOST_RES } }; /* * Known reservation scopes. As of SPC-4, only LU_SCOPE is used in the * spec. The others are obsolete. */ static struct scsi_nv persist_scope_table[] = { { "lun", SPR_LU_SCOPE }, { "extent", SPR_EXTENT_SCOPE }, { "element", SPR_ELEMENT_SCOPE } }; /* * Reservation types. The longer name for a given reservation type is * listed first, so that it makes more sense when we print out the * reservation type. We step through the table linearly when looking for * the text name for a particular numeric reservation type value. */ static struct scsi_nv persist_type_table[] = { { "read_shared", SPR_TYPE_RD_SHARED }, { "write_exclusive", SPR_TYPE_WR_EX }, { "wr_ex", SPR_TYPE_WR_EX }, { "read_exclusive", SPR_TYPE_RD_EX }, { "rd_ex", SPR_TYPE_RD_EX }, { "exclusive_access", SPR_TYPE_EX_AC }, { "ex_ac", SPR_TYPE_EX_AC }, { "write_exclusive_reg_only", SPR_TYPE_WR_EX_RO }, { "wr_ex_ro", SPR_TYPE_WR_EX_RO }, { "exclusive_access_reg_only", SPR_TYPE_EX_AC_RO }, { "ex_ac_ro", SPR_TYPE_EX_AC_RO }, { "write_exclusive_all_regs", SPR_TYPE_WR_EX_AR }, { "wr_ex_ar", SPR_TYPE_WR_EX_AR }, { "exclusive_access_all_regs", SPR_TYPE_EX_AC_AR }, { "ex_ac_ar", SPR_TYPE_EX_AC_AR } }; /* * Print out the standard scope/type field. */ static void persist_print_scopetype(uint8_t scopetype) { const char *tmpstr; int num_entries; num_entries = sizeof(persist_scope_table) / sizeof(persist_scope_table[0]); tmpstr = scsi_nv_to_str(persist_scope_table, num_entries, scopetype & SPR_SCOPE_MASK); fprintf(stdout, "Scope: %s (%#x)\n", (tmpstr != NULL) ? tmpstr : "Unknown", (scopetype & SPR_SCOPE_MASK) >> SPR_SCOPE_SHIFT); num_entries = sizeof(persist_type_table) / sizeof(persist_type_table[0]); tmpstr = scsi_nv_to_str(persist_type_table, num_entries, scopetype & SPR_TYPE_MASK); fprintf(stdout, "Type: %s (%#x)\n", (tmpstr != NULL) ? tmpstr : "Unknown", scopetype & SPR_TYPE_MASK); } static void persist_print_transportid(uint8_t *buf, uint32_t len) { struct sbuf *sb; sb = sbuf_new_auto(); if (sb == NULL) fprintf(stderr, "Unable to allocate sbuf\n"); scsi_transportid_sbuf(sb, (struct scsi_transportid_header *)buf, len); sbuf_finish(sb); fprintf(stdout, "%s\n", sbuf_data(sb)); sbuf_delete(sb); } /* * Print out a persistent reservation. This is used with the READ * RESERVATION (0x01) service action of the PERSISTENT RESERVE IN command. 
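The ordering rule spelled out above (the long name first, first match wins on a linear scan) is easy to demonstrate with a stand-alone lookup. The helper below is a local stand-in for scsi_nv_to_str(), not the libcam routine itself, and the numeric value is illustrative rather than the real SPR_TYPE_WR_EX constant; the two names are taken from persist_type_table.

#include <stdint.h>
#include <stdio.h>

struct nv {				/* local stand-in for struct scsi_nv */
	const char	*name;
	uint64_t	 value;
};

/* Both names map to the same value; the long form is listed first. */
static const struct nv type_table[] = {
	{ "write_exclusive", 1 },	/* value is illustrative only */
	{ "wr_ex", 1 },
};

static const char *
nv_to_str(const struct nv *table, int entries, uint64_t value)
{
	int i;

	for (i = 0; i < entries; i++)
		if (table[i].value == value)
			return (table[i].name);	/* first hit wins */
	return (NULL);
}

int
main(void)
{
	/* Prints "write_exclusive": the reader-friendly long form. */
	printf("%s\n", nv_to_str(type_table, 2, 1));
	return (0);
}

Swap the two entries and the same lookup would print the terse "wr_ex" instead, which is why the table keeps the longer spelling first.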
*/ static void persist_print_res(struct scsi_per_res_in_header *hdr, uint32_t valid_len) { uint32_t length; struct scsi_per_res_in_rsrv *res; length = scsi_4btoul(hdr->length); length = MIN(length, valid_len); res = (struct scsi_per_res_in_rsrv *)hdr; if (length < sizeof(res->data) - sizeof(res->data.extent_length)) { if (length == 0) fprintf(stdout, "No reservations.\n"); else warnx("unable to print reservation, only got %u " "valid bytes", length); return; } fprintf(stdout, "PRgeneration: %#x\n", scsi_4btoul(res->header.generation)); fprintf(stdout, "Reservation Key: %#jx\n", (uintmax_t)scsi_8btou64(res->data.reservation)); fprintf(stdout, "Scope address: %#x\n", scsi_4btoul(res->data.scope_addr)); persist_print_scopetype(res->data.scopetype); fprintf(stdout, "Extent length: %u\n", scsi_2btoul(res->data.extent_length)); } /* * Print out persistent reservation keys. This is used with the READ KEYS * service action of the PERSISTENT RESERVE IN command. */ static void persist_print_keys(struct scsi_per_res_in_header *hdr, uint32_t valid_len) { uint32_t length, num_keys, i; struct scsi_per_res_key *key; length = scsi_4btoul(hdr->length); length = MIN(length, valid_len); num_keys = length / sizeof(*key); fprintf(stdout, "PRgeneration: %#x\n", scsi_4btoul(hdr->generation)); fprintf(stdout, "%u key%s%s\n", num_keys, (num_keys == 1) ? "" : "s", (num_keys == 0) ? "." : ":"); for (i = 0, key = (struct scsi_per_res_key *)&hdr[1]; i < num_keys; i++, key++) { fprintf(stdout, "%u: %#jx\n", i, (uintmax_t)scsi_8btou64(key->key)); } } /* * Print out persistent reservation capabilities. This is used with the * REPORT CAPABILITIES service action of the PERSISTENT RESERVE IN command. */ static void persist_print_cap(struct scsi_per_res_cap *cap, uint32_t valid_len) { uint32_t length; int check_type_mask = 0; length = scsi_2btoul(cap->length); length = MIN(length, valid_len); if (length < __offsetof(struct scsi_per_res_cap, type_mask)) { fprintf(stdout, "Insufficient data (%u bytes) to report " "full capabilities\n", length); return; } if (length >= __offsetof(struct scsi_per_res_cap, reserved)) check_type_mask = 1; fprintf(stdout, "Replace Lost Reservation Capable (RLR_C): %d\n", (cap->flags1 & SPRI_RLR_C) ? 1 : 0); fprintf(stdout, "Compatible Reservation Handling (CRH): %d\n", (cap->flags1 & SPRI_CRH) ? 1 : 0); fprintf(stdout, "Specify Initiator Ports Capable (SIP_C): %d\n", (cap->flags1 & SPRI_SIP_C) ? 1 : 0); fprintf(stdout, "All Target Ports Capable (ATP_C): %d\n", (cap->flags1 & SPRI_ATP_C) ? 1 : 0); fprintf(stdout, "Persist Through Power Loss Capable (PTPL_C): %d\n", (cap->flags1 & SPRI_PTPL_C) ? 1 : 0); fprintf(stdout, "ALLOW COMMANDS field: (%#x)\n", (cap->flags2 & SPRI_ALLOW_CMD_MASK) >> SPRI_ALLOW_CMD_SHIFT); /* * These cases are cut-and-pasted from SPC4r36l. There is no * succinct way to describe these otherwise, and even with the * verbose description, the user will probably have to refer to * the spec to fully understand what is going on. 
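All of the multi-byte fields these print routines pull apart are big-endian on the wire; that is what scsi_2btoul(), scsi_4btoul() and scsi_8btou64() unpack. A stand-alone equivalent, using hypothetical local helper names and a made-up READ KEYS parameter list, looks like this:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the libcam byte-order helpers. */
static uint32_t
be32dec_local(const uint8_t *b)
{
	return (((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	    ((uint32_t)b[2] << 8) | b[3]);
}

static uint64_t
be64dec_local(const uint8_t *b)
{
	return (((uint64_t)be32dec_local(b) << 32) | be32dec_local(b + 4));
}

int
main(void)
{
	/*
	 * Made-up PERSISTENT RESERVE IN (READ KEYS) data: a 4-byte
	 * PRgeneration, a 4-byte additional length, then one 8-byte key.
	 */
	uint8_t data[16] = {
		0x00, 0x00, 0x00, 0x07,		/* PRgeneration 7 */
		0x00, 0x00, 0x00, 0x08,		/* 8 bytes of keys follow */
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
	};

	printf("PRgeneration: %#x\n", be32dec_local(&data[0]));
	printf("1 key:\n0: %#jx\n", (uintmax_t)be64dec_local(&data[8]));
	return (0);
}

FreeBSD's <sys/endian.h> also provides be32dec()/be64dec() with the same behavior; the local copies just keep the sketch free of OS-specific headers.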
*/ switch (cap->flags2 & SPRI_ALLOW_CMD_MASK) { case SPRI_ALLOW_1: fprintf(stdout, " The device server allows the TEST UNIT READY command through Write\n" " Exclusive type reservations and Exclusive Access type reservations\n" " and does not provide information about whether the following commands\n" " are allowed through Write Exclusive type reservations:\n" " a) the MODE SENSE command, READ ATTRIBUTE command, READ BUFFER\n" " command, RECEIVE COPY RESULTS command, RECEIVE DIAGNOSTIC\n" " RESULTS command, REPORT SUPPORTED OPERATION CODES command,\n" " and REPORT SUPPORTED TASK MANAGEMENT FUNCTION command; and\n" " b) the READ DEFECT DATA command (see SBC-3).\n"); break; case SPRI_ALLOW_2: fprintf(stdout, " The device server allows the TEST UNIT READY command through Write\n" " Exclusive type reservations and Exclusive Access type reservations\n" " and does not allow the following commands through Write Exclusive type\n" " reservations:\n" " a) the MODE SENSE command, READ ATTRIBUTE command, READ BUFFER\n" " command, RECEIVE DIAGNOSTIC RESULTS command, REPORT SUPPORTED\n" " OPERATION CODES command, and REPORT SUPPORTED TASK MANAGEMENT\n" " FUNCTION command; and\n" " b) the READ DEFECT DATA command.\n" " The device server does not allow the RECEIVE COPY RESULTS command\n" " through Write Exclusive type reservations or Exclusive Access type\n" " reservations.\n"); break; case SPRI_ALLOW_3: fprintf(stdout, " The device server allows the TEST UNIT READY command through Write\n" " Exclusive type reservations and Exclusive Access type reservations\n" " and allows the following commands through Write Exclusive type\n" " reservations:\n" " a) the MODE SENSE command, READ ATTRIBUTE command, READ BUFFER\n" " command, RECEIVE DIAGNOSTIC RESULTS command, REPORT SUPPORTED\n" " OPERATION CODES command, and REPORT SUPPORTED TASK MANAGEMENT\n" " FUNCTION command; and\n" " b) the READ DEFECT DATA command.\n" " The device server does not allow the RECEIVE COPY RESULTS command\n" " through Write Exclusive type reservations or Exclusive Access type\n" " reservations.\n"); break; case SPRI_ALLOW_4: fprintf(stdout, " The device server allows the TEST UNIT READY command and the RECEIVE\n" " COPY RESULTS command through Write Exclusive type reservations and\n" " Exclusive Access type reservations and allows the following commands\n" " through Write Exclusive type reservations:\n" " a) the MODE SENSE command, READ ATTRIBUTE command, READ BUFFER\n" " command, RECEIVE DIAGNOSTIC RESULTS command, REPORT SUPPORTED\n" " OPERATION CODES command, and REPORT SUPPORTED TASK MANAGEMENT\n" " FUNCTION command; and\n" " b) the READ DEFECT DATA command.\n"); break; case SPRI_ALLOW_NA: fprintf(stdout, " No information is provided about whether certain commands are allowed\n" " through certain types of persistent reservations.\n"); break; default: fprintf(stdout, " Unknown ALLOW COMMANDS value %#x\n", (cap->flags2 & SPRI_ALLOW_CMD_MASK) >> SPRI_ALLOW_CMD_SHIFT); break; } fprintf(stdout, "Persist Through Power Loss Activated (PTPL_A): %d\n", (cap->flags2 & SPRI_PTPL_A) ? 1 : 0); if ((check_type_mask != 0) && (cap->flags2 & SPRI_TMV)) { fprintf(stdout, "Supported Persistent Reservation Types:\n"); fprintf(stdout, " Write Exclusive - All Registrants " "(WR_EX_AR): %d\n", (cap->type_mask[0] & SPRI_TM_WR_EX_AR)? 1 : 0); fprintf(stdout, " Exclusive Access - Registrants Only " "(EX_AC_RO): %d\n", (cap->type_mask[0] & SPRI_TM_EX_AC_RO) ? 
1 : 0); fprintf(stdout, " Write Exclusive - Registrants Only " "(WR_EX_RO): %d\n", (cap->type_mask[0] & SPRI_TM_WR_EX_RO)? 1 : 0); fprintf(stdout, " Exclusive Access (EX_AC): %d\n", (cap->type_mask[0] & SPRI_TM_EX_AC) ? 1 : 0); fprintf(stdout, " Write Exclusive (WR_EX): %d\n", (cap->type_mask[0] & SPRI_TM_WR_EX) ? 1 : 0); fprintf(stdout, " Exclusive Access - All Registrants " "(EX_AC_AR): %d\n", (cap->type_mask[1] & SPRI_TM_EX_AC_AR) ? 1 : 0); } else { fprintf(stdout, "Persistent Reservation Type Mask is NOT " "valid\n"); } } static void persist_print_full(struct scsi_per_res_in_header *hdr, uint32_t valid_len) { uint32_t length, len_to_go = 0; struct scsi_per_res_in_full_desc *desc; uint8_t *cur_pos; int i; length = scsi_4btoul(hdr->length); length = MIN(length, valid_len); if (length < sizeof(*desc)) { if (length == 0) fprintf(stdout, "No reservations.\n"); else warnx("unable to print reservation, only got %u " "valid bytes", length); return; } fprintf(stdout, "PRgeneration: %#x\n", scsi_4btoul(hdr->generation)); cur_pos = (uint8_t *)&hdr[1]; for (len_to_go = length, i = 0, desc = (struct scsi_per_res_in_full_desc *)cur_pos; len_to_go >= sizeof(*desc); desc = (struct scsi_per_res_in_full_desc *)cur_pos, i++) { uint32_t additional_length, cur_length; fprintf(stdout, "Reservation Key: %#jx\n", (uintmax_t)scsi_8btou64(desc->res_key.key)); fprintf(stdout, "All Target Ports (ALL_TG_PT): %d\n", (desc->flags & SPRI_FULL_ALL_TG_PT) ? 1 : 0); fprintf(stdout, "Reservation Holder (R_HOLDER): %d\n", (desc->flags & SPRI_FULL_R_HOLDER) ? 1 : 0); if (desc->flags & SPRI_FULL_R_HOLDER) persist_print_scopetype(desc->scopetype); if ((desc->flags & SPRI_FULL_ALL_TG_PT) == 0) fprintf(stdout, "Relative Target Port ID: %#x\n", scsi_2btoul(desc->rel_trgt_port_id)); additional_length = scsi_4btoul(desc->additional_length); persist_print_transportid(desc->transport_id, additional_length); cur_length = sizeof(*desc) + additional_length; len_to_go -= cur_length; cur_pos += cur_length; } } int scsipersist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbosemode, int err_recover) { union ccb *ccb = NULL; int c, in = 0, out = 0; int action = -1, num_ids = 0; int error = 0; uint32_t res_len = 0; unsigned long rel_tgt_port = 0; uint8_t *res_buf = NULL; int scope = SPR_LU_SCOPE, res_type = 0; struct persist_transport_id *id, *id2; STAILQ_HEAD(, persist_transport_id) transport_id_list; uint64_t key = 0, sa_key = 0; struct scsi_nv *table = NULL; size_t table_size = 0, id_len = 0; uint32_t valid_len = 0; int all_tg_pt = 0, aptpl = 0, spec_i_pt = 0, unreg = 0,rel_port_set = 0; STAILQ_INIT(&transport_id_list); ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); error = 1; goto bailout; } - bzero(&(&ccb->ccb_h)[1], - sizeof(union ccb) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': all_tg_pt = 1; break; case 'I': { int error_str_len = 128; char error_str[error_str_len]; char *id_str; id = malloc(sizeof(*id)); if (id == NULL) { warnx("%s: error allocating %zu bytes", __func__, sizeof(*id)); error = 1; goto bailout; } bzero(id, sizeof(*id)); id_str = strdup(optarg); if (id_str == NULL) { warnx("%s: error duplicating string %s", __func__, optarg); free(id); error = 1; goto bailout; } error = scsi_parse_transportid(id_str, &id->hdr, &id->alloc_len, error_str, error_str_len); if (error != 0) { warnx("%s", error_str); error = 1; free(id); 
free(id_str); goto bailout; } free(id_str); STAILQ_INSERT_TAIL(&transport_id_list, id, links); num_ids++; id_len += id->alloc_len; break; } case 'k': case 'K': { char *endptr; uint64_t tmpval; tmpval = strtoumax(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid key argument %s", __func__, optarg); error = 1; goto bailout; } if (c == 'k') { key = tmpval; } else { sa_key = tmpval; } break; } case 'i': case 'o': { scsi_nv_status status; int table_entry = 0; if (c == 'i') { in = 1; table = persist_in_actions; table_size = sizeof(persist_in_actions) / sizeof(persist_in_actions[0]); } else { out = 1; table = persist_out_actions; table_size = sizeof(persist_out_actions) / sizeof(persist_out_actions[0]); } if ((in + out) > 1) { warnx("%s: only one in (-i) or out (-o) " "action is allowed", __func__); error = 1; goto bailout; } status = scsi_get_nv(table, table_size, optarg, &table_entry,SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) action = table[table_entry].value; else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", in ? "in" : "out", optarg); error = 1; goto bailout; } break; } case 'p': aptpl = 1; break; case 'R': { char *endptr; rel_tgt_port = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid relative target port %s", __func__, optarg); error = 1; goto bailout; } rel_port_set = 1; break; } case 's': { size_t scope_size; struct scsi_nv *scope_table = NULL; scsi_nv_status status; int table_entry = 0; char *endptr; /* * First check to see if the user gave us a numeric * argument. If so, we'll try using it. */ if (isdigit(optarg[0])) { scope = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid scope %s", __func__, optarg); error = 1; goto bailout; } scope = (scope << SPR_SCOPE_SHIFT) & SPR_SCOPE_MASK; break; } scope_size = sizeof(persist_scope_table) / sizeof(persist_scope_table[0]); scope_table = persist_scope_table; status = scsi_get_nv(scope_table, scope_size, optarg, &table_entry,SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) scope = scope_table[table_entry].value; else { warnx("%s: %s scope %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", optarg); error = 1; goto bailout; } break; } case 'S': spec_i_pt = 1; break; case 'T': { size_t res_type_size; struct scsi_nv *rtype_table = NULL; scsi_nv_status status; char *endptr; int table_entry = 0; /* * First check to see if the user gave us a numeric * argument. If so, we'll try using it. */ if (isdigit(optarg[0])) { res_type = strtol(optarg, &endptr, 0); if (*endptr != '\0') { warnx("%s: invalid reservation type %s", __func__, optarg); error = 1; goto bailout; } break; } res_type_size = sizeof(persist_type_table) / sizeof(persist_type_table[0]); rtype_table = persist_type_table; status = scsi_get_nv(rtype_table, res_type_size, optarg, &table_entry, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) res_type = rtype_table[table_entry].value; else { warnx("%s: %s reservation type %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", optarg); error = 1; goto bailout; } break; } case 'U': unreg = 1; break; default: break; } } if ((in + out) != 1) { warnx("%s: you must specify one of -i or -o", __func__); error = 1; goto bailout; } /* * Note that we don't really try to figure out whether the user * needs to specify one or both keys. There are a number of * scenarios, and sometimes 0 is a valid and desired value. 
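 *
 * As an illustration, hypothetical invocations pieced together from the
 * option handling above ("da0" merely stands in for a real device):
 *
 *	camcontrol persist da0 -i read_keys
 *	camcontrol persist da0 -o register -K 0xabcd
 *	camcontrol persist da0 -o reserve -k 0xabcd -T write_exclusive
 *
 * The register example leaves the reservation key at 0 (no -k) and passes
 * the new key with -K, which is one of the cases where 0 is exactly the
 * value that is wanted.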
*/ if (in != 0) { switch (action) { case SPRI_RK: case SPRI_RR: case SPRI_RS: /* * Allocate the maximum length possible for these * service actions. According to the spec, the * target is supposed to return the available * length in the header, regardless of the * allocation length. In practice, though, with * the READ FULL STATUS (SPRI_RS) service action, * some Seagate drives (in particular a * Constellation ES, ) * don't return the available length if you only * allocate the length of the header. So just * allocate the maximum here so we don't miss * anything. */ res_len = SPRI_MAX_LEN; break; case SPRI_RC: res_len = sizeof(struct scsi_per_res_cap); break; default: /* In theory we should catch this above */ warnx("%s: invalid action %d", __func__, action); error = 1; goto bailout; break; } } else { /* * XXX KDM need to add length for transport IDs for the * register and move service action and the register * service action with the SPEC_I_PT bit set. */ if (action == SPRO_REG_MOVE) { if (num_ids != 1) { warnx("%s: register and move requires a " "single transport ID (-I)", __func__); error = 1; goto bailout; } if (rel_port_set == 0) { warnx("%s: register and move requires a " "relative target port (-R)", __func__); error = 1; goto bailout; } res_len = sizeof(struct scsi_per_res_reg_move) + id_len; } else { res_len = sizeof(struct scsi_per_res_out_parms); if ((action == SPRO_REGISTER) && (num_ids != 0)) { /* * If the user specifies any IDs with the * register service action, turn on the * spec_i_pt bit. */ spec_i_pt = 1; res_len += id_len; res_len += sizeof(struct scsi_per_res_out_trans_ids); } } } retry: if (res_buf != NULL) { free(res_buf); res_buf = NULL; } res_buf = malloc(res_len); if (res_buf == NULL) { warn("%s: error allocating %d bytes", __func__, res_len); error = 1; goto bailout; } bzero(res_buf, res_len); if (in != 0) { scsi_persistent_reserve_in(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ action, /*data_ptr*/ res_buf, /*dxfer_len*/ res_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? 
timeout :5000); } else { switch (action) { case SPRO_REGISTER: if (spec_i_pt != 0) { struct scsi_per_res_out_trans_ids *id_hdr; uint8_t *bufptr; bufptr = res_buf + sizeof(struct scsi_per_res_out_parms) + sizeof(struct scsi_per_res_out_trans_ids); STAILQ_FOREACH(id, &transport_id_list, links) { bcopy(id->hdr, bufptr, id->alloc_len); bufptr += id->alloc_len; } id_hdr = (struct scsi_per_res_out_trans_ids *) (res_buf + sizeof(struct scsi_per_res_out_parms)); scsi_ulto4b(id_len, id_hdr->additional_length); } case SPRO_REG_IGNO: case SPRO_PREEMPT: case SPRO_PRE_ABO: case SPRO_RESERVE: case SPRO_RELEASE: case SPRO_CLEAR: case SPRO_REPL_LOST_RES: { struct scsi_per_res_out_parms *parms; parms = (struct scsi_per_res_out_parms *)res_buf; scsi_u64to8b(key, parms->res_key.key); scsi_u64to8b(sa_key, parms->serv_act_res_key); if (spec_i_pt != 0) parms->flags |= SPR_SPEC_I_PT; if (all_tg_pt != 0) parms->flags |= SPR_ALL_TG_PT; if (aptpl != 0) parms->flags |= SPR_APTPL; break; } case SPRO_REG_MOVE: { struct scsi_per_res_reg_move *reg_move; uint8_t *bufptr; reg_move = (struct scsi_per_res_reg_move *)res_buf; scsi_u64to8b(key, reg_move->res_key.key); scsi_u64to8b(sa_key, reg_move->serv_act_res_key); if (unreg != 0) reg_move->flags |= SPR_REG_MOVE_UNREG; if (aptpl != 0) reg_move->flags |= SPR_REG_MOVE_APTPL; scsi_ulto2b(rel_tgt_port, reg_move->rel_trgt_port_id); id = STAILQ_FIRST(&transport_id_list); /* * This shouldn't happen, since we already checked * the number of IDs above. */ if (id == NULL) { warnx("%s: No transport IDs found!", __func__); error = 1; goto bailout; } bufptr = (uint8_t *)®_move[1]; bcopy(id->hdr, bufptr, id->alloc_len); scsi_ulto4b(id->alloc_len, reg_move->transport_id_length); break; } default: break; } scsi_persistent_reserve_out(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ action, /*scope*/ scope, /*res_type*/ res_type, /*data_ptr*/ res_buf, /*dxfer_len*/ res_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ?timeout :5000); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (err_recover != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending PERSISTENT RESERVE %s", (in != 0) ? 
"IN" : "OUT"); if (verbosemode != 0) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } if (in == 0) goto bailout; valid_len = res_len - ccb->csio.resid; switch (action) { case SPRI_RK: case SPRI_RR: case SPRI_RS: { struct scsi_per_res_in_header *hdr; uint32_t hdr_len; if (valid_len < sizeof(*hdr)) { warnx("%s: only got %d valid bytes, need %zd", __func__, valid_len, sizeof(*hdr)); error = 1; goto bailout; } hdr = (struct scsi_per_res_in_header *)res_buf; hdr_len = scsi_4btoul(hdr->length); if (hdr_len > (res_len - sizeof(*hdr))) { res_len = hdr_len + sizeof(*hdr); goto retry; } if (action == SPRI_RK) { persist_print_keys(hdr, valid_len); } else if (action == SPRI_RR) { persist_print_res(hdr, valid_len); } else { persist_print_full(hdr, valid_len); } break; } case SPRI_RC: { struct scsi_per_res_cap *cap; uint32_t cap_len; if (valid_len < sizeof(*cap)) { warnx("%s: only got %u valid bytes, need %zd", __func__, valid_len, sizeof(*cap)); error = 1; goto bailout; } cap = (struct scsi_per_res_cap *)res_buf; cap_len = scsi_2btoul(cap->length); if (cap_len != sizeof(*cap)) { /* * We should be able to deal with this, * it's just more trouble. */ warnx("%s: reported size %u is different " "than expected size %zd", __func__, cap_len, sizeof(*cap)); } /* * If there is more data available, grab it all, * even though we don't really know what to do with * the extra data since it obviously wasn't in the * spec when this code was written. */ if (cap_len > res_len) { res_len = cap_len; goto retry; } persist_print_cap(cap, valid_len); break; } default: break; } bailout: free(res_buf); if (ccb != NULL) cam_freeccb(ccb); STAILQ_FOREACH_SAFE(id, &transport_id_list, links, id2) { STAILQ_REMOVE(&transport_id_list, id, persist_transport_id, links); free(id); } return (error); } Index: head/sbin/iscontrol/fsm.c =================================================================== --- head/sbin/iscontrol/fsm.c (revision 300546) +++ head/sbin/iscontrol/fsm.c (revision 300547) @@ -1,757 +1,757 @@ /*- * Copyright (c) 2005-2010 Daniel Braniss * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ /* | $Id: fsm.c,v 2.8 2007/05/19 16:34:21 danny Exp danny $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iscontrol.h" typedef enum { T1 = 1, T2, /*T3,*/ T4, T5, /*T6,*/ T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T18 } trans_t; /* | now supports IPV6 | thanks to: | Hajimu UMEMOTO @ Internet Mutual Aid Society Yokohama, Japan | ume@mahoroba.org ume@{,jp.}FreeBSD.org | http://www.imasy.org/~ume/ */ static trans_t tcpConnect(isess_t *sess) { isc_opt_t *op = sess->op; int val, sv_errno, soc; struct addrinfo *res, *res0, hints; char pbuf[10]; debug_called(3); if(sess->flags & (SESS_RECONNECT|SESS_REDIRECT)) { syslog(LOG_INFO, "%s", (sess->flags & SESS_RECONNECT) ? "Reconnect": "Redirected"); debug(1, "%s", (sess->flags & SESS_RECONNECT) ? "Reconnect": "Redirected"); shutdown(sess->soc, SHUT_RDWR); //close(sess->soc); sess->soc = -1; sess->flags &= ~SESS_CONNECTED; if(sess->flags & SESS_REDIRECT) { sess->redirect_cnt++; sess->flags |= SESS_RECONNECT; } else sleep(2); // XXX: actually should be ? #ifdef notyet { time_t sec; // make sure we are not in a loop // XXX: this code has to be tested sec = time(0) - sess->reconnect_time; if(sec > (5*60)) { // if we've been connected for more that 5 minutes // then just reconnect sess->reconnect_time = sec; sess->reconnect_cnt1 = 0; } else { // sess->reconnect_cnt1++; if((sec / sess->reconnect_cnt1) < 2) { // if less that 2 seconds from the last reconnect // we are most probably looping syslog(LOG_CRIT, "too many reconnects %d", sess->reconnect_cnt1); return 0; } } } #endif sess->reconnect_cnt++; } snprintf(pbuf, sizeof(pbuf), "%d", op->port); memset(&hints, 0, sizeof(hints)); hints.ai_family = PF_UNSPEC; hints.ai_socktype = SOCK_STREAM; debug(1, "targetAddress=%s port=%d", op->targetAddress, op->port); if((val = getaddrinfo(op->targetAddress, pbuf, &hints, &res0)) != 0) { fprintf(stderr, "getaddrinfo(%s): %s\n", op->targetAddress, gai_strerror(val)); return 0; } sess->flags &= ~SESS_CONNECTED; sv_errno = 0; soc = -1; for(res = res0; res; res = res->ai_next) { soc = socket(res->ai_family, res->ai_socktype, res->ai_protocol); if (soc == -1) continue; // from Patrick.Guelat@imp.ch: // iscontrol can be called without waiting for the socket entry to time out val = 1; if(setsockopt(soc, SOL_SOCKET, SO_REUSEADDR, &val, (socklen_t)sizeof(val)) < 0) { fprintf(stderr, "Cannot set socket SO_REUSEADDR %d: %s\n\n", errno, strerror(errno)); } if(connect(soc, res->ai_addr, res->ai_addrlen) == 0) break; sv_errno = errno; close(soc); soc = -1; } freeaddrinfo(res0); if(soc != -1) { sess->soc = soc; #if 0 struct timeval timeout; val = 1; if(setsockopt(sess->soc, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)) < 0) fprintf(stderr, "Cannot set socket KEEPALIVE option err=%d %s\n", errno, strerror(errno)); if(setsockopt(sess->soc, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) < 0) fprintf(stderr, "Cannot set socket NO delay option err=%d %s\n", errno, strerror(errno)); timeout.tv_sec = 10; timeout.tv_usec = 0; if((setsockopt(sess->soc, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)) < 0) || (setsockopt(sess->soc, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout)) < 0)) { fprintf(stderr, "Cannot set socket timeout to %ld err=%d %s\n", timeout.tv_sec, errno, strerror(errno)); } #endif #ifdef CURIOUS { int len = sizeof(val); if(getsockopt(sess->soc, SOL_SOCKET, SO_SNDBUF, &val, &len) == 0) 
fprintf(stderr, "was: SO_SNDBUF=%dK\n", val/1024); } #endif if(sess->op->sockbufsize) { val = sess->op->sockbufsize * 1024; if((setsockopt(sess->soc, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)) < 0) || (setsockopt(sess->soc, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)) < 0)) { fprintf(stderr, "Cannot set socket sndbuf & rcvbuf to %d err=%d %s\n", val, errno, strerror(errno)); return 0; } } sess->flags |= SESS_CONNECTED; return T1; } fprintf(stderr, "errno=%d\n", sv_errno); perror("connect"); switch(sv_errno) { case ECONNREFUSED: case EHOSTUNREACH: case ENETUNREACH: case ETIMEDOUT: if((sess->flags & SESS_REDIRECT) == 0) { if(strcmp(op->targetAddress, sess->target.address) != 0) { syslog(LOG_INFO, "reconnecting to original target address"); free(op->targetAddress); op->targetAddress = sess->target.address; op->port = sess->target.port; op->targetPortalGroupTag = sess->target.pgt; return T1; } } sleep(5); // for now ... return T1; default: return 0; // terminal error } } int setOptions(isess_t *sess, int flag) { isc_opt_t oop; char *sep; debug_called(3); bzero(&oop, sizeof(isc_opt_t)); if((flag & SESS_FULLFEATURE) == 0) { oop.initiatorName = sess->op->initiatorName; oop.targetAddress = sess->op->targetAddress; if(sess->op->targetName != 0) oop.targetName = sess->op->targetName; oop.maxRecvDataSegmentLength = sess->op->maxRecvDataSegmentLength; oop.maxXmitDataSegmentLength = sess->op->maxXmitDataSegmentLength; // XXX: oop.maxBurstLength = sess->op->maxBurstLength; oop.maxluns = sess->op->maxluns; } else { /* | turn on digestion only after login */ if(sess->op->headerDigest != NULL) { sep = strchr(sess->op->headerDigest, ','); if(sep == NULL) oop.headerDigest = sess->op->headerDigest; debug(1, "oop.headerDigest=%s", oop.headerDigest); } if(sess->op->dataDigest != NULL) { sep = strchr(sess->op->dataDigest, ','); if(sep == NULL) oop.dataDigest = sess->op->dataDigest; debug(1, "oop.dataDigest=%s", oop.dataDigest); } } if(ioctl(sess->fd, ISCSISETOPT, &oop)) { perror("ISCSISETOPT"); return -1; } return 0; } static trans_t startSession(isess_t *sess) { int n, fd, nfd; char *dev; debug_called(3); if((sess->flags & SESS_CONNECTED) == 0) { return T2; } if(sess->fd == -1) { fd = open(iscsidev, O_RDWR); if(fd < 0) { perror(iscsidev); return 0; } { // XXX: this has to go size_t n; n = sizeof(sess->isid); if(sysctlbyname("net.iscsi_initiator.isid", (void *)sess->isid, (size_t *)&n, 0, 0) != 0) perror("sysctlbyname"); } if(ioctl(fd, ISCSISETSES, &n)) { perror("ISCSISETSES"); return 0; } asprintf(&dev, "%s%d", iscsidev, n); nfd = open(dev, O_RDWR); if(nfd < 0) { perror(dev); free(dev); return 0; } free(dev); close(fd); sess->fd = nfd; if(setOptions(sess, 0) != 0) return -1; } if(ioctl(sess->fd, ISCSISETSOC, &sess->soc)) { perror("ISCSISETSOC"); return 0; } return T4; } isess_t *currsess; static void trap(int sig) { syslog(LOG_NOTICE, "trapped signal %d", sig); fprintf(stderr, "trapped signal %d\n", sig); switch(sig) { case SIGHUP: currsess->flags |= SESS_DISCONNECT; break; case SIGUSR1: currsess->flags |= SESS_RECONNECT; break; case SIGINT: case SIGTERM: default: return; // ignore } } static int doCAM(isess_t *sess) { char pathstr[1024]; union ccb *ccb; int i, n; if(ioctl(sess->fd, ISCSIGETCAM, &sess->cam) != 0) { syslog(LOG_WARNING, "ISCSIGETCAM failed: %d", errno); return 0; } debug(1, "nluns=%d", sess->cam.target_nluns); /* | for now will do this for each lun ... 
*/ for(n = i = 0; i < sess->cam.target_nluns; i++) { debug(2, "CAM path_id=%d target_id=%d", sess->cam.path_id, sess->cam.target_id); sess->camdev = cam_open_btl(sess->cam.path_id, sess->cam.target_id, i, O_RDWR, NULL); if(sess->camdev == NULL) { //syslog(LOG_WARNING, "%s", cam_errbuf); debug(3, "%s", cam_errbuf); continue; } cam_path_string(sess->camdev, pathstr, sizeof(pathstr)); debug(2, "pathstr=%s", pathstr); ccb = cam_getccb(sess->camdev); - bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_relsim) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->crs); ccb->ccb_h.func_code = XPT_REL_SIMQ; ccb->crs.release_flags = RELSIM_ADJUST_OPENINGS; ccb->crs.openings = sess->op->tags; if(cam_send_ccb(sess->camdev, ccb) < 0) debug(2, "%s", cam_errbuf); else if((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { syslog(LOG_WARNING, "XPT_REL_SIMQ CCB failed"); // cam_error_print(sess->camdev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } else { n++; syslog(LOG_INFO, "%s tagged openings now %d\n", pathstr, ccb->crs.openings); } cam_freeccb(ccb); cam_close_device(sess->camdev); } return n; } static trans_t supervise(isess_t *sess) { int sig, val; debug_called(3); if(strcmp(sess->op->sessionType, "Discovery") == 0) { sess->flags |= SESS_DISCONNECT; return T9; } if(vflag) printf("ready to go scsi\n"); if(setOptions(sess, SESS_FULLFEATURE) != 0) return 0; // failure if((sess->flags & SESS_FULLFEATURE) == 0) { if(daemon(0, 1) != 0) { perror("daemon"); exit(1); } if(sess->op->pidfile != NULL) { FILE *pidf; pidf = fopen(sess->op->pidfile, "w"); if(pidf != NULL) { fprintf(pidf, "%d\n", getpid()); fclose(pidf); } } openlog("iscontrol", LOG_CONS|LOG_PERROR|LOG_PID|LOG_NDELAY, LOG_KERN); syslog(LOG_INFO, "running"); currsess = sess; if(ioctl(sess->fd, ISCSISTART)) { perror("ISCSISTART"); return -1; } if(doCAM(sess) == 0) { syslog(LOG_WARNING, "no device found"); ioctl(sess->fd, ISCSISTOP); return T15; } } else { if(ioctl(sess->fd, ISCSIRESTART)) { perror("ISCSIRESTART"); return -1; } } signal(SIGINT, trap); signal(SIGHUP, trap); signal(SIGTERM, trap); sig = SIGUSR1; signal(sig, trap); if(ioctl(sess->fd, ISCSISIGNAL, &sig)) { perror("ISCSISIGNAL"); return -1; } sess->flags |= SESS_FULLFEATURE; sess->flags &= ~(SESS_REDIRECT | SESS_RECONNECT); if(vflag) printf("iscontrol: supervise starting main loop\n"); /* | the main loop - actually do nothing | all the work is done inside the kernel */ while((sess->flags & (SESS_REDIRECT|SESS_RECONNECT|SESS_DISCONNECT)) == 0) { // do something? // like sending a nop_out? 
sleep(60); } printf("iscontrol: supervise going down\n"); syslog(LOG_INFO, "sess flags=%x", sess->flags); sig = 0; if(ioctl(sess->fd, ISCSISIGNAL, &sig)) { perror("ISCSISIGNAL"); } if(sess->flags & SESS_DISCONNECT) { sess->flags &= ~SESS_FULLFEATURE; return T9; } else { val = 0; if(ioctl(sess->fd, ISCSISTOP, &val)) { perror("ISCSISTOP"); } sess->flags |= SESS_INITIALLOGIN1; } return T8; } static int handledDiscoveryResp(isess_t *sess, pdu_t *pp) { u_char *ptr; int len, n; debug_called(3); len = pp->ds_len; ptr = pp->ds_addr; while(len > 0) { if(*ptr != 0) printf("%s\n", ptr); n = strlen((char *)ptr) + 1; len -= n; ptr += n; } return 0; } static int doDiscovery(isess_t *sess) { pdu_t spp; text_req_t *tp = (text_req_t *)&spp.ipdu.bhs; debug_called(3); bzero(&spp, sizeof(pdu_t)); tp->cmd = ISCSI_TEXT_CMD /*| 0x40 */; // because of a bug in openiscsi-target tp->F = 1; tp->ttt = 0xffffffff; addText(&spp, "SendTargets=All"); return sendPDU(sess, &spp, handledDiscoveryResp); } static trans_t doLogin(isess_t *sess) { isc_opt_t *op = sess->op; int status, count; debug_called(3); if(op->chapSecret == NULL && op->tgtChapSecret == NULL) /* | don't need any security negotiation | or in other words: we don't have any secrets to exchange */ sess->csg = LON_PHASE; else sess->csg = SN_PHASE; if(sess->tsih) { sess->tsih = 0; // XXX: no 'reconnect' yet sess->flags &= ~SESS_NEGODONE; // XXX: KLUDGE } count = 10; // should be more than enough do { debug(3, "count=%d csg=%d", count, sess->csg); status = loginPhase(sess); if(count-- == 0) // just in case we get into a loop status = -1; } while(status == 0 && (sess->csg != FF_PHASE)); sess->flags &= ~SESS_INITIALLOGIN; debug(3, "status=%d", status); switch(status) { case 0: // all is ok ... sess->flags |= SESS_LOGGEDIN; if(strcmp(sess->op->sessionType, "Discovery") == 0) doDiscovery(sess); return T5; case 1: // redirect - temporary/permanent /* | start from scratch? */ sess->flags &= ~SESS_NEGODONE; sess->flags |= (SESS_REDIRECT | SESS_INITIALLOGIN1); syslog(LOG_DEBUG, "target sent REDIRECT"); return T7; case 2: // initiator terminal error return 0; case 3: // target terminal error -- could retry ... 
sleep(5); return T7; // lets try default: return 0; } } static int handleLogoutResp(isess_t *sess, pdu_t *pp) { if(sess->flags & SESS_DISCONNECT) { int val = 0; if(ioctl(sess->fd, ISCSISTOP, &val)) { perror("ISCSISTOP"); } return 0; } return T13; } static trans_t startLogout(isess_t *sess) { pdu_t spp; logout_req_t *p = (logout_req_t *)&spp.ipdu.bhs; bzero(&spp, sizeof(pdu_t)); p->cmd = ISCSI_LOGOUT_CMD| 0x40; p->reason = BIT(7) | 0; p->CID = htons(1); return sendPDU(sess, &spp, handleLogoutResp); } static trans_t inLogout(isess_t *sess) { if(sess->flags & SESS_RECONNECT) return T18; return 0; } typedef enum { S1, S2, /*S3,*/ S4, S5, S6, S7, S8 } state_t; /** S1: FREE S2: XPT_WAIT S4: IN_LOGIN S5: LOGGED_IN S6: IN_LOGOUT S7: LOGOUT_REQUESTED S8: CLEANUP_WAIT -------<-------------+ +--------->/ S1 \<----+ | T13| +->\ /<-+ \ | | / ---+--- \ \ | | / | T2 \ | | | T8 | |T1 | | | | | | / |T7 | | | | / | | | | | / | | | | V / / | | | ------- / / | | | / S2 \ / | | | \ / / | | | ---+--- / | | | |T4 / | | | V / | T18 | | ------- / | | | / S4 \ | | | \ / | | | ---+--- | T15 | | |T5 +--------+---------+ | | | /T16+-----+------+ | | | | / -+-----+--+ | | | | | / / S7 \ |T12| | | | | / +->\ /<-+ V V | | | / / -+----- ------- | | | / /T11 |T10 / S8 \ | | V / / V +----+ \ / | | ---+-+- ----+-- | ------- | | / S5 \T9 / S6 \<+ ^ | +-----\ /--->\ / T14 | | ------- --+----+------+T17 +---------------------------+ */ int fsm(isc_opt_t *op) { state_t state; isess_t *sess; if((sess = calloc(1, sizeof(isess_t))) == NULL) { // boy, is this a bad start ... fprintf(stderr, "no memory!\n"); return -1; } state = S1; sess->op = op; sess->fd = -1; sess->soc = -1; sess->target.address = strdup(op->targetAddress); sess->target.port = op->port; sess->target.pgt = op->targetPortalGroupTag; sess->flags = SESS_INITIALLOGIN | SESS_INITIALLOGIN1; do { switch(state) { case S1: switch(tcpConnect(sess)) { case T1: state = S2; break; default: state = S8; break; } break; case S2: switch(startSession(sess)) { case T2: state = S1; break; case T4: state = S4; break; default: state = S8; break; } break; case S4: switch(doLogin(sess)) { case T7: state = S1; break; case T5: state = S5; break; default: state = S8; break; } break; case S5: switch(supervise(sess)) { case T8: state = S1; break; case T9: state = S6; break; case T11: state = S7; break; case T15: state = S8; break; default: state = S8; break; } break; case S6: switch(startLogout(sess)) { case T13: state = S1; break; case T14: state = S6; break; case T16: state = S8; break; default: state = S8; break; } break; case S7: switch(inLogout(sess)) { case T18: state = S1; break; case T10: state = S6; break; case T12: state = S7; break; case T16: state = S8; break; default: state = S8; break; } break; case S8: // maybe do some clean up? syslog(LOG_INFO, "terminated"); return 0; } } while(1); } Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 300546) +++ head/sys/cam/cam_ccb.h (revision 300547) @@ -1,1365 +1,1369 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. */ CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */ CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */ CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */ CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */ CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */ CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */ CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */ CAM_DATA_MASK = 0x00240010,/* Data type mask */ CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */ CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */ CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. 
recovery*/ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */ CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/ CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/ CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */ /* Phase cognizant mode flags */ CAM_DIS_AUTOSRP = 0x01000000,/* Disable autosave/restore ptrs */ CAM_DIS_AUTODISC = 0x02000000,/* Disable auto disconnect */ CAM_TGT_CCB_AVAIL = 0x04000000,/* Target CCB available */ CAM_TGT_PHASE_MODE = 0x08000000,/* The SIM runs in phase mode */ CAM_MSGB_VALID = 0x10000000,/* Message buffer valid */ CAM_STATUS_VALID = 0x20000000,/* Status buffer valid */ CAM_DATAB_VALID = 0x40000000,/* Data buffer valid */ /* Host target Mode flags */ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */ CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. */ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */ CAM_SEND_STATUS = 0x40000000,/* Send status after data phase */ CAM_UNLOCKED = 0x80000000 /* Call callback without lock. */ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device give the sector size and * volume size. 
*/ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values. */ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_SCAN_TGT = 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Query device capacity and notify GEOM */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ } cam_xport; #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? 
DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; 
target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int16_t target_sprt; /* Flags for target mode support */ u_int32_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. 
*/ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. */ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 uint32_t aux; uint32_t unused; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ u_int tag_id; /* tag id from initator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * Definitions for the asynchronous callback CCB fields. 
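 *
 * For reference, a peripheral driver typically hooks these events with an
 * XPT_SASYNC_CB request roughly like the sketch below (in-kernel code;
 * my_async_cb and my_softc are hypothetical names, and xpt_setup_ccb() and
 * CAM_PRIORITY_NORMAL come from the wider CAM API rather than this header):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = my_async_cb;
 *	csa.callback_arg = my_softc;
 *	xpt_action((union ccb *)&csa);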
*/ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advance info might have changes */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed bac which are then interpreted between a per-system * contract number. */ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define 
CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; } xport_specific; }; /* * Calculate the geometry parameters for a device * give the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures. 
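* ccb_en_lun carries the enable flag used to turn target-mode processing for a LUN on or off, while the immediate notify / notify acknowledge pairs below carry task-management events from an initiator and the response to them; as noted further down, the low byte of the acknowledge arg holds one of the CAM_RSP_TMF_* response codes.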
*/ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tar for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Response information */ /* * Lower byte of arg is one of RESPONSE CODE values defined below * (subset of response codes from SPL-4 and FCP-4 specifications), * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION. */ #define CAM_RSP_TMF_COMPLETE 0x00 #define CAM_RSP_TMF_REJECTED 0x04 #define CAM_RSP_TMF_FAILED 0x05 #define CAM_RSP_TMF_SUCCEEDED 0x08 #define CAM_RSP_TMF_INCORRECT_LUN 0x09 }; /* HBA engine structures. */ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. 
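* As an illustrative example, a caller fetching a unit serial number would set buftype to CDAI_TYPE_SERIAL_NUM, point buf at local storage and pass that storage's size in bufsiz.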
If the * buffer is too small, provsiz will be larger than bufsiz. */ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; }; +#define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \ + bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h), \ + sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h)) + __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout); static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout); static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; 
csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = cdb_len; csio->tag_action = tag_action; } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action __unused, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: head/usr.sbin/camdd/camdd.c =================================================================== --- head/usr.sbin/camdd/camdd.c (revision 300546) +++ head/usr.sbin/camdd/camdd.c (revision 300547) @@ -1,3426 +1,3423 @@ /*- * Copyright (c) 1997-2007 Kenneth D. Merry * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Ken Merry (Spectra Logic Corporation) */ /* * This is eventually intended to be: * - A basic data transfer/copy utility * - A simple benchmark utility * - An example of how to use the asynchronous pass(4) driver interface. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { CAMDD_CMD_NONE = 0x00000000, CAMDD_CMD_HELP = 0x00000001, CAMDD_CMD_WRITE = 0x00000002, CAMDD_CMD_READ = 0x00000003 } camdd_cmdmask; typedef enum { CAMDD_ARG_NONE = 0x00000000, CAMDD_ARG_VERBOSE = 0x00000001, CAMDD_ARG_DEVICE = 0x00000002, CAMDD_ARG_BUS = 0x00000004, CAMDD_ARG_TARGET = 0x00000008, CAMDD_ARG_LUN = 0x00000010, CAMDD_ARG_UNIT = 0x00000020, CAMDD_ARG_TIMEOUT = 0x00000040, CAMDD_ARG_ERR_RECOVER = 0x00000080, CAMDD_ARG_RETRIES = 0x00000100 } camdd_argmask; typedef enum { CAMDD_DEV_NONE = 0x00, CAMDD_DEV_PASS = 0x01, CAMDD_DEV_FILE = 0x02 } camdd_dev_type; struct camdd_io_opts { camdd_dev_type dev_type; char *dev_name; uint64_t blocksize; uint64_t queue_depth; uint64_t offset; int min_cmd_size; int write_dev; uint64_t debug; }; typedef enum { CAMDD_BUF_NONE, CAMDD_BUF_DATA, CAMDD_BUF_INDIRECT } camdd_buf_type; struct camdd_buf_indirect { /* * Pointer to the source buffer. */ struct camdd_buf *src_buf; /* * Offset into the source buffer, in bytes. */ uint64_t offset; /* * Pointer to the starting point in the source buffer. */ uint8_t *start_ptr; /* * Length of this chunk in bytes. */ size_t len; }; struct camdd_buf_data { /* * Buffer allocated when we allocate this camdd_buf. This should * be the size of the blocksize for this device. */ uint8_t *buf; /* * The amount of backing store allocated in buf. Generally this * will be the blocksize of the device. */ uint32_t alloc_len; /* * The amount of data that was put into the buffer (on reads) or * the amount of data we have put onto the src_list so far (on * writes). */ uint32_t fill_len; /* * The amount of data that was not transferred. */ uint32_t resid; /* * Starting byte offset on the reader. */ uint64_t src_start_offset; /* * CCB used for pass(4) device targets. */ union ccb ccb; /* * Number of scatter/gather segments. 
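* This counts the entries in whichever of segs[] or iovec[] is in use, including the extra zero-filled padding segment that camdd_buf_sg_create() appends when extra_buf is set.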
*/ int sg_count; /* * Set if we had to tack on an extra buffer to round the transfer * up to a sector size. */ int extra_buf; /* * Scatter/gather list used generally when we're the writer for a * pass(4) device. */ bus_dma_segment_t *segs; /* * Scatter/gather list used generally when we're the writer for a * file or block device; */ struct iovec *iovec; }; union camdd_buf_types { struct camdd_buf_indirect indirect; struct camdd_buf_data data; }; typedef enum { CAMDD_STATUS_NONE, CAMDD_STATUS_OK, CAMDD_STATUS_SHORT_IO, CAMDD_STATUS_EOF, CAMDD_STATUS_ERROR } camdd_buf_status; struct camdd_buf { camdd_buf_type buf_type; union camdd_buf_types buf_type_spec; camdd_buf_status status; uint64_t lba; size_t len; /* * A reference count of how many indirect buffers point to this * buffer. */ int refcount; /* * A link back to our parent device. */ struct camdd_dev *dev; STAILQ_ENTRY(camdd_buf) links; STAILQ_ENTRY(camdd_buf) work_links; /* * A count of the buffers on the src_list. */ int src_count; /* * List of buffers from our partner thread that are the components * of this buffer for the I/O. Uses src_links. */ STAILQ_HEAD(,camdd_buf) src_list; STAILQ_ENTRY(camdd_buf) src_links; }; #define NUM_DEV_TYPES 2 struct camdd_dev_pass { int scsi_dev_type; struct cam_device *dev; uint64_t max_sector; uint32_t block_len; uint32_t cpi_maxio; }; typedef enum { CAMDD_FILE_NONE, CAMDD_FILE_REG, CAMDD_FILE_STD, CAMDD_FILE_PIPE, CAMDD_FILE_DISK, CAMDD_FILE_TAPE, CAMDD_FILE_TTY, CAMDD_FILE_MEM } camdd_file_type; typedef enum { CAMDD_FF_NONE = 0x00, CAMDD_FF_CAN_SEEK = 0x01 } camdd_file_flags; struct camdd_dev_file { int fd; struct stat sb; char filename[MAXPATHLEN + 1]; camdd_file_type file_type; camdd_file_flags file_flags; uint8_t *tmp_buf; }; struct camdd_dev_block { int fd; uint64_t size_bytes; uint32_t block_len; }; union camdd_dev_spec { struct camdd_dev_pass pass; struct camdd_dev_file file; struct camdd_dev_block block; }; typedef enum { CAMDD_DEV_FLAG_NONE = 0x00, CAMDD_DEV_FLAG_EOF = 0x01, CAMDD_DEV_FLAG_PEER_EOF = 0x02, CAMDD_DEV_FLAG_ACTIVE = 0x04, CAMDD_DEV_FLAG_EOF_SENT = 0x08, CAMDD_DEV_FLAG_EOF_QUEUED = 0x10 } camdd_dev_flags; struct camdd_dev { camdd_dev_type dev_type; union camdd_dev_spec dev_spec; camdd_dev_flags flags; char device_name[MAXPATHLEN+1]; uint32_t blocksize; uint32_t sector_size; uint64_t max_sector; uint64_t sector_io_limit; int min_cmd_size; int write_dev; int retry_count; int io_timeout; int debug; uint64_t start_offset_bytes; uint64_t next_io_pos_bytes; uint64_t next_peer_pos_bytes; uint64_t next_completion_pos_bytes; uint64_t peer_bytes_queued; uint64_t bytes_transferred; uint32_t target_queue_depth; uint32_t cur_active_io; uint8_t *extra_buf; uint32_t extra_buf_len; struct camdd_dev *peer_dev; pthread_mutex_t mutex; pthread_cond_t cond; int kq; int (*run)(struct camdd_dev *dev); int (*fetch)(struct camdd_dev *dev); /* * Buffers that are available for I/O. Uses links. */ STAILQ_HEAD(,camdd_buf) free_queue; /* * Free indirect buffers. These are used for breaking a large * buffer into multiple pieces. */ STAILQ_HEAD(,camdd_buf) free_indirect_queue; /* * Buffers that have been queued to the kernel. Uses links. */ STAILQ_HEAD(,camdd_buf) active_queue; /* * Will generally contain one of our buffers that is waiting for enough * I/O from our partner thread to be able to execute. This will * generally happen when our per-I/O-size is larger than the * partner thread's per-I/O-size. Uses links. 
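* As an illustrative example, if this device writes in 256k blocks but its reader partner delivers 64k buffers, the partially assembled 256k buffer sits here until four reader buffers have been attached to it.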
*/ STAILQ_HEAD(,camdd_buf) pending_queue; /* * Number of buffers on the pending queue */ int num_pending_queue; /* * Buffers that are filled and ready to execute. This is used when * our partner (reader) thread sends us blocks that are larger than * our blocksize, and so we have to split them into multiple pieces. */ STAILQ_HEAD(,camdd_buf) run_queue; /* * Number of buffers on the run queue. */ int num_run_queue; STAILQ_HEAD(,camdd_buf) reorder_queue; int num_reorder_queue; /* * Buffers that have been queued to us by our partner thread * (generally the reader thread) to be written out. Uses * work_links. */ STAILQ_HEAD(,camdd_buf) work_queue; /* * Buffers that have been completed by our partner thread. Uses * work_links. */ STAILQ_HEAD(,camdd_buf) peer_done_queue; /* * Number of buffers on the peer done queue. */ uint32_t num_peer_done_queue; /* * A list of buffers that we have queued to our peer thread. Uses * links. */ STAILQ_HEAD(,camdd_buf) peer_work_queue; /* * Number of buffers on the peer work queue. */ uint32_t num_peer_work_queue; }; static sem_t camdd_sem; static int need_exit = 0; static int error_exit = 0; static int need_status = 0; #ifndef min #define min(a, b) (a < b) ? a : b #endif /* * XXX KDM private copy of timespecsub(). This is normally defined in * sys/time.h, but is only enabled in the kernel. If that definition is * enabled in userland, it breaks the build of libnetbsd. */ #ifndef timespecsub #define timespecsub(vvp, uvp) \ do { \ (vvp)->tv_sec -= (uvp)->tv_sec; \ (vvp)->tv_nsec -= (uvp)->tv_nsec; \ if ((vvp)->tv_nsec < 0) { \ (vvp)->tv_sec--; \ (vvp)->tv_nsec += 1000000000; \ } \ } while (0) #endif /* Generically useful offsets into the peripheral private area */ #define ppriv_ptr0 periph_priv.entries[0].ptr #define ppriv_ptr1 periph_priv.entries[1].ptr #define ppriv_field0 periph_priv.entries[0].field #define ppriv_field1 periph_priv.entries[1].field #define ccb_buf ppriv_ptr0 #define CAMDD_FILE_DEFAULT_BLOCK 524288 #define CAMDD_FILE_DEFAULT_DEPTH 1 #define CAMDD_PASS_MAX_BLOCK 1048576 #define CAMDD_PASS_DEFAULT_DEPTH 6 #define CAMDD_PASS_RW_TIMEOUT 60 * 1000 static int parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst); void camdd_free_dev(struct camdd_dev *dev); struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, int retry_count, int timeout); static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type); void camdd_release_buf(struct camdd_buf *buf); struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type); int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, uint32_t *num_sectors_used, int *double_buf_needed); uint32_t camdd_buf_get_len(struct camdd_buf *buf); void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf); int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran); struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, int timeout); struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, camdd_argmask arglist, int probe_retry_count, int probe_timeout, int io_retry_count, int io_timeout); void *camdd_file_worker(void *arg); camdd_buf_status camdd_ccb_status(union ccb *ccb); int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf); int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf); void 
camdd_peer_done(struct camdd_buf *buf); void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, int *error_count); int camdd_pass_fetch(struct camdd_dev *dev); int camdd_file_run(struct camdd_dev *dev); int camdd_pass_run(struct camdd_dev *dev); int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len); int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf); void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes); void *camdd_worker(void *arg); void camdd_sig_handler(int sig); void camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, struct timespec *start_time); int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, int retry_count, int timeout); int camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts); void usage(void); /* * Parse out a bus, or a bus, target and lun in the following * format: * bus * bus:target * bus:target:lun * * Returns the number of parsed components, or 0. */ static int parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst) { char *tmpstr; int convs = 0; while (isspace(*tstr) && (*tstr != '\0')) tstr++; tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *bus = strtol(tmpstr, NULL, 0); *arglst |= CAMDD_ARG_BUS; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, NULL, 0); *arglst |= CAMDD_ARG_TARGET; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtol(tmpstr, NULL, 0); *arglst |= CAMDD_ARG_LUN; convs++; } } } return convs; } /* * XXX KDM clean up and free all of the buffers on the queue! 
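* As it stands, only the underlying descriptor (file fd or cam_device), the file tmp_buf and the camdd_dev structure itself are released; camdd_bufs still sitting on the various queues are not walked and are leaked.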
*/ void camdd_free_dev(struct camdd_dev *dev) { if (dev == NULL) return; switch (dev->dev_type) { case CAMDD_DEV_FILE: { struct camdd_dev_file *file_dev = &dev->dev_spec.file; if (file_dev->fd != -1) close(file_dev->fd); free(file_dev->tmp_buf); break; } case CAMDD_DEV_PASS: { struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; if (pass_dev->dev != NULL) cam_close_device(pass_dev->dev); break; } default: break; } free(dev); } struct camdd_dev * camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, int retry_count, int timeout) { struct camdd_dev *dev = NULL; struct kevent *ke; size_t ke_size; int retval = 0; dev = malloc(sizeof(*dev)); if (dev == NULL) { warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev)); goto bailout; } bzero(dev, sizeof(*dev)); dev->dev_type = dev_type; dev->io_timeout = timeout; dev->retry_count = retry_count; STAILQ_INIT(&dev->free_queue); STAILQ_INIT(&dev->free_indirect_queue); STAILQ_INIT(&dev->active_queue); STAILQ_INIT(&dev->pending_queue); STAILQ_INIT(&dev->run_queue); STAILQ_INIT(&dev->reorder_queue); STAILQ_INIT(&dev->work_queue); STAILQ_INIT(&dev->peer_done_queue); STAILQ_INIT(&dev->peer_work_queue); retval = pthread_mutex_init(&dev->mutex, NULL); if (retval != 0) { warnc(retval, "%s: failed to initialize mutex", __func__); goto bailout; } retval = pthread_cond_init(&dev->cond, NULL); if (retval != 0) { warnc(retval, "%s: failed to initialize condition variable", __func__); goto bailout; } dev->kq = kqueue(); if (dev->kq == -1) { warn("%s: Unable to create kqueue", __func__); goto bailout; } ke_size = sizeof(struct kevent) * (num_ke + 4); ke = malloc(ke_size); if (ke == NULL) { warn("%s: unable to malloc %zu bytes", __func__, ke_size); goto bailout; } bzero(ke, ke_size); if (num_ke > 0) bcopy(new_ke, ke, num_ke * sizeof(struct kevent)); EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER, EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER, EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL); if (retval == -1) { warn("%s: Unable to register kevents", __func__); goto bailout; } return (dev); bailout: free(dev); return (NULL); } static struct camdd_buf * camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type) { struct camdd_buf *buf = NULL; uint8_t *data_ptr = NULL; /* * We only need to allocate data space for data buffers. 
*/ switch (buf_type) { case CAMDD_BUF_DATA: data_ptr = malloc(dev->blocksize); if (data_ptr == NULL) { warn("unable to allocate %u bytes", dev->blocksize); goto bailout_error; } break; default: break; } buf = malloc(sizeof(*buf)); if (buf == NULL) { warn("unable to allocate %zu bytes", sizeof(*buf)); goto bailout_error; } bzero(buf, sizeof(*buf)); buf->buf_type = buf_type; buf->dev = dev; switch (buf_type) { case CAMDD_BUF_DATA: { struct camdd_buf_data *data; data = &buf->buf_type_spec.data; data->alloc_len = dev->blocksize; data->buf = data_ptr; break; } case CAMDD_BUF_INDIRECT: break; default: break; } STAILQ_INIT(&buf->src_list); return (buf); bailout_error: if (data_ptr != NULL) free(data_ptr); if (buf != NULL) free(buf); return (NULL); } void camdd_release_buf(struct camdd_buf *buf) { struct camdd_dev *dev; dev = buf->dev; switch (buf->buf_type) { case CAMDD_BUF_DATA: { struct camdd_buf_data *data; data = &buf->buf_type_spec.data; if (data->segs != NULL) { if (data->extra_buf != 0) { void *extra_buf; extra_buf = (void *) data->segs[data->sg_count - 1].ds_addr; free(extra_buf); data->extra_buf = 0; } free(data->segs); data->segs = NULL; data->sg_count = 0; } else if (data->iovec != NULL) { if (data->extra_buf != 0) { free(data->iovec[data->sg_count - 1].iov_base); data->extra_buf = 0; } free(data->iovec); data->iovec = NULL; data->sg_count = 0; } STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); break; } case CAMDD_BUF_INDIRECT: STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links); break; default: err(1, "%s: Invalid buffer type %d for released buffer", __func__, buf->buf_type); break; } } struct camdd_buf * camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type) { struct camdd_buf *buf = NULL; switch (buf_type) { case CAMDD_BUF_DATA: buf = STAILQ_FIRST(&dev->free_queue); if (buf != NULL) { struct camdd_buf_data *data; uint8_t *data_ptr; uint32_t alloc_len; STAILQ_REMOVE_HEAD(&dev->free_queue, links); data = &buf->buf_type_spec.data; data_ptr = data->buf; alloc_len = data->alloc_len; bzero(buf, sizeof(*buf)); data->buf = data_ptr; data->alloc_len = alloc_len; } break; case CAMDD_BUF_INDIRECT: buf = STAILQ_FIRST(&dev->free_indirect_queue); if (buf != NULL) { STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links); bzero(buf, sizeof(*buf)); } break; default: warnx("Unknown buffer type %d requested", buf_type); break; } if (buf == NULL) return (camdd_alloc_buf(dev, buf_type)); else { STAILQ_INIT(&buf->src_list); buf->dev = dev; buf->buf_type = buf_type; return (buf); } } int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, uint32_t *num_sectors_used, int *double_buf_needed) { struct camdd_buf *tmp_buf; struct camdd_buf_data *data; uint8_t *extra_buf = NULL; size_t extra_buf_len = 0; int i, retval = 0; data = &buf->buf_type_spec.data; data->sg_count = buf->src_count; /* * Compose a scatter/gather list from all of the buffers in the list. * If the length of the buffer isn't a multiple of the sector size, * we'll have to add an extra buffer. This should only happen * at the end of a transfer. 
*/ if ((data->fill_len % sector_size) != 0) { extra_buf_len = sector_size - (data->fill_len % sector_size); extra_buf = calloc(extra_buf_len, 1); if (extra_buf == NULL) { warn("%s: unable to allocate %zu bytes for extra " "buffer space", __func__, extra_buf_len); retval = 1; goto bailout; } data->extra_buf = 1; data->sg_count++; } if (iovec == 0) { data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t)); if (data->segs == NULL) { warn("%s: unable to allocate %zu bytes for S/G list", __func__, sizeof(bus_dma_segment_t) * data->sg_count); retval = 1; goto bailout; } } else { data->iovec = calloc(data->sg_count, sizeof(struct iovec)); if (data->iovec == NULL) { warn("%s: unable to allocate %zu bytes for S/G list", __func__, sizeof(struct iovec) * data->sg_count); retval = 1; goto bailout; } } for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list); i < buf->src_count && tmp_buf != NULL; i++, tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) { if (tmp_buf->buf_type == CAMDD_BUF_DATA) { struct camdd_buf_data *tmp_data; tmp_data = &tmp_buf->buf_type_spec.data; if (iovec == 0) { data->segs[i].ds_addr = (bus_addr_t) tmp_data->buf; data->segs[i].ds_len = tmp_data->fill_len - tmp_data->resid; } else { data->iovec[i].iov_base = tmp_data->buf; data->iovec[i].iov_len = tmp_data->fill_len - tmp_data->resid; } if (((tmp_data->fill_len - tmp_data->resid) % sector_size) != 0) *double_buf_needed = 1; } else { struct camdd_buf_indirect *tmp_ind; tmp_ind = &tmp_buf->buf_type_spec.indirect; if (iovec == 0) { data->segs[i].ds_addr = (bus_addr_t)tmp_ind->start_ptr; data->segs[i].ds_len = tmp_ind->len; } else { data->iovec[i].iov_base = tmp_ind->start_ptr; data->iovec[i].iov_len = tmp_ind->len; } if ((tmp_ind->len % sector_size) != 0) *double_buf_needed = 1; } } if (extra_buf != NULL) { if (iovec == 0) { data->segs[i].ds_addr = (bus_addr_t)extra_buf; data->segs[i].ds_len = extra_buf_len; } else { data->iovec[i].iov_base = extra_buf; data->iovec[i].iov_len = extra_buf_len; } i++; } if ((tmp_buf != NULL) || (i != data->sg_count)) { warnx("buffer source count does not match " "number of buffers in list!"); retval = 1; goto bailout; } bailout: if (retval == 0) { *num_sectors_used = (data->fill_len + extra_buf_len) / sector_size; } return (retval); } uint32_t camdd_buf_get_len(struct camdd_buf *buf) { uint32_t len = 0; if (buf->buf_type != CAMDD_BUF_DATA) { struct camdd_buf_indirect *indirect; indirect = &buf->buf_type_spec.indirect; len = indirect->len; } else { struct camdd_buf_data *data; data = &buf->buf_type_spec.data; len = data->fill_len; } return (len); } void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf) { struct camdd_buf_data *data; assert(buf->buf_type == CAMDD_BUF_DATA); data = &buf->buf_type_spec.data; STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links); buf->src_count++; data->fill_len += camdd_buf_get_len(child_buf); } typedef enum { CAMDD_TS_MAX_BLK, CAMDD_TS_MIN_BLK, CAMDD_TS_BLK_GRAN, CAMDD_TS_EFF_IOSIZE } camdd_status_item_index; static struct camdd_status_items { const char *name; struct mt_status_entry *entry; } req_status_items[] = { { "max_blk", NULL }, { "min_blk", NULL }, { "blk_gran", NULL }, { "max_effective_iosize", NULL } }; int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran) { struct mt_status_data status_data; char *xml_str = NULL; unsigned int i; int retval = 0; retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str); if (retval != 0) err(1, "Couldn't get XML string from %s", filename); retval 
= mt_get_status(xml_str, &status_data); if (retval != XML_STATUS_OK) { warn("couldn't get status for %s", filename); retval = 1; goto bailout; } else retval = 0; if (status_data.error != 0) { warnx("%s", status_data.error_str); retval = 1; goto bailout; } for (i = 0; i < sizeof(req_status_items) / sizeof(req_status_items[0]); i++) { char *name; name = __DECONST(char *, req_status_items[i].name); req_status_items[i].entry = mt_status_entry_find(&status_data, name); if (req_status_items[i].entry == NULL) { errx(1, "Cannot find status entry %s", req_status_items[i].name); } } *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned; *max_blk= req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned; *min_blk= req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned; *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned; bailout: free(xml_str); mt_status_free(&status_data); return (retval); } struct camdd_dev * camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, int timeout) { struct camdd_dev *dev = NULL; struct camdd_dev_file *file_dev; uint64_t blocksize = io_opts->blocksize; dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout); if (dev == NULL) goto bailout; file_dev = &dev->dev_spec.file; file_dev->fd = fd; strlcpy(file_dev->filename, io_opts->dev_name, sizeof(file_dev->filename)); strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name)); if (blocksize == 0) dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK; else dev->blocksize = blocksize; if ((io_opts->queue_depth != 0) && (io_opts->queue_depth != 1)) { warnx("Queue depth %ju for %s ignored, only 1 outstanding " "command supported", (uintmax_t)io_opts->queue_depth, io_opts->dev_name); } dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH; dev->run = camdd_file_run; dev->fetch = NULL; /* * We can effectively access files on byte boundaries. We'll reset * this for devices like disks that can be accessed on sector * boundaries. */ dev->sector_size = 1; if ((fd != STDIN_FILENO) && (fd != STDOUT_FILENO)) { int retval; retval = fstat(fd, &file_dev->sb); if (retval != 0) { warn("Cannot stat %s", dev->device_name); goto bailout_error; } if (S_ISREG(file_dev->sb.st_mode)) { file_dev->file_type = CAMDD_FILE_REG; } else if (S_ISCHR(file_dev->sb.st_mode)) { int type; if (ioctl(fd, FIODTYPE, &type) == -1) err(1, "FIODTYPE ioctl failed on %s", dev->device_name); else { if (type & D_TAPE) file_dev->file_type = CAMDD_FILE_TAPE; else if (type & D_DISK) file_dev->file_type = CAMDD_FILE_DISK; else if (type & D_MEM) file_dev->file_type = CAMDD_FILE_MEM; else if (type & D_TTY) file_dev->file_type = CAMDD_FILE_TTY; } } else if (S_ISDIR(file_dev->sb.st_mode)) { errx(1, "cannot operate on directory %s", dev->device_name); } else if (S_ISFIFO(file_dev->sb.st_mode)) { file_dev->file_type = CAMDD_FILE_PIPE; } else errx(1, "Cannot determine file type for %s", dev->device_name); switch (file_dev->file_type) { case CAMDD_FILE_REG: if (file_dev->sb.st_size != 0) dev->max_sector = file_dev->sb.st_size - 1; else dev->max_sector = 0; file_dev->file_flags |= CAMDD_FF_CAN_SEEK; break; case CAMDD_FILE_TAPE: { uint64_t max_iosize, max_blk, min_blk, blk_gran; /* * Check block limits and maximum effective iosize. * Make sure the blocksize is within the block * limits (and a multiple of the minimum blocksize) * and that the blocksize is <= maximum effective * iosize. 
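* As a worked example, a reported block granularity of 2 means a 4-byte granularity, so a requested blocksize of 1050 bytes is rounded down to 1048 by the mask below.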
*/ retval = camdd_probe_tape(fd, dev->device_name, &max_iosize, &max_blk, &min_blk, &blk_gran); if (retval != 0) errx(1, "Unable to probe tape %s", dev->device_name); /* * The blocksize needs to be <= the maximum * effective I/O size of the tape device. Note * that this also takes into account the maximum * blocksize reported by READ BLOCK LIMITS. */ if (dev->blocksize > max_iosize) { warnx("Blocksize %u too big for %s, limiting " "to %ju", dev->blocksize, dev->device_name, max_iosize); dev->blocksize = max_iosize; } /* * The blocksize needs to be at least min_blk; */ if (dev->blocksize < min_blk) { warnx("Blocksize %u too small for %s, " "increasing to %ju", dev->blocksize, dev->device_name, min_blk); dev->blocksize = min_blk; } /* * And the blocksize needs to be a multiple of * the block granularity. */ if ((blk_gran != 0) && (dev->blocksize % (1 << blk_gran))) { warnx("Blocksize %u for %s not a multiple of " "%d, adjusting to %d", dev->blocksize, dev->device_name, (1 << blk_gran), dev->blocksize & ~((1 << blk_gran) - 1)); dev->blocksize &= ~((1 << blk_gran) - 1); } if (dev->blocksize == 0) { errx(1, "Unable to derive valid blocksize for " "%s", dev->device_name); } /* * For tape drives, set the sector size to the * blocksize so that we make sure not to write * less than the blocksize out to the drive. */ dev->sector_size = dev->blocksize; break; } case CAMDD_FILE_DISK: { off_t media_size; unsigned int sector_size; file_dev->file_flags |= CAMDD_FF_CAN_SEEK; if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) { err(1, "DIOCGSECTORSIZE ioctl failed on %s", dev->device_name); } if (sector_size == 0) { errx(1, "DIOCGSECTORSIZE ioctl returned " "invalid sector size %u for %s", sector_size, dev->device_name); } if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) { err(1, "DIOCGMEDIASIZE ioctl failed on %s", dev->device_name); } if (media_size == 0) { errx(1, "DIOCGMEDIASIZE ioctl returned " "invalid media size %ju for %s", (uintmax_t)media_size, dev->device_name); } if (dev->blocksize % sector_size) { errx(1, "%s blocksize %u not a multiple of " "sector size %u", dev->device_name, dev->blocksize, sector_size); } dev->sector_size = sector_size; dev->max_sector = (media_size / sector_size) - 1; break; } case CAMDD_FILE_MEM: file_dev->file_flags |= CAMDD_FF_CAN_SEEK; break; default: break; } } if ((io_opts->offset != 0) && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) { warnx("Offset %ju specified for %s, but we cannot seek on %s", io_opts->offset, io_opts->dev_name, io_opts->dev_name); goto bailout_error; } #if 0 else if ((io_opts->offset != 0) && ((io_opts->offset % dev->sector_size) != 0)) { warnx("Offset %ju for %s is not a multiple of the " "sector size %u", io_opts->offset, io_opts->dev_name, dev->sector_size); goto bailout_error; } else { dev->start_offset_bytes = io_opts->offset; } #endif bailout: return (dev); bailout_error: camdd_free_dev(dev); return (NULL); } /* * Need to implement this. Do a basic probe: * - Check the inquiry data, make sure we're talking to a device that we * can reasonably expect to talk to -- direct, RBC, CD, WORM. * - Send a test unit ready, make sure the device is available. * - Get the capacity and block size.
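* The body below already checks the inquiry-derived device type against the direct-access-style types and fetches the capacity and block length (READ CAPACITY, escalating to READ CAPACITY(16) when the short form reports 0xffffffff), plus an XPT_PATH_INQ for cpi_maxio; the TEST UNIT READY step is still outstanding. The scratch CCB is now reset between commands with the CCB_CLEAR_ALL_EXCEPT_HDR() macro introduced in this change, which zeroes everything past the header.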
*/ struct camdd_dev * camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, camdd_argmask arglist, int probe_retry_count, int probe_timeout, int io_retry_count, int io_timeout) { union ccb *ccb; uint64_t maxsector; uint32_t cpi_maxio, max_iosize, pass_numblocks; uint32_t block_len; struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; struct camdd_dev *dev; struct camdd_dev_pass *pass_dev; struct kevent ke; int scsi_dev_type; dev = NULL; scsi_dev_type = SID_TYPE(&cam_dev->inq_data); maxsector = 0; block_len = 0; /* * For devices that support READ CAPACITY, we'll attempt to get the * capacity. Otherwise, we really don't support tape or other * devices via SCSI passthrough, so just return an error in that case. */ switch (scsi_dev_type) { case T_DIRECT: case T_WORM: case T_CDROM: case T_OPTICAL: case T_RBC: break; default: errx(1, "Unsupported SCSI device type %d", scsi_dev_type); break; /*NOTREACHED*/ } ccb = cam_getccb(cam_dev); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); goto bailout; } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_read_capacity(&ccb->csio, /*retries*/ probe_retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, &rcap, SSD_FULL_SIZE, /*timeout*/ probe_timeout ? probe_timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAMDD_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(cam_dev, ccb) < 0) { warn("error sending READ CAPACITY command"); cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } maxsector = scsi_4btoul(rcap.addr); block_len = scsi_4btoul(rcap.length); /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (maxsector != 0xffffffff) goto rcap_done; scsi_read_capacity_16(&ccb->csio, /*retries*/ probe_retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladdr*/ 0, /*pmi*/ 0, (uint8_t *)&rcaplong, sizeof(rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ probe_timeout ? 
probe_timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAMDD_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(cam_dev, ccb) < 0) { warn("error sending READ CAPACITY (16) command"); cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } maxsector = scsi_8btou64(rcaplong.addr); block_len = scsi_4btoul(rcaplong.length); rcap_done: if (block_len == 0) { warnx("Sector size for %s%u is 0, cannot continue", cam_dev->device_name, cam_dev->dev_unit_num); goto bailout_error; } - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cpi); ccb->ccb_h.func_code = XPT_PATH_INQ; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 1; if (cam_send_ccb(cam_dev, ccb) < 0) { warn("error sending XPT_PATH_INQ CCB"); cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0); dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count, io_timeout); if (dev == NULL) goto bailout; pass_dev = &dev->dev_spec.pass; pass_dev->scsi_dev_type = scsi_dev_type; pass_dev->dev = cam_dev; pass_dev->max_sector = maxsector; pass_dev->block_len = block_len; pass_dev->cpi_maxio = ccb->cpi.maxio; snprintf(dev->device_name, sizeof(dev->device_name), "%s%u", pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); dev->sector_size = block_len; dev->max_sector = maxsector; /* * Determine the optimal blocksize to use for this device. */ /* * If the controller has not specified a maximum I/O size, * just go with 128K as a somewhat conservative value. */ if (pass_dev->cpi_maxio == 0) cpi_maxio = 131072; else cpi_maxio = pass_dev->cpi_maxio; /* * If the controller has a large maximum I/O size, limit it * to something smaller so that the kernel doesn't have trouble * allocating buffers to copy data in and out for us. * XXX KDM this is until we have unmapped I/O support in the kernel. */ max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK); /* * If we weren't able to get a block size for some reason, * default to 512 bytes. */ block_len = pass_dev->block_len; if (block_len == 0) block_len = 512; /* * Figure out how many blocksize chunks will fit in the * maximum I/O size. 
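* As a worked example, a controller that reports no maximum I/O size gets the conservative 128k value; with 512-byte blocks that is 256 chunks, so the derived device blocksize below is 131072 bytes unless the user's blocksize option overrides it.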
*/ pass_numblocks = max_iosize / block_len; /* * And finally, multiple the number of blocks by the LBA * length to get our maximum block size; */ dev->blocksize = pass_numblocks * block_len; if (io_opts->blocksize != 0) { if ((io_opts->blocksize % dev->sector_size) != 0) { warnx("Blocksize %ju for %s is not a multiple of " "sector size %u", (uintmax_t)io_opts->blocksize, dev->device_name, dev->sector_size); goto bailout_error; } dev->blocksize = io_opts->blocksize; } dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH; if (io_opts->queue_depth != 0) dev->target_queue_depth = io_opts->queue_depth; if (io_opts->offset != 0) { if (io_opts->offset > (dev->max_sector * dev->sector_size)) { warnx("Offset %ju is past the end of device %s", io_opts->offset, dev->device_name); goto bailout_error; } #if 0 else if ((io_opts->offset % dev->sector_size) != 0) { warnx("Offset %ju for %s is not a multiple of the " "sector size %u", io_opts->offset, dev->device_name, dev->sector_size); goto bailout_error; } dev->start_offset_bytes = io_opts->offset; #endif } dev->min_cmd_size = io_opts->min_cmd_size; dev->run = camdd_pass_run; dev->fetch = camdd_pass_fetch; bailout: cam_freeccb(ccb); return (dev); bailout_error: cam_freeccb(ccb); camdd_free_dev(dev); return (NULL); } void * camdd_worker(void *arg) { struct camdd_dev *dev = arg; struct camdd_buf *buf; struct timespec ts, *kq_ts; ts.tv_sec = 0; ts.tv_nsec = 0; pthread_mutex_lock(&dev->mutex); dev->flags |= CAMDD_DEV_FLAG_ACTIVE; for (;;) { struct kevent ke; int retval = 0; /* * XXX KDM check the reorder queue depth? */ if (dev->write_dev == 0) { uint32_t our_depth, peer_depth, peer_bytes, our_bytes; uint32_t target_depth = dev->target_queue_depth; uint32_t peer_target_depth = dev->peer_dev->target_queue_depth; uint32_t peer_blocksize = dev->peer_dev->blocksize; camdd_get_depth(dev, &our_depth, &peer_depth, &our_bytes, &peer_bytes); #if 0 while (((our_depth < target_depth) && (peer_depth < peer_target_depth)) || ((peer_bytes + our_bytes) < (peer_blocksize * 2))) { #endif while (((our_depth + peer_depth) < (target_depth + peer_target_depth)) || ((peer_bytes + our_bytes) < (peer_blocksize * 3))) { retval = camdd_queue(dev, NULL); if (retval == 1) break; else if (retval != 0) { error_exit = 1; goto bailout; } camdd_get_depth(dev, &our_depth, &peer_depth, &our_bytes, &peer_bytes); } } /* * See if we have any I/O that is ready to execute. */ buf = STAILQ_FIRST(&dev->run_queue); if (buf != NULL) { while (dev->target_queue_depth > dev->cur_active_io) { retval = dev->run(dev); if (retval == -1) { dev->flags |= CAMDD_DEV_FLAG_EOF; error_exit = 1; break; } else if (retval != 0) { break; } } } /* * We've reached EOF, or our partner has reached EOF. */ if ((dev->flags & CAMDD_DEV_FLAG_EOF) || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) { if (dev->write_dev != 0) { if ((STAILQ_EMPTY(&dev->work_queue)) && (dev->num_run_queue == 0) && (dev->cur_active_io == 0)) { goto bailout; } } else { /* * If we're the reader, and the writer * got EOF, he is already done. If we got * the EOF, then we need to wait until * everything is flushed out for the writer. */ if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) { goto bailout; } else if ((dev->num_peer_work_queue == 0) && (dev->num_peer_done_queue == 0) && (dev->cur_active_io == 0) && (dev->num_run_queue == 0)) { goto bailout; } } /* * XXX KDM need to do something about the pending * queue and cleanup resources. 
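* (The checks above let the writer exit once its work, run and active queues have drained, and let the reader exit either when the writer has already reported EOF or when everything it queued to its peer has come back and completed.)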
*/ } if ((dev->write_dev == 0) && (dev->cur_active_io == 0) && (dev->peer_bytes_queued < dev->peer_dev->blocksize)) kq_ts = &ts; else kq_ts = NULL; /* * Run kevent to see if there are events to process. */ pthread_mutex_unlock(&dev->mutex); retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts); pthread_mutex_lock(&dev->mutex); if (retval == -1) { warn("%s: error returned from kevent",__func__); goto bailout; } else if (retval != 0) { switch (ke.filter) { case EVFILT_READ: if (dev->fetch != NULL) { retval = dev->fetch(dev); if (retval == -1) { error_exit = 1; goto bailout; } } break; case EVFILT_SIGNAL: /* * We register for this so we don't get * an error as a result of a SIGINFO or a * SIGINT. It will actually get handled * by the signal handler. If we get a * SIGINT, bail out without printing an * error message. Any other signals * will result in the error message above. */ if (ke.ident == SIGINT) goto bailout; break; case EVFILT_USER: retval = 0; /* * Check to see if the other thread has * queued any I/O for us to do. (In this * case we're the writer.) */ for (buf = STAILQ_FIRST(&dev->work_queue); buf != NULL; buf = STAILQ_FIRST(&dev->work_queue)) { STAILQ_REMOVE_HEAD(&dev->work_queue, work_links); retval = camdd_queue(dev, buf); /* * We keep going unless we get an * actual error. If we get EOF, we * still want to remove the buffers * from the queue and send the back * to the reader thread. */ if (retval == -1) { error_exit = 1; goto bailout; } else retval = 0; } /* * Next check to see if the other thread has * queued any completed buffers back to us. * (In this case we're the reader.) */ for (buf = STAILQ_FIRST(&dev->peer_done_queue); buf != NULL; buf = STAILQ_FIRST(&dev->peer_done_queue)){ STAILQ_REMOVE_HEAD( &dev->peer_done_queue, work_links); dev->num_peer_done_queue--; camdd_peer_done(buf); } break; default: warnx("%s: unknown kevent filter %d", __func__, ke.filter); break; } } } bailout: dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE; /* XXX KDM cleanup resources here? */ pthread_mutex_unlock(&dev->mutex); need_exit = 1; sem_post(&camdd_sem); return (NULL); } /* * Simplistic translation of CCB status to our local status. */ camdd_buf_status camdd_ccb_status(union ccb *ccb) { camdd_buf_status status = CAMDD_STATUS_NONE; cam_status ccb_status; ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK; switch (ccb_status) { case CAM_REQ_CMP: { if (ccb->csio.resid == 0) { status = CAMDD_STATUS_OK; } else if (ccb->csio.dxfer_len > ccb->csio.resid) { status = CAMDD_STATUS_SHORT_IO; } else { status = CAMDD_STATUS_EOF; } break; } case CAM_SCSI_STATUS_ERROR: { switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: status = CAMDD_STATUS_OK; break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_QUEUE_FULL: case SCSI_STATUS_BUSY: case SCSI_STATUS_RESERV_CONFLICT: default: status = CAMDD_STATUS_ERROR; break; } break; } default: status = CAMDD_STATUS_ERROR; break; } return (status); } /* * Queue a buffer to our peer's work thread for writing. * * Returns 0 for success, -1 for failure, 1 if the other thread exited. 
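* Completions that arrive out of order (their starting byte offset does not match next_completion_pos_bytes) are parked on the reorder queue; in-order buffers, together with any reorder-queue entries they unblock, are batched onto the peer's work queue and the peer is woken with an EVFILT_USER kevent.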
*/ int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf) { struct kevent ke; STAILQ_HEAD(, camdd_buf) local_queue; struct camdd_buf *buf1, *buf2; struct camdd_buf_data *data = NULL; uint64_t peer_bytes_queued = 0; int active = 1; int retval = 0; STAILQ_INIT(&local_queue); /* * Since we're the reader, we need to queue our I/O to the writer * in sequential order in order to make sure it gets written out * in sequential order. * * Check the next expected I/O starting offset. If this doesn't * match, put it on the reorder queue. */ if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) { /* * If there is nothing on the queue, there is no sorting * needed. */ if (STAILQ_EMPTY(&dev->reorder_queue)) { STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); dev->num_reorder_queue++; goto bailout; } /* * Sort in ascending order by starting LBA. There should * be no identical LBAs. */ for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; buf1 = buf2) { buf2 = STAILQ_NEXT(buf1, links); if (buf->lba < buf1->lba) { /* * If we're less than the first one, then * we insert at the head of the list * because this has to be the first element * on the list. */ STAILQ_INSERT_HEAD(&dev->reorder_queue, buf, links); dev->num_reorder_queue++; break; } else if (buf->lba > buf1->lba) { if (buf2 == NULL) { STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); dev->num_reorder_queue++; break; } else if (buf->lba < buf2->lba) { STAILQ_INSERT_AFTER(&dev->reorder_queue, buf1, buf, links); dev->num_reorder_queue++; break; } } else { errx(1, "Found buffers with duplicate LBA %ju!", buf->lba); } } goto bailout; } else { /* * We're the next expected I/O completion, so put ourselves * on the local queue to be sent to the writer. We use * work_links here so that we can queue this to the * peer_work_queue before taking the buffer off of the * local_queue. */ dev->next_completion_pos_bytes += buf->len; STAILQ_INSERT_TAIL(&local_queue, buf, work_links); /* * Go through the reorder queue looking for more sequential * I/O and add it to the local queue. */ for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; buf1 = STAILQ_FIRST(&dev->reorder_queue)) { /* * As soon as we see an I/O that is out of sequence, * we're done. */ if ((buf1->lba * dev->sector_size) != dev->next_completion_pos_bytes) break; STAILQ_REMOVE_HEAD(&dev->reorder_queue, links); dev->num_reorder_queue--; STAILQ_INSERT_TAIL(&local_queue, buf1, work_links); dev->next_completion_pos_bytes += buf1->len; } } /* * Setup the event to let the other thread know that it has work * pending. */ EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); /* * Put this on our shadow queue so that we know what we've queued * to the other thread. */ STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) { if (buf1->buf_type != CAMDD_BUF_DATA) { errx(1, "%s: should have a data buffer, not an " "indirect buffer", __func__); } data = &buf1->buf_type_spec.data; /* * We only need to send one EOF to the writer, and don't * need to continue sending EOFs after that. 
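* Any EOF buffer seen after the first is simply released here rather than being forwarded again.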
*/ if (buf1->status == CAMDD_STATUS_EOF) { if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) { STAILQ_REMOVE(&local_queue, buf1, camdd_buf, work_links); camdd_release_buf(buf1); retval = 1; continue; } dev->flags |= CAMDD_DEV_FLAG_EOF_SENT; } STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links); peer_bytes_queued += (data->fill_len - data->resid); dev->peer_bytes_queued += (data->fill_len - data->resid); dev->num_peer_work_queue++; } if (STAILQ_FIRST(&local_queue) == NULL) goto bailout; /* * Drop our mutex and pick up the other thread's mutex. We need to * do this to avoid deadlocks. */ pthread_mutex_unlock(&dev->mutex); pthread_mutex_lock(&dev->peer_dev->mutex); if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) { /* * Put the buffers on the other thread's incoming work queue. */ for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; buf1 = STAILQ_FIRST(&local_queue)) { STAILQ_REMOVE_HEAD(&local_queue, work_links); STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1, work_links); } /* * Send an event to the other thread's kqueue to let it know * that there is something on the work queue. */ retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); if (retval == -1) warn("%s: unable to add peer work_queue kevent", __func__); else retval = 0; } else active = 0; pthread_mutex_unlock(&dev->peer_dev->mutex); pthread_mutex_lock(&dev->mutex); /* * If the other side isn't active, run through the queue and * release all of the buffers. */ if (active == 0) { for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; buf1 = STAILQ_FIRST(&local_queue)) { STAILQ_REMOVE_HEAD(&local_queue, work_links); STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf, links); dev->num_peer_work_queue--; camdd_release_buf(buf1); } dev->peer_bytes_queued -= peer_bytes_queued; retval = 1; } bailout: return (retval); } /* * Return a buffer to the reader thread when we have completed writing it. */ int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf) { struct kevent ke; int retval = 0; /* * Setup the event to let the other thread know that we have * completed a buffer. */ EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); /* * Drop our lock and acquire the other thread's lock before * manipulating */ pthread_mutex_unlock(&dev->mutex); pthread_mutex_lock(&dev->peer_dev->mutex); /* * Put the buffer on the reader thread's peer done queue now that * we have completed it. */ STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf, work_links); dev->peer_dev->num_peer_done_queue++; /* * Send an event to the peer thread to let it know that we've added * something to its peer done queue. */ retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); if (retval == -1) warn("%s: unable to add peer_done_queue kevent", __func__); else retval = 0; /* * Drop the other thread's lock and reacquire ours. */ pthread_mutex_unlock(&dev->peer_dev->mutex); pthread_mutex_lock(&dev->mutex); return (retval); } /* * Free a buffer that was written out by the writer thread and returned to * the reader thread. 
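* This runs in the reader's context: the buffer is removed from the shadow peer_work_queue, its payload is credited back against peer_bytes_queued, an EOF status latches CAMDD_DEV_FLAG_PEER_EOF, and the buffer is returned to the free queue for reuse.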
*/ void camdd_peer_done(struct camdd_buf *buf) { struct camdd_dev *dev; struct camdd_buf_data *data; dev = buf->dev; if (buf->buf_type != CAMDD_BUF_DATA) { errx(1, "%s: should have a data buffer, not an " "indirect buffer", __func__); } data = &buf->buf_type_spec.data; STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links); dev->num_peer_work_queue--; dev->peer_bytes_queued -= (data->fill_len - data->resid); if (buf->status == CAMDD_STATUS_EOF) dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); } /* * Assumes caller holds the lock for this device. */ void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, int *error_count) { int retval = 0; /* * If we're the reader, we need to send the completed I/O * to the writer. If we're the writer, we need to just * free up resources, or let the reader know if we've * encountered an error. */ if (dev->write_dev == 0) { retval = camdd_queue_peer_buf(dev, buf); if (retval != 0) (*error_count)++; } else { struct camdd_buf *tmp_buf, *next_buf; STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links, next_buf) { struct camdd_buf *src_buf; struct camdd_buf_indirect *indirect; STAILQ_REMOVE(&buf->src_list, tmp_buf, camdd_buf, src_links); tmp_buf->status = buf->status; if (tmp_buf->buf_type == CAMDD_BUF_DATA) { camdd_complete_peer_buf(dev, tmp_buf); continue; } indirect = &tmp_buf->buf_type_spec.indirect; src_buf = indirect->src_buf; src_buf->refcount--; /* * XXX KDM we probably need to account for * exactly how many bytes we were able to * write. Allocate the residual to the * first N buffers? Or just track the * number of bytes written? Right now the reader * doesn't do anything with a residual. */ src_buf->status = buf->status; if (src_buf->refcount <= 0) camdd_complete_peer_buf(dev, src_buf); STAILQ_INSERT_TAIL(&dev->free_indirect_queue, tmp_buf, links); } STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); } } /* * Fetch all completed commands from the pass(4) device. * * Returns the number of commands received, or -1 if any of the commands * completed with an error. Returns 0 if no commands are available. */ int camdd_pass_fetch(struct camdd_dev *dev) { struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; union ccb ccb; int retval = 0, num_fetched = 0, error_count = 0; pthread_mutex_unlock(&dev->mutex); /* * XXX KDM we don't distinguish between EFAULT and ENOENT. */ while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) { struct camdd_buf *buf; struct camdd_buf_data *data; cam_status ccb_status; union ccb *buf_ccb; buf = ccb.ccb_h.ccb_buf; data = &buf->buf_type_spec.data; buf_ccb = &data->ccb; num_fetched++; /* * Copy the CCB back out so we get status, sense data, etc. */ bcopy(&ccb, buf_ccb, sizeof(ccb)); pthread_mutex_lock(&dev->mutex); /* * We're now done, so take this off the active queue. */ STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links); dev->cur_active_io--; ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK; if (ccb_status != CAM_REQ_CMP) { cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } data->resid = ccb.csio.resid; dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid); if (buf->status == CAMDD_STATUS_NONE) buf->status = camdd_ccb_status(&ccb); if (buf->status == CAMDD_STATUS_ERROR) error_count++; else if (buf->status == CAMDD_STATUS_EOF) { /* * Once we queue this buffer to our partner thread, * he will know that we've hit EOF. 
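 *
 * (Aside, not in the original: the surrounding loop drains completed
 * CCBs from the pass(4) driver with the CAMIOGET ioctl until it
 * fails; going by the XXX note above, the "nothing left to fetch"
 * case is presumably reported as ENOENT, so a stricter variant of the
 * loop could end with
 *
 *	if (errno != ENOENT)
 *		warn("CAMIOGET");
 *
 * to flag EFAULT and other genuine errors separately.)
 *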
*/ dev->flags |= CAMDD_DEV_FLAG_EOF; } camdd_complete_buf(dev, buf, &error_count); /* * Unlock in preparation for the ioctl call. */ pthread_mutex_unlock(&dev->mutex); } pthread_mutex_lock(&dev->mutex); if (error_count > 0) return (-1); else return (num_fetched); } /* * Returns -1 for error, 0 for success/continue, and 1 for resource * shortage/stop processing. */ int camdd_file_run(struct camdd_dev *dev) { struct camdd_dev_file *file_dev = &dev->dev_spec.file; struct camdd_buf_data *data; struct camdd_buf *buf; off_t io_offset; int retval = 0, write_dev = dev->write_dev; int error_count = 0, no_resources = 0, double_buf_needed = 0; uint32_t num_sectors = 0, db_len = 0; buf = STAILQ_FIRST(&dev->run_queue); if (buf == NULL) { no_resources = 1; goto bailout; } else if ((dev->write_dev == 0) && (dev->flags & (CAMDD_DEV_FLAG_EOF | CAMDD_DEV_FLAG_EOF_SENT))) { STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); dev->num_run_queue--; buf->status = CAMDD_STATUS_EOF; error_count++; goto bailout; } /* * If we're writing, we need to go through the source buffer list * and create an S/G list. */ if (write_dev != 0) { retval = camdd_buf_sg_create(buf, /*iovec*/ 1, dev->sector_size, &num_sectors, &double_buf_needed); if (retval != 0) { no_resources = 1; goto bailout; } } STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); dev->num_run_queue--; data = &buf->buf_type_spec.data; /* * pread(2) and pwrite(2) offsets are byte offsets. */ io_offset = buf->lba * dev->sector_size; /* * Unlock the mutex while we read or write. */ pthread_mutex_unlock(&dev->mutex); /* * Note that we don't need to double buffer if we're the reader * because in that case, we have allocated a single buffer of * sufficient size to do the read. This copy is necessary on * writes because if one of the components of the S/G list is not * a sector size multiple, the kernel will reject the write. This * is unfortunate but not surprising. So this will make sure that * we're using a single buffer that is a multiple of the sector size. */ if ((double_buf_needed != 0) && (data->sg_count > 1) && (write_dev != 0)) { uint32_t cur_offset; int i; if (file_dev->tmp_buf == NULL) file_dev->tmp_buf = calloc(dev->blocksize, 1); if (file_dev->tmp_buf == NULL) { buf->status = CAMDD_STATUS_ERROR; error_count++; goto bailout; } for (i = 0, cur_offset = 0; i < data->sg_count; i++) { bcopy(data->iovec[i].iov_base, &file_dev->tmp_buf[cur_offset], data->iovec[i].iov_len); cur_offset += data->iovec[i].iov_len; } db_len = cur_offset; } if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) { if (write_dev == 0) { /* * XXX KDM is there any way we would need a S/G * list here? */ retval = pread(file_dev->fd, data->buf, buf->len, io_offset); } else { if (double_buf_needed != 0) { retval = pwrite(file_dev->fd, file_dev->tmp_buf, db_len, io_offset); } else if (data->sg_count == 0) { retval = pwrite(file_dev->fd, data->buf, data->fill_len, io_offset); } else { retval = pwritev(file_dev->fd, data->iovec, data->sg_count, io_offset); } } } else { if (write_dev == 0) { /* * XXX KDM is there any way we would need a S/G * list here? 
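 *
 * (Illustrative aside, not part of the original code: this branch is
 * taken when CAMDD_FF_CAN_SEEK is not set, i.e. for tapes, pipes and
 * stdin/stdout, which cannot be addressed by byte offset.  One common
 * way to detect that at open time, which camdd_probe_file() may or
 * may not use, is
 *
 *	if (lseek(fd, 0, SEEK_CUR) == -1 && errno == ESPIPE)
 *		can_seek = 0;
 *
 * which is why plain read()/write() is used here instead of the
 * pread()/pwrite() calls in the seekable case above.)
 *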
*/ retval = read(file_dev->fd, data->buf, buf->len); } else { if (double_buf_needed != 0) { retval = write(file_dev->fd, file_dev->tmp_buf, db_len); } else if (data->sg_count == 0) { retval = write(file_dev->fd, data->buf, data->fill_len); } else { retval = writev(file_dev->fd, data->iovec, data->sg_count); } } } /* We're done, re-acquire the lock */ pthread_mutex_lock(&dev->mutex); if (retval >= (ssize_t)data->fill_len) { /* * If the bytes transferred is more than the request size, * that indicates an overrun, which should only happen at * the end of a transfer if we have to round up to a sector * boundary. */ if (buf->status == CAMDD_STATUS_NONE) buf->status = CAMDD_STATUS_OK; data->resid = 0; dev->bytes_transferred += retval; } else if (retval == -1) { warn("Error %s %s", (write_dev) ? "writing to" : "reading from", file_dev->filename); buf->status = CAMDD_STATUS_ERROR; data->resid = data->fill_len; error_count++; if (dev->debug == 0) goto bailout; if ((double_buf_needed != 0) && (write_dev != 0)) { fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju " "offset %ju\n", __func__, file_dev->fd, file_dev->tmp_buf, db_len, (uintmax_t)buf->lba, (uintmax_t)io_offset); } else if (data->sg_count == 0) { fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju " "offset %ju\n", __func__, file_dev->fd, data->buf, data->fill_len, (uintmax_t)buf->lba, (uintmax_t)io_offset); } else { int i; fprintf(stderr, "%s: fd %d, len %u, lba %ju " "offset %ju\n", __func__, file_dev->fd, data->fill_len, (uintmax_t)buf->lba, (uintmax_t)io_offset); for (i = 0; i < data->sg_count; i++) { fprintf(stderr, "index %d ptr %p len %zu\n", i, data->iovec[i].iov_base, data->iovec[i].iov_len); } } } else if (retval == 0) { buf->status = CAMDD_STATUS_EOF; if (dev->debug != 0) printf("%s: got EOF from %s!\n", __func__, file_dev->filename); data->resid = data->fill_len; error_count++; } else if (retval < (ssize_t)data->fill_len) { if (buf->status == CAMDD_STATUS_NONE) buf->status = CAMDD_STATUS_SHORT_IO; data->resid = data->fill_len - retval; dev->bytes_transferred += retval; } bailout: if (buf != NULL) { if (buf->status == CAMDD_STATUS_EOF) { struct camdd_buf *buf2; dev->flags |= CAMDD_DEV_FLAG_EOF; STAILQ_FOREACH(buf2, &dev->run_queue, links) buf2->status = CAMDD_STATUS_EOF; } camdd_complete_buf(dev, buf, &error_count); } if (error_count != 0) return (-1); else if (no_resources != 0) return (1); else return (0); } /* * Execute one command from the run queue. Returns 0 for success, 1 for * stop processing, and -1 for error. */ int camdd_pass_run(struct camdd_dev *dev) { struct camdd_buf *buf = NULL; struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; struct camdd_buf_data *data; uint32_t num_blocks, sectors_used = 0; union ccb *ccb; int retval = 0, is_write = dev->write_dev; int double_buf_needed = 0; buf = STAILQ_FIRST(&dev->run_queue); if (buf == NULL) { retval = 1; goto bailout; } /* * If we're writing, we need to go through the source buffer list * and create an S/G list. */ if (is_write != 0) { retval = camdd_buf_sg_create(buf, /*iovec*/ 0,dev->sector_size, &sectors_used, &double_buf_needed); if (retval != 0) { retval = -1; goto bailout; } } STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); dev->num_run_queue--; data = &buf->buf_type_spec.data; ccb = &data->ccb; - bzero(&(&ccb->ccb_h)[1], - sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); /* * In almost every case the number of blocks should be the device * block size.
The exception may be at the end of an I/O stream * for a partial block or at the end of a device. */ if (is_write != 0) num_blocks = sectors_used; else num_blocks = data->fill_len / pass_dev->block_len; scsi_read_write(&ccb->csio, /*retries*/ dev->retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ : SCSI_RW_WRITE, /*byte2*/ 0, /*minimum_cmd_size*/ dev->min_cmd_size, /*lba*/ buf->lba, /*block_count*/ num_blocks, /*data_ptr*/ (data->sg_count != 0) ? (uint8_t *)data->segs : data->buf, /*dxfer_len*/ (num_blocks * pass_dev->block_len), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ dev->io_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (dev->retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (data->sg_count != 0) { ccb->csio.sglist_cnt = data->sg_count; ccb->ccb_h.flags |= CAM_DATA_SG; } /* * Store a pointer to the buffer in the CCB. The kernel will * restore this when we get it back, and we'll use it to identify * the buffer this CCB came from. */ ccb->ccb_h.ccb_buf = buf; /* * Unlock our mutex in preparation for issuing the ioctl. */ pthread_mutex_unlock(&dev->mutex); /* * Queue the CCB to the pass(4) driver. */ if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) { pthread_mutex_lock(&dev->mutex); warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__, pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); warn("%s: CCB address is %p", __func__, ccb); retval = -1; STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); } else { pthread_mutex_lock(&dev->mutex); dev->cur_active_io++; STAILQ_INSERT_TAIL(&dev->active_queue, buf, links); } bailout: return (retval); } int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len) { struct camdd_dev_pass *pass_dev; uint32_t num_blocks; int retval = 0; pass_dev = &dev->dev_spec.pass; *lba = dev->next_io_pos_bytes / dev->sector_size; *len = dev->blocksize; num_blocks = *len / dev->sector_size; /* * If max_sector is 0, then we have no set limit. This can happen * if we're writing to a file in a filesystem, or reading from * something like /dev/zero. */ if ((dev->max_sector != 0) || (dev->sector_io_limit != 0)) { uint64_t max_sector; if ((dev->max_sector != 0) && (dev->sector_io_limit != 0)) max_sector = min(dev->sector_io_limit, dev->max_sector); else if (dev->max_sector != 0) max_sector = dev->max_sector; else max_sector = dev->sector_io_limit; /* * Check to see whether we're starting off past the end of * the device. If so, we need to just send an EOF * notification to the writer. */ if (*lba > max_sector) { *len = 0; retval = 1; } else if (((*lba + num_blocks) > max_sector + 1) || ((*lba + num_blocks) < *lba)) { /* * If we get here (but pass the first check), we * can trim the request length down to go to the * end of the device. */ num_blocks = (max_sector + 1) - *lba; *len = num_blocks * dev->sector_size; retval = 1; } } dev->next_io_pos_bytes += *len; return (retval); } /* * Returns 0 for success, 1 for EOF detected, and -1 for failure. */ int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf) { struct camdd_buf *buf = NULL; struct camdd_buf_data *data; struct camdd_dev_pass *pass_dev; size_t new_len; struct camdd_buf_data *rb_data; int is_write = dev->write_dev; int eof_flush_needed = 0; int retval = 0; int error; pass_dev = &dev->dev_spec.pass; /* * If we've gotten EOF or our partner has, we should not continue * queueing I/O. 
If we're a writer, though, we should continue * to write any buffers that don't have EOF status. */ if ((dev->flags & CAMDD_DEV_FLAG_EOF) || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF) && (is_write == 0))) { /* * Tell the worker thread that we have seen EOF. */ retval = 1; /* * If we're the writer, send the buffer back with EOF status. */ if (is_write) { read_buf->status = CAMDD_STATUS_EOF; error = camdd_complete_peer_buf(dev, read_buf); } goto bailout; } if (is_write == 0) { buf = camdd_get_buf(dev, CAMDD_BUF_DATA); if (buf == NULL) { retval = -1; goto bailout; } data = &buf->buf_type_spec.data; retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len); if (retval != 0) { buf->status = CAMDD_STATUS_EOF; if ((buf->len == 0) && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT | CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) { camdd_release_buf(buf); goto bailout; } dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED; } data->fill_len = buf->len; data->src_start_offset = buf->lba * dev->sector_size; /* * Put this on the run queue. */ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; /* We're done. */ goto bailout; } /* * Check for new EOF status from the reader. */ if ((read_buf->status == CAMDD_STATUS_EOF) || (read_buf->status == CAMDD_STATUS_ERROR)) { dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; if ((STAILQ_FIRST(&dev->pending_queue) == NULL) && (read_buf->len == 0)) { camdd_complete_peer_buf(dev, read_buf); retval = 1; goto bailout; } else eof_flush_needed = 1; } /* * See if we have a buffer we're composing with pieces from our * partner thread. */ buf = STAILQ_FIRST(&dev->pending_queue); if (buf == NULL) { uint64_t lba; ssize_t len; retval = camdd_get_next_lba_len(dev, &lba, &len); if (retval != 0) { read_buf->status = CAMDD_STATUS_EOF; if (len == 0) { dev->flags |= CAMDD_DEV_FLAG_EOF; error = camdd_complete_peer_buf(dev, read_buf); goto bailout; } } /* * If we don't have a pending buffer, we need to grab a new * one from the free list or allocate another one. */ buf = camdd_get_buf(dev, CAMDD_BUF_DATA); if (buf == NULL) { retval = 1; goto bailout; } buf->lba = lba; buf->len = len; STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links); dev->num_pending_queue++; } data = &buf->buf_type_spec.data; rb_data = &read_buf->buf_type_spec.data; if ((rb_data->src_start_offset != dev->next_peer_pos_bytes) && (dev->debug != 0)) { printf("%s: WARNING: reader offset %#jx != expected offset " "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset, (uintmax_t)dev->next_peer_pos_bytes); } dev->next_peer_pos_bytes = rb_data->src_start_offset + (rb_data->fill_len - rb_data->resid); new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len; if (new_len < buf->len) { /* * There are three cases here: * 1. We need more data to fill up a block, so we put * this I/O on the queue and wait for more I/O. * 2. We have a pending buffer in the queue that is * smaller than our blocksize, but we got an EOF. So we * need to go ahead and flush the write out. * 3. We got an error. */ /* * Increment our fill length. */ data->fill_len += (rb_data->fill_len - rb_data->resid); /* * Add the new read buffer to the list for writing. */ STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); /* Increment the count */ buf->src_count++; if (eof_flush_needed == 0) { /* * We need to exit, because we don't have enough * data yet. */ goto bailout; } else { /* * Take the buffer off of the pending queue. 
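 *
 * (Illustrative numbers, not from the original: if the reader side
 * uses a 128 KiB blocksize and this writer uses 512 KiB, the first
 * three read buffers only raise fill_len to 128 KiB, 256 KiB and
 * 384 KiB, so the pending buffer stays where it is; the fourth read
 * makes new_len equal buf->len and the buffer moves to the run queue,
 * unless an EOF from the reader arrives first and forces the short
 * flush handled here.)
 *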
*/ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); dev->num_pending_queue--; /* * If we need an EOF flush, but there is no data * to flush, go ahead and return this buffer. */ if (data->fill_len == 0) { camdd_complete_buf(dev, buf, /*error_count*/0); retval = 1; goto bailout; } /* * Put this on the next queue for execution. */ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; } } else if (new_len == buf->len) { /* * We have enough data to completely fill one block, * so we're ready to issue the I/O. */ /* * Take the buffer off of the pending queue. */ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); dev->num_pending_queue--; /* * Add the new read buffer to the list for writing. */ STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); /* Increment the count */ buf->src_count++; /* * Increment our fill length. */ data->fill_len += (rb_data->fill_len - rb_data->resid); /* * Put this on the next queue for execution. */ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; } else { struct camdd_buf *idb; struct camdd_buf_indirect *indirect; uint32_t len_to_go, cur_offset; idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); if (idb == NULL) { retval = 1; goto bailout; } indirect = &idb->buf_type_spec.indirect; indirect->src_buf = read_buf; read_buf->refcount++; indirect->offset = 0; indirect->start_ptr = rb_data->buf; /* * We've already established that there is more * data in read_buf than we have room for in our * current write request. So this particular chunk * of the request should just be the remainder * needed to fill up a block. */ indirect->len = buf->len - (data->fill_len - data->resid); camdd_buf_add_child(buf, idb); /* * This buffer is ready to execute, so we can take * it off the pending queue and put it on the run * queue. */ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); dev->num_pending_queue--; STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; cur_offset = indirect->offset + indirect->len; /* * The resulting I/O would be too large to fit in * one block. We need to split this I/O into * multiple pieces. Allocate as many buffers as needed. */ for (len_to_go = rb_data->fill_len - rb_data->resid - indirect->len; len_to_go > 0;) { struct camdd_buf *new_buf; struct camdd_buf_data *new_data; uint64_t lba; ssize_t len; retval = camdd_get_next_lba_len(dev, &lba, &len); if ((retval != 0) && (len == 0)) { /* * The device has already been marked * as EOF, and there is no space left.
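 *
 * (Worked example for camdd_get_next_lba_len(), illustrative numbers
 * only: with a 512-byte sector size and a 64 KiB blocksize, each call
 * normally asks for 65536 / 512 = 128 sectors.  If max_sector is 999
 * and the next LBA is 960, then 960 + 128 exceeds max_sector + 1 =
 * 1000, so the request is trimmed to 1000 - 960 = 40 sectors and
 * *len becomes 40 * 512 = 20480 bytes, with a nonzero return value
 * marking the end of the device.  A later call starting at LBA 1000
 * gets *len == 0, which is exactly the case handled here.)
 *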
*/ goto bailout; } new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA); if (new_buf == NULL) { retval = 1; goto bailout; } new_buf->lba = lba; new_buf->len = len; idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); if (idb == NULL) { retval = 1; goto bailout; } indirect = &idb->buf_type_spec.indirect; indirect->src_buf = read_buf; read_buf->refcount++; indirect->offset = cur_offset; indirect->start_ptr = rb_data->buf + cur_offset; indirect->len = min(len_to_go, new_buf->len); #if 0 if (((indirect->len % dev->sector_size) != 0) || ((indirect->offset % dev->sector_size) != 0)) { warnx("offset %ju len %ju not aligned with " "sector size %u", indirect->offset, (uintmax_t)indirect->len, dev->sector_size); } #endif cur_offset += indirect->len; len_to_go -= indirect->len; camdd_buf_add_child(new_buf, idb); new_data = &new_buf->buf_type_spec.data; if ((new_data->fill_len == new_buf->len) || (eof_flush_needed != 0)) { STAILQ_INSERT_TAIL(&dev->run_queue, new_buf, links); dev->num_run_queue++; } else if (new_data->fill_len < buf->len) { STAILQ_INSERT_TAIL(&dev->pending_queue, new_buf, links); dev->num_pending_queue++; } else { warnx("%s: too much data in new " "buffer!", __func__); retval = 1; goto bailout; } } } bailout: return (retval); } void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes) { *our_depth = dev->cur_active_io + dev->num_run_queue; if (dev->num_peer_work_queue > dev->num_peer_done_queue) *peer_depth = dev->num_peer_work_queue - dev->num_peer_done_queue; else *peer_depth = 0; *our_bytes = *our_depth * dev->blocksize; *peer_bytes = dev->peer_bytes_queued; } void camdd_sig_handler(int sig) { if (sig == SIGINFO) need_status = 1; else { need_exit = 1; error_exit = 1; } sem_post(&camdd_sem); } void camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, struct timespec *start_time) { struct timespec done_time; uint64_t total_ns; long double mb_sec, total_sec; int error = 0; error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time); if (error != 0) { warn("Unable to get done time"); return; } timespecsub(&done_time, start_time); total_ns = done_time.tv_nsec + (done_time.tv_sec * 1000000000); total_sec = total_ns; total_sec /= 1000000000; fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n" "%.4Lf seconds elapsed\n", (uintmax_t)camdd_dev->bytes_transferred, (camdd_dev->write_dev == 0) ? "read from" : "written to", camdd_dev->device_name, (uintmax_t)other_dev->bytes_transferred, (other_dev->write_dev == 0) ? 
"read from" : "written to", other_dev->device_name, total_sec); mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred); mb_sec /= 1024 * 1024; mb_sec *= 1000000000; mb_sec /= total_ns; fprintf(stderr, "%.2Lf MB/sec\n", mb_sec); } int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, int retry_count, int timeout) { char *device = NULL; struct cam_device *new_cam_dev = NULL; struct camdd_dev *devs[2]; struct timespec start_time; pthread_t threads[2]; int unit = 0; int error = 0; int i; if (num_io_opts != 2) { warnx("Must have one input and one output path"); error = 1; goto bailout; } bzero(devs, sizeof(devs)); for (i = 0; i < num_io_opts; i++) { switch (io_opts[i].dev_type) { case CAMDD_DEV_PASS: { camdd_argmask new_arglist = CAMDD_ARG_NONE; int bus = 0, target = 0, lun = 0; char name[30]; int rv; if (isdigit(io_opts[i].dev_name[0])) { /* device specified as bus:target[:lun] */ rv = parse_btl(io_opts[i].dev_name, &bus, &target, &lun, &new_arglist); if (rv < 2) { warnx("numeric device specification " "must be either bus:target, or " "bus:target:lun"); error = 1; goto bailout; } /* default to 0 if lun was not specified */ if ((new_arglist & CAMDD_ARG_LUN) == 0) { lun = 0; new_arglist |= CAMDD_ARG_LUN; } } else { if (cam_get_device(io_opts[i].dev_name, name, sizeof name, &unit) == -1) { warnx("%s", cam_errbuf); error = 1; goto bailout; } device = strdup(name); new_arglist |= CAMDD_ARG_DEVICE |CAMDD_ARG_UNIT; } if (new_arglist & (CAMDD_ARG_BUS | CAMDD_ARG_TARGET)) new_cam_dev = cam_open_btl(bus, target, lun, O_RDWR, NULL); else new_cam_dev = cam_open_spec_device(device, unit, O_RDWR, NULL); if (new_cam_dev == NULL) { warnx("%s", cam_errbuf); error = 1; goto bailout; } devs[i] = camdd_probe_pass(new_cam_dev, /*io_opts*/ &io_opts[i], CAMDD_ARG_ERR_RECOVER, /*probe_retry_count*/ 3, /*probe_timeout*/ 5000, /*io_retry_count*/ retry_count, /*io_timeout*/ timeout); if (devs[i] == NULL) { warn("Unable to probe device %s%u", new_cam_dev->device_name, new_cam_dev->dev_unit_num); error = 1; goto bailout; } break; } case CAMDD_DEV_FILE: { int fd = -1; if (io_opts[i].dev_name[0] == '-') { if (io_opts[i].write_dev != 0) fd = STDOUT_FILENO; else fd = STDIN_FILENO; } else { if (io_opts[i].write_dev != 0) { fd = open(io_opts[i].dev_name, O_RDWR | O_CREAT, S_IWUSR |S_IRUSR); } else { fd = open(io_opts[i].dev_name, O_RDONLY); } } if (fd == -1) { warn("error opening file %s", io_opts[i].dev_name); error = 1; goto bailout; } devs[i] = camdd_probe_file(fd, &io_opts[i], retry_count, timeout); if (devs[i] == NULL) { error = 1; goto bailout; } break; } default: warnx("Unknown device type %d (%s)", io_opts[i].dev_type, io_opts[i].dev_name); error = 1; goto bailout; break; /*NOTREACHED */ } devs[i]->write_dev = io_opts[i].write_dev; devs[i]->start_offset_bytes = io_opts[i].offset; if (max_io != 0) { devs[i]->sector_io_limit = (devs[i]->start_offset_bytes / devs[i]->sector_size) + (max_io / devs[i]->sector_size) - 1; devs[i]->sector_io_limit = (devs[i]->start_offset_bytes / devs[i]->sector_size) + (max_io / devs[i]->sector_size) - 1; } devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes; devs[i]->next_completion_pos_bytes =devs[i]->start_offset_bytes; } devs[0]->peer_dev = devs[1]; devs[1]->peer_dev = devs[0]; devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes; devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes; sem_init(&camdd_sem, /*pshared*/ 0, 0); signal(SIGINFO, camdd_sig_handler); signal(SIGINT, camdd_sig_handler); error = 
clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time); if (error != 0) { warn("Unable to get start time"); goto bailout; } for (i = 0; i < num_io_opts; i++) { error = pthread_create(&threads[i], NULL, camdd_worker, (void *)devs[i]); if (error != 0) { warnc(error, "pthread_create() failed"); goto bailout; } } for (;;) { if ((sem_wait(&camdd_sem) == -1) || (need_exit != 0)) { struct kevent ke; for (i = 0; i < num_io_opts; i++) { EV_SET(&ke, (uintptr_t)&devs[i]->work_queue, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); devs[i]->flags |= CAMDD_DEV_FLAG_EOF; error = kevent(devs[i]->kq, &ke, 1, NULL, 0, NULL); if (error == -1) warn("%s: unable to wake up thread", __func__); error = 0; } break; } else if (need_status != 0) { camdd_print_status(devs[0], devs[1], &start_time); need_status = 0; } } for (i = 0; i < num_io_opts; i++) { pthread_join(threads[i], NULL); } camdd_print_status(devs[0], devs[1], &start_time); bailout: for (i = 0; i < num_io_opts; i++) camdd_free_dev(devs[i]); return (error + error_exit); } void usage(void) { fprintf(stderr, "usage: camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n" " <-i|-o file=/tmp/file,bs=512K,offset=1M>\n" " <-i|-o file=/dev/da0,bs=512K,offset=1M>\n" " <-i|-o file=/dev/nsa0,bs=512K>\n" " [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n" "Option description\n" "-i Specify input device/file and parameters\n" "-o Specify output device/file and parameters\n" "Input and Output parameters\n" "pass=name Specify a pass(4) device like pass0 or /dev/pass0\n" "file=name Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n" " or - for stdin/stdout\n" "bs=blocksize Specify blocksize in bytes, or using K, M, G, etc. suffix\n" "offset=len Specify starting offset in bytes or using K, M, G suffix\n" " NOTE: offset cannot be specified on tapes, pipes, stdin/out\n" "depth=N Specify a numeric queue depth. This only applies to pass(4)\n" "mcs=N Specify a minimum cmd size for pass(4) read/write commands\n" "Optional arguments\n" "-C retry_cnt Specify a retry count for pass(4) devices\n" "-E Enable CAM error recovery for pass(4) devices\n" "-m max_io Specify the maximum amount to be transferred in bytes or\n" " using K, G, M, etc. suffixes\n" "-t timeout Specify the I/O timeout to use with pass(4) devices\n" "-v Enable verbose error recovery\n" "-h Print this message\n"); } int camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts) { char *tmpstr, *tmpstr2; char *orig_tmpstr = NULL; int retval = 0; io_opts->write_dev = is_write; tmpstr = strdup(args); if (tmpstr == NULL) { warn("strdup failed"); retval = 1; goto bailout; } orig_tmpstr = tmpstr; while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) { char *name, *value; /* * If the user creates an empty parameter by putting in two * commas, skip over it and look for the next field. 
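 *
 * (Illustrative example, not from the original: an -o argument such
 * as
 *
 *	file=/tmp/file,bs=512k,,offset=1M
 *
 * is split by the outer strsep(&tmpstr, ",") into "file=/tmp/file",
 * "bs=512k", an empty token produced by the doubled comma, and
 * "offset=1M"; the empty token is skipped here, and each remaining
 * token is split on "=" into a name and a value below.)
 *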
*/ if (*tmpstr2 == '\0') continue; name = strsep(&tmpstr2, "="); if (*name == '\0') { warnx("Got empty I/O parameter name"); retval = 1; goto bailout; } value = strsep(&tmpstr2, "="); if ((value == NULL) || (*value == '\0')) { warnx("Empty I/O parameter value for %s", name); retval = 1; goto bailout; } if (strncasecmp(name, "file", 4) == 0) { io_opts->dev_type = CAMDD_DEV_FILE; io_opts->dev_name = strdup(value); if (io_opts->dev_name == NULL) { warn("Error allocating memory"); retval = 1; goto bailout; } } else if (strncasecmp(name, "pass", 4) == 0) { io_opts->dev_type = CAMDD_DEV_PASS; io_opts->dev_name = strdup(value); if (io_opts->dev_name == NULL) { warn("Error allocating memory"); retval = 1; goto bailout; } } else if ((strncasecmp(name, "bs", 2) == 0) || (strncasecmp(name, "blocksize", 9) == 0)) { retval = expand_number(value, &io_opts->blocksize); if (retval == -1) { warn("expand_number(3) failed on %s=%s", name, value); retval = 1; goto bailout; } } else if (strncasecmp(name, "depth", 5) == 0) { char *endptr; io_opts->queue_depth = strtoull(value, &endptr, 0); if (*endptr != '\0') { warnx("invalid queue depth %s", value); retval = 1; goto bailout; } } else if (strncasecmp(name, "mcs", 3) == 0) { char *endptr; io_opts->min_cmd_size = strtol(value, &endptr, 0); if ((*endptr != '\0') || ((io_opts->min_cmd_size > 16) || (io_opts->min_cmd_size < 0))) { warnx("invalid minimum cmd size %s", value); retval = 1; goto bailout; } } else if (strncasecmp(name, "offset", 6) == 0) { retval = expand_number(value, &io_opts->offset); if (retval == -1) { warn("expand_number(3) failed on %s=%s", name, value); retval = 1; goto bailout; } } else if (strncasecmp(name, "debug", 5) == 0) { char *endptr; io_opts->debug = strtoull(value, &endptr, 0); if (*endptr != '\0') { warnx("invalid debug level %s", value); retval = 1; goto bailout; } } else { warnx("Unrecognized parameter %s=%s", name, value); } } bailout: free(orig_tmpstr); return (retval); } int main(int argc, char **argv) { int c; camdd_argmask arglist = CAMDD_ARG_NONE; int timeout = 0, retry_count = 1; int error = 0; uint64_t max_io = 0; struct camdd_io_opts *opt_list = NULL; if (argc == 1) { usage(); exit(1); } opt_list = calloc(2, sizeof(struct camdd_io_opts)); if (opt_list == NULL) { warn("Unable to allocate option list"); error = 1; goto bailout; } while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){ switch (c) { case 'C': retry_count = strtol(optarg, NULL, 0); if (retry_count < 0) errx(1, "retry count %d is < 0", retry_count); arglist |= CAMDD_ARG_RETRIES; break; case 'E': arglist |= CAMDD_ARG_ERR_RECOVER; break; case 'i': case 'o': if (((c == 'i') && (opt_list[0].dev_type != CAMDD_DEV_NONE)) || ((c == 'o') && (opt_list[1].dev_type != CAMDD_DEV_NONE))) { errx(1, "Only one input and output path " "allowed"); } error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0, (c == 'o') ? 
&opt_list[1] : &opt_list[0]); if (error != 0) goto bailout; break; case 'm': error = expand_number(optarg, &max_io); if (error == -1) { warn("invalid maximum I/O amount %s", optarg); error = 1; goto bailout; } break; case 't': timeout = strtol(optarg, NULL, 0); if (timeout < 0) errx(1, "invalid timeout %d", timeout); /* Convert the timeout from seconds to ms */ timeout *= 1000; arglist |= CAMDD_ARG_TIMEOUT; break; case 'v': arglist |= CAMDD_ARG_VERBOSE; break; case 'h': default: usage(); exit(1); break; /*NOTREACHED*/ } } if ((opt_list[0].dev_type == CAMDD_DEV_NONE) || (opt_list[1].dev_type == CAMDD_DEV_NONE)) errx(1, "Must specify both -i and -o"); /* * Set the timeout if the user hasn't specified one. */ if (timeout == 0) timeout = CAMDD_PASS_RW_TIMEOUT; error = camdd_rw(opt_list, 2, max_io, retry_count, timeout); bailout: free(opt_list); exit(error); } Index: head/usr.sbin/mptutil/mpt_cam.c =================================================================== --- head/usr.sbin/mptutil/mpt_cam.c (revision 300546) +++ head/usr.sbin/mptutil/mpt_cam.c (revision 300547) @@ -1,569 +1,566 @@ /*- * Copyright (c) 2008 Yahoo!, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __RCSID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "mptutil.h" static int xptfd; static int xpt_open(void) { if (xptfd == 0) xptfd = open(XPT_DEVICE, O_RDWR); return (xptfd); } /* Fetch the path id of bus 0 for the opened mpt controller. */ static int fetch_path_id(path_id_t *path_id) { struct bus_match_pattern *b; union ccb ccb; size_t bufsize; int error; if (xpt_open() < 0) return (ENXIO); /* First, find the path id of bus 0 for this mpt controller. */ bzero(&ccb, sizeof(ccb)); ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 1; ccb.cdm.num_matches = 0; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = calloc(1, bufsize); bufsize = sizeof(struct dev_match_pattern) * 1; ccb.cdm.num_patterns = 1; ccb.cdm.pattern_buf_len = bufsize; ccb.cdm.patterns = calloc(1, bufsize); /* Match mptX bus 0. 
*/ ccb.cdm.patterns[0].type = DEV_MATCH_BUS; b = &ccb.cdm.patterns[0].pattern.bus_pattern; snprintf(b->dev_name, sizeof(b->dev_name), "mpt"); b->unit_number = mpt_unit; b->bus_id = 0; b->flags = BUS_MATCH_NAME | BUS_MATCH_UNIT | BUS_MATCH_BUS_ID; if (ioctl(xptfd, CAMIOCOMMAND, &ccb) < 0) { error = errno; free(ccb.cdm.matches); free(ccb.cdm.patterns); return (error); } free(ccb.cdm.patterns); if (((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) || (ccb.cdm.status != CAM_DEV_MATCH_LAST)) { warnx("fetch_path_id got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); free(ccb.cdm.matches); return (EIO); } /* We should have exactly 1 match for the bus. */ if (ccb.cdm.num_matches != 1 || ccb.cdm.matches[0].type != DEV_MATCH_BUS) { free(ccb.cdm.matches); return (ENOENT); } *path_id = ccb.cdm.matches[0].result.bus_result.path_id; free(ccb.cdm.matches); return (0); } int mpt_query_disk(U8 VolumeBus, U8 VolumeID, struct mpt_query_disk *qd) { struct periph_match_pattern *p; struct periph_match_result *r; union ccb ccb; path_id_t path_id; size_t bufsize; int error; /* mpt(4) only handles devices on bus 0. */ if (VolumeBus != 0) return (ENXIO); if (xpt_open() < 0) return (ENXIO); /* Find the path ID of bus 0. */ error = fetch_path_id(&path_id); if (error) return (error); bzero(&ccb, sizeof(ccb)); ccb.ccb_h.func_code = XPT_DEV_MATCH; ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; bufsize = sizeof(struct dev_match_result) * 5; ccb.cdm.num_matches = 0; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = calloc(1, bufsize); bufsize = sizeof(struct dev_match_pattern) * 1; ccb.cdm.num_patterns = 1; ccb.cdm.pattern_buf_len = bufsize; ccb.cdm.patterns = calloc(1, bufsize); /* Look for a "da" device at the specified target and lun. */ ccb.cdm.patterns[0].type = DEV_MATCH_PERIPH; p = &ccb.cdm.patterns[0].pattern.periph_pattern; p->path_id = path_id; snprintf(p->periph_name, sizeof(p->periph_name), "da"); p->target_id = VolumeID; p->flags = PERIPH_MATCH_PATH | PERIPH_MATCH_NAME | PERIPH_MATCH_TARGET; if (ioctl(xptfd, CAMIOCOMMAND, &ccb) < 0) { error = errno; free(ccb.cdm.matches); free(ccb.cdm.patterns); return (error); } free(ccb.cdm.patterns); if (((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) || (ccb.cdm.status != CAM_DEV_MATCH_LAST)) { warnx("mpt_query_disk got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); free(ccb.cdm.matches); return (EIO); } /* * We should have exactly 1 match for the peripheral. * However, if we don't get a match, don't print an error * message and return ENOENT. */ if (ccb.cdm.num_matches == 0) { free(ccb.cdm.matches); return (ENOENT); } if (ccb.cdm.num_matches != 1) { warnx("mpt_query_disk got %d matches, expected 1", ccb.cdm.num_matches); free(ccb.cdm.matches); return (EIO); } if (ccb.cdm.matches[0].type != DEV_MATCH_PERIPH) { warnx("mpt_query_disk got wrong CAM match"); free(ccb.cdm.matches); return (EIO); } /* Copy out the data. 
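 *
 * (Illustrative example, not from the original: a RAID volume that
 * mpt(4) exposes at target 2 on bus 0, and that da(4) attached as
 * unit 1, yields a single periph match with periph_name "da" and
 * unit_number 1, so qd->devname comes out as "da1".)
 *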
*/ r = &ccb.cdm.matches[0].result.periph_result; snprintf(qd->devname, sizeof(qd->devname), "%s%d", r->periph_name, r->unit_number); free(ccb.cdm.matches); return (0); } static int periph_is_volume(CONFIG_PAGE_IOC_2 *ioc2, struct periph_match_result *r) { CONFIG_PAGE_IOC_2_RAID_VOL *vol; int i; if (ioc2 == NULL) return (0); vol = ioc2->RaidVolume; for (i = 0; i < ioc2->NumActiveVolumes; vol++, i++) { if (vol->VolumeBus == 0 && vol->VolumeID == r->target_id) return (1); } return (0); } /* Much borrowed from scsireadcapacity() in src/sbin/camcontrol/camcontrol.c. */ static int fetch_scsi_capacity(struct cam_device *dev, struct mpt_standalone_disk *disk) { struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; union ccb *ccb; int error; ccb = cam_getccb(dev); if (ccb == NULL) return (ENOMEM); /* Zero the rest of the ccb. */ - bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_read_capacity(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG, &rcap, SSD_FULL_SIZE, 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(dev, ccb) < 0) { error = errno; cam_freeccb(ccb); return (error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_freeccb(ccb); return (EIO); } /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (scsi_4btoul(rcap.addr) != 0xffffffff) { disk->maxlba = scsi_4btoul(rcap.addr); cam_freeccb(ccb); return (0); } /* Zero the rest of the ccb. */ - bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); scsi_read_capacity_16(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG, 0, 0, 0, (uint8_t *)&rcaplong, sizeof(rcaplong), SSD_FULL_SIZE, 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(dev, ccb) < 0) { error = errno; cam_freeccb(ccb); return (error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_freeccb(ccb); return (EIO); } cam_freeccb(ccb); disk->maxlba = scsi_8btou64(rcaplong.addr); return (0); } /* Borrowed heavily from scsi_all.c:scsi_print_inquiry(). */ static void format_scsi_inquiry(struct mpt_standalone_disk *disk, struct scsi_inquiry_data *inq_data) { char vendor[16], product[48], revision[16], rstr[12]; if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) return; if (SID_TYPE(inq_data) != T_DIRECT) return; if (SID_QUAL(inq_data) != SID_QUAL_LU_CONNECTED) return; cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor), sizeof(vendor)); cam_strvis(product, inq_data->product, sizeof(inq_data->product), sizeof(product)); cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision), sizeof(revision)); /* Hack for SATA disks, no idea how to tell speed. */ if (strcmp(vendor, "ATA") == 0) { snprintf(disk->inqstring, sizeof(disk->inqstring), "<%s %s> SATA", product, revision); return; } switch (SID_ANSI_REV(inq_data)) { case SCSI_REV_CCS: strcpy(rstr, "SCSI-CCS"); break; case 5: strcpy(rstr, "SAS"); break; default: snprintf(rstr, sizeof (rstr), "SCSI-%d", SID_ANSI_REV(inq_data)); break; } snprintf(disk->inqstring, sizeof(disk->inqstring), "<%s %s %s> %s", vendor, product, revision, rstr); } /* Much borrowed from scsiinquiry() in src/sbin/camcontrol/camcontrol.c.
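 *
 * (Aside on fetch_scsi_capacity() above, illustrative numbers only: a
 * 1 TB disk with 512-byte sectors has a last LBA of roughly
 * 1.95 billion, which fits in 32 bits, so the short READ CAPACITY(10)
 * reply is used directly.  A 4 TB disk has close to 7.8 billion
 * blocks, so the 10-byte command reports 0xffffffff and the 16-byte
 * READ CAPACITY is issued to obtain the real maxlba.)
 *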
*/ static int fetch_scsi_inquiry(struct cam_device *dev, struct mpt_standalone_disk *disk) { struct scsi_inquiry_data *inq_buf; union ccb *ccb; int error; ccb = cam_getccb(dev); if (ccb == NULL) return (ENOMEM); /* Zero the rest of the ccb. */ - bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - - sizeof(struct ccb_hdr)); + CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); inq_buf = calloc(1, sizeof(*inq_buf)); if (inq_buf == NULL) { cam_freeccb(ccb); return (ENOMEM); } scsi_inquiry(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG, (void *)inq_buf, SHORT_INQUIRY_LENGTH, 0, 0, SSD_FULL_SIZE, 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(dev, ccb) < 0) { error = errno; free(inq_buf); cam_freeccb(ccb); return (error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { free(inq_buf); cam_freeccb(ccb); return (EIO); } cam_freeccb(ccb); format_scsi_inquiry(disk, inq_buf); free(inq_buf); return (0); } int mpt_fetch_disks(int fd, int *ndisks, struct mpt_standalone_disk **disksp) { CONFIG_PAGE_IOC_2 *ioc2; struct mpt_standalone_disk *disks; struct periph_match_pattern *p; struct periph_match_result *r; struct cam_device *dev; union ccb ccb; path_id_t path_id; size_t bufsize; int count, error; uint32_t i; if (xpt_open() < 0) return (ENXIO); error = fetch_path_id(&path_id); if (error) return (error); for (count = 100;; count+= 100) { /* Try to fetch 'count' disks in one go. */ bzero(&ccb, sizeof(ccb)); ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * (count + 1); ccb.cdm.num_matches = 0; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = calloc(1, bufsize); bufsize = sizeof(struct dev_match_pattern) * 1; ccb.cdm.num_patterns = 1; ccb.cdm.pattern_buf_len = bufsize; ccb.cdm.patterns = calloc(1, bufsize); /* Match any "da" peripherals. */ ccb.cdm.patterns[0].type = DEV_MATCH_PERIPH; p = &ccb.cdm.patterns[0].pattern.periph_pattern; p->path_id = path_id; snprintf(p->periph_name, sizeof(p->periph_name), "da"); p->flags = PERIPH_MATCH_PATH | PERIPH_MATCH_NAME; if (ioctl(xptfd, CAMIOCOMMAND, &ccb) < 0) { error = errno; free(ccb.cdm.matches); free(ccb.cdm.patterns); return (error); } free(ccb.cdm.patterns); /* Check for CCB errors. */ if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { free(ccb.cdm.matches); return (EIO); } /* If we need a longer list, try again. */ if (ccb.cdm.status == CAM_DEV_MATCH_MORE) { free(ccb.cdm.matches); continue; } /* If we got an error, abort. */ if (ccb.cdm.status != CAM_DEV_MATCH_LAST) { free(ccb.cdm.matches); return (EIO); } break; } /* Shortcut if we don't have any "da" devices. */ if (ccb.cdm.num_matches == 0) { free(ccb.cdm.matches); *ndisks = 0; *disksp = NULL; return (0); } /* We should have N matches, 1 for each "da" device. */ for (i = 0; i < ccb.cdm.num_matches; i++) { if (ccb.cdm.matches[i].type != DEV_MATCH_PERIPH) { warnx("mpt_fetch_disks got wrong CAM matches"); free(ccb.cdm.matches); return (EIO); } } /* * Some of the "da" peripherals may be for RAID volumes, so * fetch the IOC 2 page (list of RAID volumes) so we can * exclude them from the list. 
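 *
 * (Aside on the match loop above, illustrative only: the first pass
 * allocates room for count + 1 = 101 results; with, say, 130 "da"
 * peripherals the transport layer returns CAM_DEV_MATCH_MORE, the
 * result buffer is thrown away, and the query is reissued with room
 * for 201 results, which then finishes with CAM_DEV_MATCH_LAST.)
 *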
*/ ioc2 = mpt_read_ioc_page(fd, 2, NULL); if (ioc2 == NULL) return (errno); disks = calloc(ccb.cdm.num_matches, sizeof(*disks)); count = 0; for (i = 0; i < ccb.cdm.num_matches; i++) { r = &ccb.cdm.matches[i].result.periph_result; if (periph_is_volume(ioc2, r)) continue; disks[count].bus = 0; disks[count].target = r->target_id; snprintf(disks[count].devname, sizeof(disks[count].devname), "%s%d", r->periph_name, r->unit_number); dev = cam_open_device(disks[count].devname, O_RDWR); if (dev != NULL) { fetch_scsi_capacity(dev, &disks[count]); fetch_scsi_inquiry(dev, &disks[count]); cam_close_device(dev); } count++; } free(ccb.cdm.matches); free(ioc2); *ndisks = count; *disksp = disks; return (0); } /* * Instruct the mpt(4) device to rescan its busses to find new devices * such as disks whose RAID physdisk page was removed or volumes that * were created. If id is -1, the entire bus is rescanned. * Otherwise, only devices at the specified ID are rescanned. If bus * is -1, then all busses are scanned instead of the specified bus. * Note that currently, only bus 0 is supported. */ int mpt_rescan_bus(int bus, int id) { union ccb ccb; path_id_t path_id; int error; /* mpt(4) only handles devices on bus 0. */ if (bus != -1 && bus != 0) return (EINVAL); if (xpt_open() < 0) return (ENXIO); error = fetch_path_id(&path_id); if (error) return (error); /* Perform the actual rescan. */ bzero(&ccb, sizeof(ccb)); ccb.ccb_h.path_id = path_id; if (id == -1) { ccb.ccb_h.func_code = XPT_SCAN_BUS; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.timeout = 5000; } else { ccb.ccb_h.func_code = XPT_SCAN_LUN; ccb.ccb_h.target_id = id; ccb.ccb_h.target_lun = 0; } ccb.crcn.flags = CAM_FLAG_NONE; /* Run this at a low priority. */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(xptfd, CAMIOCOMMAND, &ccb) == -1) return (errno); if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("mpt_rescan_bus rescan got CAM error %#x\n", ccb.ccb_h.status & CAM_STATUS_MASK); return (EIO); } return (0); }