diff --git a/sys/cam/nvme/nvme_da.c b/sys/cam/nvme/nvme_da.c index baeaad182f3a..f1c9a9ec9fea 100644 --- a/sys/cam/nvme/nvme_da.c +++ b/sys/cam/nvme/nvme_da.c @@ -1,1363 +1,1356 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015 Netflix, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Derived from ata_da.c: * Copyright (c) 2009 Alexander Motin */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include typedef enum { NDA_STATE_NORMAL } nda_state; typedef enum { NDA_FLAG_OPEN = 0x0001, NDA_FLAG_DIRTY = 0x0002, NDA_FLAG_SCTX_INIT = 0x0004, } nda_flags; #define NDA_FLAG_STRING \ "\020" \ "\001OPEN" \ "\002DIRTY" \ "\003SCTX_INIT" typedef enum { NDA_Q_4K = 0x01, NDA_Q_NONE = 0x00, } nda_quirks; #define NDA_Q_BIT_STRING \ "\020" \ "\001Bit 0" typedef enum { NDA_CCB_BUFFER_IO = 0x01, NDA_CCB_DUMP = 0x02, NDA_CCB_TRIM = 0x03, NDA_CCB_PASS = 0x04, NDA_CCB_TYPE_MASK = 0x0F, } nda_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ccb_h.ppriv_field0 #define ccb_bp ccb_h.ppriv_ptr1 /* For NDA_CCB_BUFFER_IO */ #define ccb_trim ccb_h.ppriv_ptr1 /* For NDA_CCB_TRIM */ struct nda_softc { struct cam_iosched_softc *cam_iosched; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ nda_state state; nda_flags flags; nda_quirks quirks; int unmappedio; quad_t deletes; uint32_t nsid; /* Namespace ID for this nda device */ struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; uint64_t trim_count; uint64_t trim_ranges; uint64_t trim_lbas; #ifdef CAM_TEST_FAILURE int force_read_error; int force_write_error; int periodic_read_error; int periodic_read_count; #endif #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int timeouts; u_int errors; u_int invalidations; #endif }; struct nda_trim_request { struct nvme_dsm_range dsm[NVME_MAX_DSM_TRIM / sizeof(struct nvme_dsm_range)]; TAILQ_HEAD(, bio) 
bps; }; _Static_assert(NVME_MAX_DSM_TRIM % sizeof(struct nvme_dsm_range) == 0, "NVME_MAX_DSM_TRIM must be an integral number of ranges"); /* Need quirk table */ static disk_ioctl_t ndaioctl; static disk_strategy_t ndastrategy; static dumper_t ndadump; static periph_init_t ndainit; static void ndaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void ndasysctlinit(void *context, int pending); static int ndaflagssysctl(SYSCTL_HANDLER_ARGS); static periph_ctor_t ndaregister; static periph_dtor_t ndacleanup; static periph_start_t ndastart; static periph_oninv_t ndaoninvalidate; static void ndadone(struct cam_periph *periph, union ccb *done_ccb); static int ndaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void ndashutdown(void *arg, int howto); static void ndasuspend(void *arg); #ifndef NDA_DEFAULT_SEND_ORDERED #define NDA_DEFAULT_SEND_ORDERED 1 #endif #ifndef NDA_DEFAULT_TIMEOUT #define NDA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef NDA_DEFAULT_RETRY #define NDA_DEFAULT_RETRY 4 #endif #ifndef NDA_MAX_TRIM_ENTRIES #define NDA_MAX_TRIM_ENTRIES (NVME_MAX_DSM_TRIM / sizeof(struct nvme_dsm_range))/* Number of DSM trims to use, max 256 */ #endif static SYSCTL_NODE(_kern_cam, OID_AUTO, nda, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Direct Access Disk driver"); //static int nda_retry_count = NDA_DEFAULT_RETRY; static int nda_send_ordered = NDA_DEFAULT_SEND_ORDERED; static int nda_default_timeout = NDA_DEFAULT_TIMEOUT; static int nda_max_trim_entries = NDA_MAX_TRIM_ENTRIES; static int nda_enable_biospeedup = 1; static int nda_nvd_compat = 1; SYSCTL_INT(_kern_cam_nda, OID_AUTO, max_trim, CTLFLAG_RDTUN, &nda_max_trim_entries, NDA_MAX_TRIM_ENTRIES, "Maximum number of BIO_DELETE to send down as a DSM TRIM."); SYSCTL_INT(_kern_cam_nda, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN, &nda_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing."); SYSCTL_INT(_kern_cam_nda, OID_AUTO, nvd_compat, CTLFLAG_RDTUN, &nda_nvd_compat, 1, "Enable creation of nvd aliases."); /* * All NVMe media is non-rotational, so all nvme device instances * share this to implement the sysctl. */ static int nda_rotating_media = 0; static struct periph_driver ndadriver = { ndainit, "nda", TAILQ_HEAD_INITIALIZER(ndadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(nda, ndadriver); static MALLOC_DEFINE(M_NVMEDA, "nvme_da", "nvme_da buffers"); /* * nice wrappers. Maybe these belong in nvme_all.c instead of * here, but this is the only place that uses these. Should * we ever grow another NVME periph, we should move them * all there wholesale. 
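 */

/*
 * Illustrative sketch, not part of this change: NDA_MAX_TRIM_ENTRIES above
 * is simply NVME_MAX_DSM_TRIM divided by the size of one DSM range
 * descriptor.  Assuming the stock header values (a 4 KiB maximum payload
 * and a 16-byte descriptor holding attributes, a block count, and a
 * starting LBA), that works out to 256 ranges per trim command:
 */
#if 0	/* example only */
_Static_assert(sizeof(struct nvme_dsm_range) == 16,
    "4 bytes of attributes + 4-byte block count + 8-byte starting LBA");
_Static_assert(NVME_MAX_DSM_TRIM / sizeof(struct nvme_dsm_range) == 256,
    "a 4 KiB DSM payload carries at most 256 ranges");
#endif

/*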
*/ static void nda_nvme_flush(struct nda_softc *softc, struct ccb_nvmeio *nvmeio) { cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ CAM_DIR_NONE, /* flags */ NULL, /* data_ptr */ 0, /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_flush_cmd(&nvmeio->cmd, softc->nsid); } static void nda_nvme_trim(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, void *payload, uint32_t num_ranges) { cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ CAM_DIR_OUT, /* flags */ payload, /* data_ptr */ num_ranges * sizeof(struct nvme_dsm_range), /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_trim_cmd(&nvmeio->cmd, softc->nsid, num_ranges); } static void nda_nvme_write(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, void *payload, uint64_t lba, uint32_t len, uint32_t count) { cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ CAM_DIR_OUT, /* flags */ payload, /* data_ptr */ len, /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_write_cmd(&nvmeio->cmd, softc->nsid, lba, count); } static void nda_nvme_rw_bio(struct nda_softc *softc, struct ccb_nvmeio *nvmeio, struct bio *bp, uint32_t rwcmd) { int flags = rwcmd == NVME_OPC_READ ? CAM_DIR_IN : CAM_DIR_OUT; void *payload; uint64_t lba; uint32_t count; if (bp->bio_flags & BIO_UNMAPPED) { flags |= CAM_DATA_BIO; payload = bp; } else { payload = bp->bio_data; } lba = bp->bio_pblkno; count = bp->bio_bcount / softc->disk->d_sectorsize; cam_fill_nvmeio(nvmeio, 0, /* retries */ ndadone, /* cbfcnp */ flags, /* flags */ payload, /* data_ptr */ bp->bio_bcount, /* dxfer_len */ nda_default_timeout * 1000); /* timeout 30s */ nvme_ns_rw_cmd(&nvmeio->cmd, rwcmd, softc->nsid, lba, count); } static int ndaopen(struct disk *dp) { struct cam_periph *periph; struct nda_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != 0) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("ndaopen\n")); softc = (struct nda_softc *)periph->softc; softc->flags |= NDA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int ndaclose(struct disk *dp) { struct cam_periph *periph; struct nda_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct nda_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("ndaclose\n")); if ((softc->flags & NDA_FLAG_DIRTY) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0 && cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); nda_nvme_flush(softc, &ccb->nvmeio); error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0, /*sense_flags*/0, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); else softc->flags &= ~NDA_FLAG_DIRTY; xpt_release_ccb(ccb); cam_periph_unhold(periph); } softc->flags &= ~NDA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ndaclose", 1); KASSERT(softc->outstanding_cmds == 0, ("nda %d outstanding commands", softc->outstanding_cmds)); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void ndaschedule(struct cam_periph *periph) { struct nda_softc *softc = (struct nda_softc *)periph->softc; if (softc->state != NDA_STATE_NORMAL) return; 
	cam_iosched_schedule(softc->cam_iosched, periph);
}

static int
ndaioctl(struct disk *dp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct cam_periph *periph;
-	struct nda_softc *softc;

	periph = (struct cam_periph *)dp->d_drv1;
-	softc = (struct nda_softc *)periph->softc;

	switch (cmd) {
	case NVME_IO_TEST:
	case NVME_BIO_TEST:
		/*
		 * These don't map well to the underlying CCBs, so
		 * they are unsupported via CAM.
		 */
		return (ENOTTY);
	case NVME_GET_NSID:
	{
		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)data;
		struct ccb_pathinq cpi;

		xpt_path_inq(&cpi, periph->path);
		strncpy(gnsid->cdev, cpi.xport_specific.nvme.dev_name,
		    sizeof(gnsid->cdev));
		gnsid->nsid = cpi.xport_specific.nvme.nsid;
		return (0);
	}
	case NVME_PASSTHROUGH_CMD:
	{
		struct nvme_pt_command *pt;
		union ccb *ccb;
		struct cam_periph_map_info mapinfo;
		u_int maxmap = dp->d_maxsize;
		int error;

		/*
		 * Create an NVME_IO CCB to do the passthrough command.
		 */
		pt = (struct nvme_pt_command *)data;
		ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&ccb->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb->ccb_state = NDA_CCB_PASS;
		cam_fill_nvmeio(&ccb->nvmeio,
		    0,			/* Retries */
		    ndadone,
		    (pt->is_read ? CAM_DIR_IN : CAM_DIR_OUT) | CAM_DATA_VADDR,
		    pt->buf,
		    pt->len,
		    nda_default_timeout * 1000);
		memcpy(&ccb->nvmeio.cmd, &pt->cmd, sizeof(pt->cmd));

		/*
		 * Wire the user memory in this request for the I/O
		 */
		memset(&mapinfo, 0, sizeof(mapinfo));
		error = cam_periph_mapmem(ccb, &mapinfo, maxmap);
		if (error)
			goto out;

		/*
		 * Lock the periph and run the command.
		 */
		cam_periph_lock(periph);
		cam_periph_runccb(ccb, NULL, CAM_RETRY_SELTO,
		    SF_RETRY_UA | SF_NO_PRINT, NULL);

		/*
		 * Tear down mapping and return status.
		 */
		cam_periph_unlock(periph);
		cam_periph_unmapmem(ccb, &mapinfo);
		error = (ccb->ccb_h.status == CAM_REQ_CMP) ? 0 : EIO;
out:
		cam_periph_lock(periph);
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
		return (error);
	}
	default:
		break;
	}
	return (ENOTTY);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ndastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct nda_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct nda_softc *)periph->softc;

	cam_periph_lock(periph);

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastrategy(%p)\n", bp));

	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	if (bp->bio_cmd == BIO_DELETE)
		softc->deletes++;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	cam_iosched_queue_work(softc->cam_iosched, bp);

	/*
	 * Schedule ourselves for performing the work.
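	 */

/*
 * Illustrative sketch, not part of this change: the NVME_PASSTHROUGH_CMD
 * case in ndaioctl() above takes the same struct nvme_pt_command that the
 * nvme(4) character devices accept, so a userland program can send raw
 * admin or I/O commands through an nda disk node.  The device path below
 * is an assumption, and the identify data comes back in wire (little
 * endian) byte order, hence the swap before use.
 */
#if 0	/* userland example only */
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <dev/nvme/nvme.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>

int
main(void)
{
	struct nvme_controller_data cdata;
	struct nvme_pt_command pt;
	int fd;

	if ((fd = open("/dev/nda0", O_RDWR)) < 0)	/* device assumed */
		err(1, "open");

	memset(&pt, 0, sizeof(pt));
	pt.cmd.opc = NVME_OPC_IDENTIFY;
	pt.cmd.cdw10 = htole32(1);	/* CNS 1: identify controller */
	pt.buf = &cdata;
	pt.len = sizeof(cdata);
	pt.is_read = 1;			/* data moves controller -> host */

	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
		err(1, "NVME_PASSTHROUGH_CMD");
	if (nvme_completion_is_error(&pt.cpl))
		errx(1, "identify command failed");
	nvme_controller_data_swapbytes(&cdata);
	return (0);
}
#endif

/*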
*/ ndaschedule(periph); cam_periph_unlock(periph); return; } static int ndadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct nda_softc *softc; u_int secsize; struct ccb_nvmeio nvmeio; struct disk *dp; uint64_t lba; uint32_t count; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct nda_softc *)periph->softc; secsize = softc->disk->d_sectorsize; lba = offset / secsize; count = length / secsize; if ((periph->flags & CAM_PERIPH_INVALID) != 0) return (ENXIO); /* xpt_get_ccb returns a zero'd allocation for the ccb, mimic that here */ memset(&nvmeio, 0, sizeof(nvmeio)); if (length > 0) { xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); nvmeio.ccb_state = NDA_CCB_DUMP; nda_nvme_write(softc, &nvmeio, virtual, lba, length, count); error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) printf("Aborting dump due to I/O error %d.\n", error); return (error); } /* Flush */ xpt_setup_ccb(&nvmeio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); nvmeio.ccb_state = NDA_CCB_DUMP; nda_nvme_flush(softc, &nvmeio); error = cam_periph_runccb((union ccb *)&nvmeio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) xpt_print(periph->path, "flush cmd failed\n"); return (error); } static void ndainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, ndaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("nda: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (nda_send_ordered) { /* Register our event handlers */ if ((EVENTHANDLER_REGISTER(power_suspend, ndasuspend, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("ndainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(shutdown_post_sync, ndashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("ndainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void ndadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void ndaoninvalidate(struct cam_periph *periph) { struct nda_softc *softc; softc = (struct nda_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, ndaasync, periph, periph->path); #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); disk_gone(softc->disk); } static void ndacleanup(struct cam_periph *periph) { struct nda_softc *softc; softc = (struct nda_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... 
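 */

/*
 * Illustrative sketch, not part of this change: ndasysctlinit() below
 * publishes the per-unit counters (deletes, trim_count, trim_ranges,
 * trim_lbas, ...) under kern.cam.nda.<unit>.  Reading one from userland
 * is a plain sysctlbyname() call; unit 0 is assumed here.
 */
#if 0	/* userland example only */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t trims;
	size_t len = sizeof(trims);

	if (sysctlbyname("kern.cam.nda.0.trim_count", &trims, &len,
	    NULL, 0) < 0)
		err(1, "sysctlbyname");
	printf("%ju DSM trim commands sent\n", (uintmax_t)trims);
	return (0);
}
#endif

/*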
*/ if ((softc->flags & NDA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void ndaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_NVME) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(ndaregister, ndaoninvalidate, ndacleanup, ndastart, "nda", CAM_PERIPH_BIO, path, ndaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("ndaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct nda_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_LOST_DEVICE: default: cam_periph_async(periph, code, path, arg); break; } } static void ndasysctlinit(void *context, int pending) { struct cam_periph *periph; struct nda_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } softc = (struct nda_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM NDA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= NDA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_nda), OID_AUTO, tmpstr2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("ndasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_QUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "deletes", CTLFLAG_RD, &softc->deletes, "Number of BIO_DELETE requests"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_count", CTLFLAG_RD, &softc->trim_count, "Total number of unmap/dsm commands sent"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_ranges", CTLFLAG_RD, &softc->trim_ranges, "Total number of ranges in unmap/dsm commands"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_lbas", CTLFLAG_RD, &softc->trim_lbas, "Total lbas in the unmap/dsm commands sent"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &nda_rotating_media, 1, "Rotating media"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0, ndaflagssysctl, "A", "Flags for drive"); #ifdef CAM_IO_STATS softc->sysctl_stats_tree = 
SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics"); if (softc->sysctl_stats_tree == NULL) { printf("ndasysctlinit: unable to allocate sysctl tree for stats\n"); cam_periph_release(periph); return; } SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM."); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations."); #endif #ifdef CAM_TEST_FAILURE SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0, cam_periph_invalidate_sysctl, "I", "Write 1 to invalidate the drive immediately"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int ndaflagssysctl(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; struct nda_softc *softc = arg1; int error; sbuf_new_for_sysctl(&sbuf, NULL, 0, req); if (softc->flags != 0) sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, NDA_FLAG_STRING); else sbuf_printf(&sbuf, "0"); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } static int ndagetattr(struct bio *bp) { int ret; struct cam_periph *periph; if (g_handleattr_int(bp, "GEOM::canspeedup", nda_enable_biospeedup)) return (EJUSTRETURN); periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static cam_status ndaregister(struct cam_periph *periph, void *arg) { struct nda_softc *softc; struct disk *disk; struct ccb_pathinq cpi; const struct nvme_namespace_data *nsd; const struct nvme_controller_data *cd; char announce_buf[80]; uint8_t flbas_fmt, lbads, vwc_present; u_int maxio; int quirks; nsd = nvme_get_identify_ns(periph); cd = nvme_get_identify_cntrl(periph); softc = (struct nda_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("ndaregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("ndaregister: Unable to probe new device. 
" "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } /* ident_data parsing */ periph->softc = softc; softc->quirks = NDA_Q_NONE; xpt_path_inq(&cpi, periph->path); TASK_INIT(&softc->sysctl_task, 0, ndasysctlinit, periph); /* * The name space ID is the lun, save it for later I/O */ softc->nsid = (uint32_t)xpt_path_lun_id(periph->path); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); snprintf(announce_buf, sizeof(announce_buf), "kern.cam.nda.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(announce_buf, &quirks); softc->quirks = quirks; cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->disk = disk = disk_alloc(); disk->d_rotation_rate = DISK_RR_NON_ROTATING; disk->d_open = ndaopen; disk->d_close = ndaclose; disk->d_strategy = ndastrategy; disk->d_ioctl = ndaioctl; disk->d_getattr = ndagetattr; if (cam_sim_pollable(periph->sim)) disk->d_dump = ndadump; disk->d_gone = ndadiskgonecb; disk->d_name = "nda"; disk->d_drv1 = periph; disk->d_unit = periph->unit_number; maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > maxphys) maxio = maxphys; /* for safety */ disk->d_maxsize = maxio; flbas_fmt = (nsd->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & NVME_NS_DATA_FLBAS_FORMAT_MASK; lbads = (nsd->lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) & NVME_NS_DATA_LBAF_LBADS_MASK; disk->d_sectorsize = 1 << lbads; disk->d_mediasize = (off_t)(disk->d_sectorsize * nsd->nsze); disk->d_delmaxsize = disk->d_mediasize; disk->d_flags = DISKFLAG_DIRECT_COMPLETION; if (nvme_ctrlr_has_dataset_mgmt(cd)) disk->d_flags |= DISKFLAG_CANDELETE; vwc_present = (cd->vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) & NVME_CTRLR_DATA_VWC_PRESENT_MASK; if (vwc_present) disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { disk->d_flags |= DISKFLAG_UNMAPPED_BIO; softc->unmappedio = 1; } /* * d_ident and d_descr are both far bigger than the length of either * the serial or model number strings. */ cam_strvis(disk->d_descr, cd->mn, NVME_MODEL_NUMBER_LENGTH, sizeof(disk->d_descr)); cam_strvis(disk->d_ident, cd->sn, NVME_SERIAL_NUMBER_LENGTH, sizeof(disk->d_ident)); disk->d_hba_vendor = cpi.hba_vendor; disk->d_hba_device = cpi.hba_device; disk->d_hba_subvendor = cpi.hba_subvendor; disk->d_hba_subdevice = cpi.hba_subdevice; snprintf(disk->d_attachment, sizeof(disk->d_attachment), "%s%d", cpi.dev_name, cpi.unit_number); if (((nsd->nsfeat >> NVME_NS_DATA_NSFEAT_NPVALID_SHIFT) & NVME_NS_DATA_NSFEAT_NPVALID_MASK) != 0 && nsd->npwg != 0) disk->d_stripesize = ((nsd->npwg + 1) * disk->d_sectorsize); else disk->d_stripesize = nsd->noiob * disk->d_sectorsize; disk->d_stripeoffset = 0; disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, disk->d_sectorsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); /* * Add alias for older nvd drives to ease transition. */ if (nda_nvd_compat) disk_add_alias(disk, "nvd"); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * ndadiskgonecb()) telling us that our provider has been freed. 
*/ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); cam_periph_unhold(periph); snprintf(announce_buf, sizeof(announce_buf), "%juMB (%ju %u byte sectors)", (uintmax_t)((uintmax_t)disk->d_mediasize / (1024*1024)), (uintmax_t)disk->d_mediasize / disk->d_sectorsize, disk->d_sectorsize); xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, NDA_Q_BIT_STRING); /* * Create our sysctl variables, now that we know * we have successfully attached. */ if (cam_periph_acquire(periph) == 0) taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Register for device going away and info about the drive * changing (though with NVMe, it can't) */ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, ndaasync, periph, periph->path); softc->state = NDA_STATE_NORMAL; return(CAM_REQ_CMP); } static void ndastart(struct cam_periph *periph, union ccb *start_ccb) { struct nda_softc *softc = (struct nda_softc *)periph->softc; struct ccb_nvmeio *nvmeio = &start_ccb->nvmeio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart\n")); switch (softc->state) { case NDA_STATE_NORMAL: { struct bio *bp; bp = cam_iosched_next_bio(softc->cam_iosched); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ndastart: bio %p\n", bp)); if (bp == NULL) { xpt_release_ccb(start_ccb); break; } switch (bp->bio_cmd) { case BIO_WRITE: softc->flags |= NDA_FLAG_DIRTY; /* FALLTHROUGH */ case BIO_READ: { #ifdef CAM_TEST_FAILURE int fail = 0; /* * Support the failure ioctls. If the command is a * read, and there are pending forced read errors, or * if a write and pending write errors, then fail this * operation with EIO. This is useful for testing * purposes. Also, support having every Nth read fail. * * This is a rather blunt tool. */ if (bp->bio_cmd == BIO_READ) { if (softc->force_read_error) { softc->force_read_error--; fail = 1; } if (softc->periodic_read_error > 0) { if (++softc->periodic_read_count >= softc->periodic_read_error) { softc->periodic_read_count = 0; fail = 1; } } } else { if (softc->force_write_error) { softc->force_write_error--; fail = 1; } } if (fail) { biofinish(bp, NULL, EIO); xpt_release_ccb(start_ccb); ndaschedule(periph); return; } #endif KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_bcount + bp->bio_ma_offset) / PAGE_SIZE == bp->bio_ma_n, ("Short bio %p", bp)); nda_nvme_rw_bio(softc, &start_ccb->nvmeio, bp, bp->bio_cmd == BIO_READ ? 
NVME_OPC_READ : NVME_OPC_WRITE); break; } case BIO_DELETE: { struct nvme_dsm_range *dsm_range, *dsm_end; struct nda_trim_request *trim; struct bio *bp1; int ents; uint32_t totalcount = 0, ranges = 0; trim = malloc(sizeof(*trim), M_NVMEDA, M_ZERO | M_NOWAIT); if (trim == NULL) { biofinish(bp, NULL, ENOMEM); xpt_release_ccb(start_ccb); ndaschedule(periph); return; } TAILQ_INIT(&trim->bps); bp1 = bp; ents = min(nitems(trim->dsm), nda_max_trim_entries); ents = max(ents, 1); dsm_range = trim->dsm; dsm_end = dsm_range + ents; do { TAILQ_INSERT_TAIL(&trim->bps, bp1, bio_queue); dsm_range->length = htole32(bp1->bio_bcount / softc->disk->d_sectorsize); dsm_range->starting_lba = htole64(bp1->bio_offset / softc->disk->d_sectorsize); ranges++; totalcount += dsm_range->length; dsm_range++; if (dsm_range >= dsm_end) break; bp1 = cam_iosched_next_trim(softc->cam_iosched); /* XXX -- Could collapse adjacent ranges, but we don't for now */ /* XXX -- Could limit based on total payload size */ } while (bp1 != NULL); start_ccb->ccb_trim = trim; nda_nvme_trim(softc, &start_ccb->nvmeio, trim->dsm, dsm_range - trim->dsm); start_ccb->ccb_state = NDA_CCB_TRIM; softc->trim_count++; softc->trim_ranges += ranges; softc->trim_lbas += totalcount; /* * Note: We can have multiple TRIMs in flight, so we don't call * cam_iosched_submit_trim(softc->cam_iosched); * since that forces the I/O scheduler to only schedule one at a time. * On NVMe drives, this is a performance disaster. */ goto out; } case BIO_FLUSH: nda_nvme_flush(softc, nvmeio); break; default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); ndaschedule(periph); return; } start_ccb->ccb_state = NDA_CCB_BUFFER_IO; start_ccb->ccb_bp = bp; out: start_ccb->ccb_h.flags |= CAM_UNLOCKED; softc->outstanding_cmds++; softc->refcount++; /* For submission only */ cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* Submission done */ /* May have more work to do, so ensure we stay scheduled */ ndaschedule(periph); break; } } } static void ndadone(struct cam_periph *periph, union ccb *done_ccb) { struct nda_softc *softc; struct ccb_nvmeio *nvmeio = &done_ccb->nvmeio; struct cam_path *path; int state; softc = (struct nda_softc *)periph->softc; path = done_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("ndadone\n")); state = nvmeio->ccb_state & NDA_CCB_TYPE_MASK; switch (state) { case NDA_CCB_BUFFER_IO: case NDA_CCB_TRIM: { int error; cam_periph_lock(periph); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = ndaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); error = 0; } if (state == NDA_CCB_BUFFER_IO) { struct bio *bp; bp = (struct bio *)done_ccb->ccb_bp; bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { bp->bio_resid = 0; } softc->outstanding_cmds--; /* * We need to call cam_iosched before we call biodone so that we * don't measure any activity that happens in the completion * routine, which in the case of sendfile can be quite * extensive. 
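 */

/*
 * Illustrative sketch, not part of this change: the BIO_DELETE case above
 * packs up to nda_max_trim_entries bios into a single DSM payload, one
 * range descriptor per bio.  Both fields are counted in logical blocks
 * and stored little-endian; e.g. a 1 MiB delete at byte offset 8 MiB on a
 * 512-byte-sector namespace becomes length 2048 at starting LBA 16384.
 */
#if 0	/* example only */
static void
example_encode_range(struct nvme_dsm_range *r, const struct bio *bp,
    u_int sectorsize)
{
	r->length = htole32(bp->bio_bcount / sectorsize);
	r->starting_lba = htole64(bp->bio_offset / sectorsize);
}
#endif

/*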
		 */
		cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
		xpt_release_ccb(done_ccb);
		ndaschedule(periph);
		cam_periph_unlock(periph);
		biodone(bp);
	} else { /* state == NDA_CCB_TRIM */
		struct nda_trim_request *trim;
		struct bio *bp1, *bp2;
		TAILQ_HEAD(, bio) queue;

		trim = nvmeio->ccb_trim;
		TAILQ_INIT(&queue);
		TAILQ_CONCAT(&queue, &trim->bps, bio_queue);
		free(trim, M_NVMEDA);
		/*
		 * Since we can have multiple trims in flight, we don't
		 * need to call this here.
		 * cam_iosched_trim_done(softc->cam_iosched);
		 */
		/*
		 * Tell the I/O scheduler that we're finishing the I/O so we
		 * can keep the books straight.  The first one we pass in the
		 * CCB, which has the timing information.  The rest we pass
		 * in NULL so we can keep proper counts.
		 */
		bp1 = TAILQ_FIRST(&queue);
		cam_iosched_bio_complete(softc->cam_iosched, bp1, done_ccb);
		xpt_release_ccb(done_ccb);
		softc->outstanding_cmds--;
		ndaschedule(periph);
		cam_periph_unlock(periph);
		while ((bp2 = TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, bp2, bio_queue);
			bp2->bio_error = error;
			if (error != 0) {
				bp2->bio_flags |= BIO_ERROR;
				bp2->bio_resid = bp1->bio_bcount;
			} else
				bp2->bio_resid = 0;
			if (bp1 != bp2)
				cam_iosched_bio_complete(softc->cam_iosched,
				    bp2, NULL);
			biodone(bp2);
		}
	}
	return;
	}
	case NDA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	case NDA_CCB_PASS:
		/* NVME_PASSTHROUGH_CMD runs this CCB and releases it */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
ndaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
-	struct nda_softc *softc;
-	struct cam_periph *periph;
-
-	periph = xpt_path_periph(ccb->ccb_h.path);
-	softc = (struct nda_softc *)periph->softc;

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
#ifdef CAM_IO_STATS
		softc->timeouts++;
#endif
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_ATA_STATUS_ERROR:
#ifdef CAM_IO_STATS
		softc->errors++;
#endif
		break;
	default:
		break;
	}
	return(cam_periph_error(ccb, cam_flags, sense_flags));
}

/*
 * Step through all NDA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
ndaflush(void)
{
	struct cam_periph *periph;
	struct nda_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &ndadriver) {
		softc = (struct nda_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/*
			 * If we panicked with the lock held or the periph is
			 * not open, do not recurse.  Otherwise, call ndadump
			 * since that avoids the sleep cam_periph_getccb does
			 * if no CCBs are available.
*/ if (!cam_periph_owned(periph) && (softc->flags & NDA_FLAG_OPEN)) { ndadump(softc->disk, NULL, 0, 0, 0); } continue; } /* * We only sync the cache if the drive is still open */ cam_periph_lock(periph); if ((softc->flags & NDA_FLAG_OPEN) == 0) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); nda_nvme_flush(softc, &ccb->nvmeio); error = cam_periph_runccb(ccb, ndaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void ndashutdown(void *arg, int howto) { ndaflush(); } static void ndasuspend(void *arg) { ndaflush(); } diff --git a/sys/cam/nvme/nvme_xpt.c b/sys/cam/nvme/nvme_xpt.c index 22d984d02038..4e68330e63e7 100644 --- a/sys/cam/nvme/nvme_xpt.c +++ b/sys/cam/nvme/nvme_xpt.c @@ -1,856 +1,852 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015 Netflix, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for xpt_print below */ #include "opt_cam.h" struct nvme_quirk_entry { u_int quirks; #define CAM_QUIRK_MAXTAGS 1 u_int mintags; u_int maxtags; }; /* Not even sure why we need this */ static periph_init_t nvme_probe_periph_init; static struct periph_driver nvme_probe_driver = { nvme_probe_periph_init, "nvme_probe", TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver); typedef enum { NVME_PROBE_IDENTIFY_CD, NVME_PROBE_IDENTIFY_NS, NVME_PROBE_DONE, NVME_PROBE_INVALID } nvme_probe_action; static char *nvme_probe_action_text[] = { "NVME_PROBE_IDENTIFY_CD", "NVME_PROBE_IDENTIFY_NS", "NVME_PROBE_DONE", "NVME_PROBE_INVALID" }; #define NVME_PROBE_SET_ACTION(softc, newaction) \ do { \ char **text; \ text = nvme_probe_action_text; \ CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \ ("Probe %s to %s\n", text[(softc)->action], \ text[(newaction)])); \ (softc)->action = (newaction); \ } while(0) typedef enum { NVME_PROBE_NO_ANNOUNCE = 0x04 } nvme_probe_flags; typedef struct { TAILQ_HEAD(, ccb_hdr) request_ccbs; union { struct nvme_controller_data cd; struct nvme_namespace_data ns; }; nvme_probe_action action; nvme_probe_flags flags; int restart; struct cam_periph *periph; } nvme_probe_softc; static struct nvme_quirk_entry nvme_quirk_table[] = { { // { // T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, // /*vendor*/"*", /*product*/"*", /*revision*/"*" // }, .quirks = 0, .mintags = 0, .maxtags = 0 }, }; static const int nvme_quirk_table_size = sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table); static cam_status nvme_probe_register(struct cam_periph *periph, void *arg); static void nvme_probe_schedule(struct cam_periph *nvme_probe_periph); static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb); static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb); static void nvme_probe_cleanup(struct cam_periph *periph); //static void nvme_find_quirk(struct cam_ed *device); static void nvme_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *ccb); static struct cam_ed * nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static void nvme_device_transport(struct cam_path *path); static void nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static void nvme_action(union ccb *start_ccb); static void nvme_announce_periph(struct cam_periph *periph); static void nvme_proto_announce(struct cam_ed *device); static void nvme_proto_denounce(struct cam_ed *device); static void nvme_proto_debug_out(union ccb *ccb); static struct xpt_xport_ops nvme_xport_ops = { .alloc_device = nvme_alloc_device, .action = nvme_action, .async = nvme_dev_async, .announce = nvme_announce_periph, }; #define NVME_XPT_XPORT(x, X) \ static struct xpt_xport nvme_xport_ ## x = { \ .xport = XPORT_ ## X, \ .name = #x, \ .ops = &nvme_xport_ops, \ }; \ CAM_XPT_XPORT(nvme_xport_ ## x); NVME_XPT_XPORT(nvme, NVME); #undef NVME_XPT_XPORT static struct xpt_proto_ops nvme_proto_ops = { .announce = nvme_proto_announce, .denounce = nvme_proto_denounce, 
.debug_out = nvme_proto_debug_out, }; static struct xpt_proto nvme_proto = { .proto = PROTO_NVME, .name = "nvme", .ops = &nvme_proto_ops, }; CAM_XPT_PROTO(nvme_proto); static void nvme_probe_periph_init(void) { } static cam_status nvme_probe_register(struct cam_periph *periph, void *arg) { union ccb *request_ccb; /* CCB representing the probe request */ nvme_probe_softc *softc; request_ccb = (union ccb *)arg; if (request_ccb == NULL) { printf("nvme_probe_register: no probe CCB, " "can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("nvme_probe_register: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } TAILQ_INIT(&softc->request_ccbs); TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->flags = 0; periph->softc = softc; softc->periph = periph; softc->action = NVME_PROBE_INVALID; if (cam_periph_acquire(periph) != 0) return (CAM_REQ_CMP_ERR); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n")); // nvme_device_transport(periph->path); nvme_probe_schedule(periph); return(CAM_REQ_CMP); } static void nvme_probe_schedule(struct cam_periph *periph) { union ccb *ccb; nvme_probe_softc *softc; softc = (nvme_probe_softc *)periph->softc; ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD); if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) softc->flags |= NVME_PROBE_NO_ANNOUNCE; else softc->flags &= ~NVME_PROBE_NO_ANNOUNCE; xpt_schedule(periph, CAM_PRIORITY_XPT); } static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb) { struct ccb_nvmeio *nvmeio; nvme_probe_softc *softc; - struct cam_path *path; lun_id_t lun; CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n")); softc = (nvme_probe_softc *)periph->softc; - path = start_ccb->ccb_h.path; nvmeio = &start_ccb->nvmeio; lun = xpt_path_lun_id(periph->path); if (softc->restart) { softc->restart = 0; NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD); } switch (softc->action) { case NVME_PROBE_IDENTIFY_CD: cam_fill_nvmeadmin(nvmeio, 0, /* retries */ nvme_probe_done, /* cbfcnp */ CAM_DIR_IN, /* flags */ (uint8_t *)&softc->cd, /* data_ptr */ sizeof(softc->cd), /* dxfer_len */ 30 * 1000); /* timeout 30s */ nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0, 1, 0, 0, 0, 0, 0); break; case NVME_PROBE_IDENTIFY_NS: cam_fill_nvmeadmin(nvmeio, 0, /* retries */ nvme_probe_done, /* cbfcnp */ CAM_DIR_IN, /* flags */ (uint8_t *)&softc->ns, /* data_ptr */ sizeof(softc->ns), /* dxfer_len */ 30 * 1000); /* timeout 30s */ nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun, 0, 0, 0, 0, 0, 0); break; default: panic("nvme_probe_start: invalid action state 0x%x\n", softc->action); } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); } static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb) { struct nvme_namespace_data *nvme_data; struct nvme_controller_data *nvme_cdata; nvme_probe_softc *softc; struct cam_path *path; struct scsi_vpd_device_id *did; struct scsi_vpd_id_descriptor *idd; - cam_status status; u_int32_t priority; int found = 1, e, g, len; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n")); softc = (nvme_probe_softc *)periph->softc; path = done_ccb->ccb_h.path; priority = done_ccb->ccb_h.pinfo.priority; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cam_periph_error(done_ccb, 0, softc->restart ? 
(SF_NO_RECOVERY | SF_NO_RETRY) : 0 ) == ERESTART) { out: /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ cam_release_devq(path, 0, 0, 0, FALSE); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge the queue */ xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); } - status = done_ccb->ccb_h.status & CAM_STATUS_MASK; /* * If we get to this point, we got an error status back * from the inquiry and the error status doesn't require * automatically retrying the command. Therefore, the * inquiry failed. If we had inquiry information before * for this device, but this latest inquiry command failed, * the device has probably gone away. If this device isn't * already marked unconfigured, notify the peripheral * drivers that this device is no more. */ device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) xpt_async(AC_LOST_DEVICE, path, NULL); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID); found = 0; goto done; } if (softc->restart) goto done; switch (softc->action) { case NVME_PROBE_IDENTIFY_CD: nvme_controller_data_swapbytes(&softc->cd); nvme_cdata = path->device->nvme_cdata; if (nvme_cdata == NULL) { nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT, M_NOWAIT); if (nvme_cdata == NULL) { xpt_print(path, "Can't allocate memory"); goto device_fail; } } bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata)); path->device->nvme_cdata = nvme_cdata; /* Save/update serial number. */ if (path->device->serial_num != NULL) { free(path->device->serial_num, M_CAMXPT); path->device->serial_num = NULL; path->device->serial_num_len = 0; } path->device->serial_num = (u_int8_t *) malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT); if (path->device->serial_num != NULL) { cam_strvis(path->device->serial_num, nvme_cdata->sn, NVME_SERIAL_NUMBER_LENGTH, NVME_SERIAL_NUMBER_LENGTH + 1); path->device->serial_num_len = strlen(path->device->serial_num); } // nvme_find_quirk(path->device); nvme_device_transport(path); NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS); xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); goto out; case NVME_PROBE_IDENTIFY_NS: nvme_namespace_data_swapbytes(&softc->ns); /* Check that the namespace exists. */ if (softc->ns.nsze == 0) goto device_fail; nvme_data = path->device->nvme_data; if (nvme_data == NULL) { nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT, M_NOWAIT); if (nvme_data == NULL) { xpt_print(path, "Can't allocate memory"); goto device_fail; } } bcopy(&softc->ns, nvme_data, sizeof(*nvme_data)); path->device->nvme_data = nvme_data; /* Save/update device_id based on NGUID and/or EUI64. 
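 */

/*
 * Illustrative sketch, not part of this change: the code below sizes the
 * synthetic SCSI VPD device-id page from whichever identifiers the
 * namespace reports.  Each identifier costs a 4-byte descriptor header
 * plus its payload (16 bytes for an NGUID, 8 for an EUI64), on top of the
 * fixed page header:
 */
#if 0	/* example only */
static int
example_devid_len(bool have_nguid, bool have_eui64)
{
	int len = 0;

	if (have_nguid)
		len += sizeof(struct scsi_vpd_id_descriptor) + 16;
	if (have_eui64)
		len += sizeof(struct scsi_vpd_id_descriptor) + 8;
	return (SVPD_DEVICE_ID_HDR_LEN + len);	/* total allocation */
}
#endif

/*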
*/ if (path->device->device_id != NULL) { free(path->device->device_id, M_CAMXPT); path->device->device_id = NULL; path->device->device_id_len = 0; } len = 0; for (g = 0; g < sizeof(nvme_data->nguid); g++) { if (nvme_data->nguid[g] != 0) break; } if (g < sizeof(nvme_data->nguid)) len += sizeof(struct scsi_vpd_id_descriptor) + 16; for (e = 0; e < sizeof(nvme_data->eui64); e++) { if (nvme_data->eui64[e] != 0) break; } if (e < sizeof(nvme_data->eui64)) len += sizeof(struct scsi_vpd_id_descriptor) + 8; if (len > 0) { path->device->device_id = (u_int8_t *) malloc(SVPD_DEVICE_ID_HDR_LEN + len, M_CAMXPT, M_NOWAIT); } if (path->device->device_id != NULL) { did = (struct scsi_vpd_device_id *)path->device->device_id; did->device = SID_QUAL_LU_CONNECTED | T_DIRECT; did->page_code = SVPD_DEVICE_ID; scsi_ulto2b(len, did->length); idd = (struct scsi_vpd_id_descriptor *)(did + 1); if (g < sizeof(nvme_data->nguid)) { idd->proto_codeset = SVPD_ID_CODESET_BINARY; idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; idd->length = 16; bcopy(nvme_data->nguid, idd->identifier, 16); idd = (struct scsi_vpd_id_descriptor *) &idd->identifier[16]; } if (e < sizeof(nvme_data->eui64)) { idd->proto_codeset = SVPD_ID_CODESET_BINARY; idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; idd->length = 8; bcopy(nvme_data->eui64, idd->identifier, 8); } path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len; } if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) { path->device->flags &= ~CAM_DEV_UNCONFIGURED; xpt_acquire_device(path->device); done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action(done_ccb); xpt_async(AC_FOUND_DEVICE, path, done_ccb); } NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE); break; default: panic("nvme_probe_done: invalid action state 0x%x\n", softc->action); } done: if (softc->restart) { softc->restart = 0; xpt_release_ccb(done_ccb); nvme_probe_schedule(periph); goto out; } xpt_release_ccb(done_ccb); CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n")); while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) { TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR; xpt_done(done_ccb); } /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. 
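 */

/*
 * Illustrative sketch, not part of this change: the serial number saved
 * by nvme_probe_done() above is run through cam_strvis() because the NVMe
 * SN field is a fixed 20-byte, space-padded buffer with no terminating
 * NUL.  cam_strvis() copies the printable content and NUL-terminates:
 */
#if 0	/* example only */
static void
example_clean_serial(const struct nvme_controller_data *cd,
    uint8_t out[NVME_SERIAL_NUMBER_LENGTH + 1])
{
	cam_strvis(out, cd->sn, NVME_SERIAL_NUMBER_LENGTH,
	    NVME_SERIAL_NUMBER_LENGTH + 1);
}
#endif

/*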
*/ cam_release_devq(path, 0, 0, 0, FALSE); cam_periph_invalidate(periph); cam_periph_release_locked(periph); } static void nvme_probe_cleanup(struct cam_periph *periph) { free(periph->softc, M_CAMXPT); } #if 0 /* XXX should be used, don't delete */ static void nvme_find_quirk(struct cam_ed *device) { struct nvme_quirk_entry *quirk; caddr_t match; match = cam_quirkmatch((caddr_t)&device->nvme_data, (caddr_t)nvme_quirk_table, nvme_quirk_table_size, sizeof(*nvme_quirk_table), nvme_identify_match); if (match == NULL) panic("xpt_find_quirk: device didn't match wildcard entry!!"); quirk = (struct nvme_quirk_entry *)match; device->quirk = quirk; if (quirk->quirks & CAM_QUIRK_MAXTAGS) { device->mintags = quirk->mintags; device->maxtags = quirk->maxtags; } } #endif static void nvme_scan_lun(struct cam_periph *periph, struct cam_path *path, cam_flags flags, union ccb *request_ccb) { struct ccb_pathinq cpi; cam_status status; struct cam_periph *old_periph; int lock; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n")); xpt_path_inq(&cpi, path); if (cpi.ccb_h.status != CAM_REQ_CMP) { if (request_ccb != NULL) { request_ccb->ccb_h.status = cpi.ccb_h.status; xpt_done(request_ccb); } return; } if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n")); request_ccb->ccb_h.status = CAM_REQ_CMP; /* XXX signal error ? */ xpt_done(request_ccb); return; } lock = (xpt_path_owned(path) == 0); if (lock) xpt_path_lock(path); if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) { if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) { nvme_probe_softc *softc; softc = (nvme_probe_softc *)old_periph->softc; TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, periph_links.tqe); softc->restart = 1; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("restarting nvme_probe device\n")); } else { request_ccb->ccb_h.status = CAM_REQ_CMP_ERR; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Failing to restart nvme_probe device\n")); xpt_done(request_ccb); } } else { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("Adding nvme_probe device\n")); status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup, nvme_probe_start, "nvme_probe", CAM_PERIPH_BIO, request_ccb->ccb_h.path, NULL, 0, request_ccb); if (status != CAM_REQ_CMP) { xpt_print(path, "xpt_scan_lun: cam_alloc_periph " "returned an error, can't continue probe\n"); request_ccb->ccb_h.status = status; xpt_done(request_ccb); } } if (lock) xpt_path_unlock(path); } static struct cam_ed * nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct nvme_quirk_entry *quirk; struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); /* * Take the default quirk entry until we have inquiry * data from nvme and can determine a better quirk to use. */ quirk = &nvme_quirk_table[nvme_quirk_table_size - 1]; device->quirk = (void *)quirk; device->mintags = 0; device->maxtags = 0; device->inq_flags = 0; device->queue_flags = 0; device->device_id = NULL; device->device_id_len = 0; device->serial_num = NULL; device->serial_num_len = 0; return (device); } static void nvme_device_transport(struct cam_path *path) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; /* XXX get data from nvme namespace and other info ??? 
*/ /* Get transport information from the SIM */ xpt_path_inq(&cpi, path); path->device->transport = cpi.transport; path->device->transport_version = cpi.transport_version; path->device->protocol = cpi.protocol; path->device->protocol_version = cpi.protocol_version; /* Tell the controller what we think */ memset(&cts, 0, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.transport = path->device->transport; cts.transport_version = path->device->transport_version; cts.protocol = path->device->protocol; cts.protocol_version = path->device->protocol_version; cts.proto_specific.valid = 0; cts.xport_specific.valid = 0; xpt_action((union ccb *)&cts); } static void nvme_dev_advinfo(union ccb *start_ccb) { struct cam_ed *device; struct ccb_dev_advinfo *cdai; off_t amt; xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED); start_ccb->ccb_h.status = CAM_REQ_INVALID; device = start_ccb->ccb_h.path->device; cdai = &start_ccb->cdai; switch(cdai->buftype) { case CDAI_TYPE_SCSI_DEVID: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->device_id_len; if (device->device_id_len == 0) break; amt = device->device_id_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->device_id, amt); break; case CDAI_TYPE_SERIAL_NUM: if (cdai->flags & CDAI_FLAG_STORE) return; cdai->provsiz = device->serial_num_len; if (device->serial_num_len == 0) break; amt = device->serial_num_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->serial_num, amt); break; case CDAI_TYPE_PHYS_PATH: if (cdai->flags & CDAI_FLAG_STORE) { if (device->physpath != NULL) { free(device->physpath, M_CAMXPT); device->physpath = NULL; device->physpath_len = 0; } /* Clear existing buffer if zero length */ if (cdai->bufsiz == 0) break; device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT); if (device->physpath == NULL) { start_ccb->ccb_h.status = CAM_REQ_ABORTED; return; } device->physpath_len = cdai->bufsiz; memcpy(device->physpath, cdai->buf, cdai->bufsiz); } else { cdai->provsiz = device->physpath_len; if (device->physpath_len == 0) break; amt = device->physpath_len; if (cdai->provsiz > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->physpath, amt); } break; case CDAI_TYPE_NVME_CNTRL: if (cdai->flags & CDAI_FLAG_STORE) return; amt = sizeof(struct nvme_controller_data); cdai->provsiz = amt; if (amt > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->nvme_cdata, amt); break; case CDAI_TYPE_NVME_NS: if (cdai->flags & CDAI_FLAG_STORE) return; amt = sizeof(struct nvme_namespace_data); cdai->provsiz = amt; if (amt > cdai->bufsiz) amt = cdai->bufsiz; memcpy(cdai->buf, device->nvme_data, amt); break; default: return; } start_ccb->ccb_h.status = CAM_REQ_CMP; if (cdai->flags & CDAI_FLAG_STORE) { xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path, (void *)(uintptr_t)cdai->buftype); } } static void nvme_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code)); switch (start_ccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_SCAN_TGT: case XPT_SCAN_LUN: nvme_scan_lun(start_ccb->ccb_h.path->periph, start_ccb->ccb_h.path, start_ccb->crcn.flags, start_ccb); break; case XPT_DEV_ADVINFO: nvme_dev_advinfo(start_ccb); break; default: xpt_action_default(start_ccb); break; } } /* * Handle any per-device event notifications that require action by the XPT. 
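 */

/*
 * Illustrative sketch, not part of this change: the CDAI buffer types
 * served by nvme_dev_advinfo() above are reachable from userland through
 * libcam and the pass(4) interface, which is how camcontrol fetches
 * serial numbers and device IDs.  Device path and buffer size here are
 * assumptions.
 */
#if 0	/* userland example only */
#include <camlib.h>
#include <cam/cam_ccb.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct cam_device *dev;
	union ccb *ccb;
	uint8_t serial[256];

	if ((dev = cam_open_device("/dev/nda0", O_RDWR)) == NULL)
		errx(1, "%s", cam_errbuf);
	if ((ccb = cam_getccb(dev)) == NULL)
		errx(1, "cam_getccb failed");

	ccb->ccb_h.func_code = XPT_DEV_ADVINFO;
	ccb->cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	ccb->cdai.flags = CDAI_FLAG_NONE;
	ccb->cdai.bufsiz = sizeof(serial);
	ccb->cdai.buf = serial;

	if (cam_send_ccb(dev, ccb) < 0)
		err(1, "cam_send_ccb");
	printf("serial: %.*s\n", (int)ccb->cdai.provsiz, serial);
	cam_freeccb(ccb);
	cam_close_device(dev);
	return (0);
}
#endif

/*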
*/ static void nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; if (async_code == AC_LOST_DEVICE && (device->flags & CAM_DEV_UNCONFIGURED) == 0) { device->flags |= CAM_DEV_UNCONFIGURED; xpt_release_device(device); } } static void nvme_announce_periph(struct cam_periph *periph) { struct ccb_pathinq cpi; struct ccb_trans_settings cts; struct cam_path *path = periph->path; struct ccb_trans_settings_nvme *nvmex; struct sbuf sb; char buffer[120]; cam_periph_assert(periph, MA_OWNED); /* Ask the SIM for connection details */ memset(&cts, 0, sizeof(cts)); xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; xpt_action((union ccb*)&cts); if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) return; nvmex = &cts.xport_specific.nvme; /* Ask the SIM for its base transfer speed */ xpt_path_inq(&cpi, periph->path); sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN); sbuf_printf(&sb, "%s%d: nvme version %d.%d", periph->periph_name, periph->unit_number, NVME_MAJOR(nvmex->spec), NVME_MINOR(nvmex->spec)); if (nvmex->valid & CTS_NVME_VALID_LINK) sbuf_printf(&sb, " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link", nvmex->lanes, nvmex->max_lanes, nvmex->speed, nvmex->max_speed); sbuf_printf(&sb, "\n"); sbuf_finish(&sb); sbuf_putbuf(&sb); } static void nvme_proto_announce(struct cam_ed *device) { struct sbuf sb; char buffer[120]; sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN); nvme_print_ident(device->nvme_cdata, device->nvme_data, &sb); sbuf_finish(&sb); sbuf_putbuf(&sb); } static void nvme_proto_denounce(struct cam_ed *device) { nvme_proto_announce(device); } static void nvme_proto_debug_out(union ccb *ccb) { char cdb_str[(sizeof(struct nvme_command) * 3) + 1]; if (ccb->ccb_h.func_code != XPT_NVME_IO && ccb->ccb_h.func_code != XPT_NVME_ADMIN) return; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd, ccb->ccb_h.func_code == XPT_NVME_ADMIN), nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str)))); } diff --git a/sys/cam/scsi/scsi_cd.c b/sys/cam/scsi/scsi_cd.c index 3cca4bbf243b..3f5cadb44fdc 100644 --- a/sys/cam/scsi/scsi_cd.c +++ b/sys/cam/scsi/scsi_cd.c @@ -1,4260 +1,4258 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Portions of this driver taken from the original FreeBSD cd driver. * Written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems for use under the MACH(2.5) operating system. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * from: cd.c,v 1.83 1997/05/04 15:24:22 joerg Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_cd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LEADOUT 0xaa /* leadout toc entry */ struct cd_params { u_int32_t blksize; u_long disksize; }; typedef enum { CD_Q_NONE = 0x00, CD_Q_NO_TOUCH = 0x01, CD_Q_BCD_TRACKS = 0x02, CD_Q_10_BYTE_ONLY = 0x10, CD_Q_RETRY_BUSY = 0x40 } cd_quirks; #define CD_Q_BIT_STRING \ "\020" \ "\001NO_TOUCH" \ "\002BCD_TRACKS" \ "\00510_BYTE_ONLY" \ "\007RETRY_BUSY" typedef enum { CD_FLAG_INVALID = 0x0001, CD_FLAG_NEW_DISC = 0x0002, CD_FLAG_DISC_LOCKED = 0x0004, CD_FLAG_DISC_REMOVABLE = 0x0008, CD_FLAG_SAW_MEDIA = 0x0010, CD_FLAG_ACTIVE = 0x0080, CD_FLAG_SCHED_ON_COMP = 0x0100, CD_FLAG_RETRY_UA = 0x0200, CD_FLAG_VALID_MEDIA = 0x0400, CD_FLAG_VALID_TOC = 0x0800, CD_FLAG_SCTX_INIT = 0x1000, CD_FLAG_MEDIA_WAIT = 0x2000, CD_FLAG_MEDIA_SCAN_ACT = 0x4000 } cd_flags; typedef enum { CD_CCB_PROBE = 0x01, CD_CCB_BUFFER_IO = 0x02, CD_CCB_TUR = 0x03, CD_CCB_MEDIA_PREVENT = 0x04, CD_CCB_MEDIA_ALLOW = 0x05, CD_CCB_MEDIA_SIZE = 0x06, CD_CCB_MEDIA_TOC_HDR = 0x07, CD_CCB_MEDIA_TOC_FULL = 0x08, CD_CCB_MEDIA_TOC_LEAD = 0x09, CD_CCB_TYPE_MASK = 0x0F, CD_CCB_RETRY_UA = 0x10 } cd_ccb_state; #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 /* * According to the MMC-6 spec, 6.25.3.2.11, the lead-out is reported by * READ_TOC as logical track 170, so at most 169 tracks may be reported. 
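/*
 * Editor's note: an illustrative, standalone computation of the worst case
 * the entries[170] array declared just below must cover: one entry per
 * reportable track plus the lead-out, after the TOC header (the 4- and
 * 8-byte wire sizes are assumed here rather than taken from the real
 * structs).
 */
#include <stdio.h>

int main(void)
{
	int hdr_size = 4, entry_size = 8;	/* assumed wire sizes */

	printf("worst-case READ_TOC reply: %d bytes\n",
	    hdr_size + 170 * entry_size);	/* 1364 bytes */
	return (0);
}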
*/ struct cd_tocdata { struct ioc_toc_header header; struct cd_toc_entry entries[170]; }; struct cd_toc_single { struct ioc_toc_header header; struct cd_toc_entry entry; }; typedef enum { CD_STATE_PROBE, CD_STATE_NORMAL, CD_STATE_MEDIA_PREVENT, CD_STATE_MEDIA_ALLOW, CD_STATE_MEDIA_SIZE, CD_STATE_MEDIA_TOC_HDR, CD_STATE_MEDIA_TOC_FULL, CD_STATE_MEDIA_TOC_LEAD } cd_state; struct cd_softc { cam_pinfo pinfo; cd_state state; volatile cd_flags flags; struct bio_queue_head bio_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; struct cd_params params; cd_quirks quirks; struct cam_periph *periph; int minimum_command_size; int outstanding_cmds; int tur; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; STAILQ_HEAD(, cd_mode_params) mode_queue; struct cd_tocdata toc; int toc_read_len; struct cd_toc_single leadout; struct disk *disk; struct callout mediapoll_c; #define CD_ANNOUNCETMP_SZ 120 char announce_temp[CD_ANNOUNCETMP_SZ]; #define CD_ANNOUNCE_SZ 400 char announce_buf[CD_ANNOUNCE_SZ]; }; struct cd_page_sizes { int page; int page_size; }; static struct cd_page_sizes cd_page_size_table[] = { { AUDIO_PAGE, sizeof(struct cd_audio_page)} }; struct cd_quirk_entry { struct scsi_inquiry_pattern inq_pat; cd_quirks quirks; }; /* * NOTE ON 10_BYTE_ONLY quirks: Any 10_BYTE_ONLY quirks MUST be because * your device hangs when it gets a 10 byte command. Adding a quirk just * to get rid of the informative diagnostic message is not acceptable. All * 10_BYTE_ONLY quirks must be documented in full in a PR (which should be * referenced in a comment along with the quirk) , and must be approved by * ken@FreeBSD.org. Any quirks added that don't adhere to this policy may * be removed until the submitter can explain why they are needed. * 10_BYTE_ONLY quirks will be removed (as they will no longer be necessary) * when the CAM_NEW_TRAN_CODE work is done. */ static struct cd_quirk_entry cd_quirk_table[] = { { { T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"}, /* quirks */ CD_Q_BCD_TRACKS }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. 
*/
*/ {T_CDROM, SIP_MEDIA_REMOVABLE, "NECVMWar", "VMware IDE CDR10", "*"}, /*quirks*/ CD_Q_RETRY_BUSY } }; #ifdef COMPAT_FREEBSD32 struct ioc_read_toc_entry32 { u_char address_format; u_char starting_track; u_short data_len; uint32_t data; /* (struct cd_toc_entry *) */ }; #define CDIOREADTOCENTRYS_32 \ _IOC_NEWTYPE(CDIOREADTOCENTRYS, struct ioc_read_toc_entry32) #endif static disk_open_t cdopen; static disk_close_t cdclose; static disk_ioctl_t cdioctl; static disk_strategy_t cdstrategy; static periph_init_t cdinit; static periph_ctor_t cdregister; static periph_dtor_t cdcleanup; static periph_start_t cdstart; static periph_oninv_t cdoninvalidate; static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS); static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags); static void cddone(struct cam_periph *periph, union ccb *start_ccb); static union cd_pages *cdgetpage(struct cd_mode_params *mode_params); static int cdgetpagesize(int page_num); static void cdprevent(struct cam_periph *periph, int action); static void cdmediaprobedone(struct cam_periph *periph); static int cdcheckmedia(struct cam_periph *periph, int do_wait); #if 0 static int cdsize(struct cam_periph *periph, u_int32_t *size); #endif static int cd6byteworkaround(union ccb *ccb); static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags); static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page); static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data); static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len); static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len); static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf); static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex); static int cdpause(struct cam_periph *periph, u_int32_t go); static int cdstopunit(struct cam_periph *periph, u_int32_t eject); static int cdstartunit(struct cam_periph *periph, int load); static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed); static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo); static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct); static callout_func_t cdmediapoll; static struct periph_driver cddriver = { cdinit, "cd", TAILQ_HEAD_INITIALIZER(cddriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(cd, cddriver); #ifndef CD_DEFAULT_POLL_PERIOD #define CD_DEFAULT_POLL_PERIOD 3 #endif #ifndef CD_DEFAULT_RETRY #define CD_DEFAULT_RETRY 4 #endif #ifndef CD_DEFAULT_TIMEOUT #define CD_DEFAULT_TIMEOUT 30000 #endif static int cd_poll_period = CD_DEFAULT_POLL_PERIOD; static int cd_retry_count = CD_DEFAULT_RETRY; static int cd_timeout = CD_DEFAULT_TIMEOUT; static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM CDROM driver"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN, 
&cd_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN, &cd_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN, &cd_timeout, 0, "Timeout, in us, for read operations"); static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers"); static void cdinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, cdasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("cd: Failed to attach master async callback " "due to status 0x%x!\n", status); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void cddiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void cdoninvalidate(struct cam_periph *periph) { struct cd_softc *softc; cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, cdasync, periph, periph->path); softc->flags |= CD_FLAG_INVALID; /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); disk_gone(softc->disk); } static void cdcleanup(struct cam_periph *periph) { struct cd_softc *softc; softc = (struct cd_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & CD_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_CDROM && SID_TYPE(&cgd->inq_data) != T_WORM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(cdregister, cdoninvalidate, cdcleanup, cdstart, "cd", CAM_PERIPH_BIO, path, cdasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("cdasync: Unable to attach new device " "due to status 0x%x\n", status); return; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct cd_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all media change UNIT ATTENTIONs except * our own, as they will be handled by cderror(). 
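/*
 * Editor's note: a standalone restatement (illustrative only) of the test
 * applied just below: additional sense code 0x28/0x00 is the standard
 * "NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED" unit attention,
 * and only that pair triggers disk_media_changed().
 */
#include <stdio.h>

static int is_media_change_ua(int asc, int ascq)
{
	return (asc == 0x28 && ascq == 0x00);
}

int main(void)
{
	printf("%d %d\n", is_media_change_ua(0x28, 0x00),	/* 1 */
	    is_media_change_ua(0x29, 0x00));	/* 0: power-on/reset UA */
	return (0);
}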
*/ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); } break; } case AC_SCSI_AEN: cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur) { if (cam_periph_acquire(periph) == 0) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= CD_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= CD_CCB_RETRY_UA; break; } default: break; } cam_periph_async(periph, code, path, arg); } static void cdsysctlinit(void *context, int pending) { struct cam_periph *periph; struct cd_softc *softc; char tmpstr[32], tmpstr2[16]; periph = (struct cam_periph *)context; if (cam_periph_acquire(periph) != 0) return; softc = (struct cd_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM CD unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); cam_periph_lock(periph); softc->flags |= CD_FLAG_SCTX_INIT; cam_periph_unlock(periph); softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_cd), OID_AUTO, tmpstr2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("cdsysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can set the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->minimum_command_size, 0, cdcmdsizesysctl, "I", "Minimum CDB size"); cam_periph_release(periph); } /* * We have a handler function for this so we can check the values when the * user sets them, instead of every time we look at them. */ static int cdcmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * The only real values we can have here are 6 or 10. I don't * really foresee having 12 be an option at any time in the future. * So if the user sets something less than or equal to 6, we'll set * it to 6. If he sets something greater than 6, we'll set it to 10. * * I suppose we could just return an error here for the wrong values, * but I don't think it's necessary to do so, as long as we can * determine the user's intent without too much trouble. */ if (value < 6) value = 6; else if (value > 6) value = 10; *(int *)arg1 = value; return (0); } static cam_status cdregister(struct cam_periph *periph, void *arg) { struct cd_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("cdregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct cd_softc *)malloc(sizeof(*softc),M_DEVBUF, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("cdregister: Unable to probe new device. 
" "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); STAILQ_INIT(&softc->mode_queue); softc->state = CD_STATE_PROBE; bioq_init(&softc->bio_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= CD_FLAG_DISC_REMOVABLE; periph->softc = softc; softc->periph = periph; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)cd_quirk_table, nitems(cd_quirk_table), sizeof(*cd_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct cd_quirk_entry *)match)->quirks; else softc->quirks = CD_Q_NONE; /* Check if the SIM does not want 6 byte commands */ xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= CD_Q_10_BYTE_ONLY; TASK_INIT(&softc->sysctl_task, 0, cdsysctlinit, periph); /* The default is 6 byte commands, unless quirked otherwise */ if (softc->quirks & CD_Q_10_BYTE_ONLY) softc->minimum_command_size = 10; else softc->minimum_command_size = 6; /* * Refcount and block open attempts until we are setup * Can't block */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.cd.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_command_size); /* 6 and 10 are the only permissible values here. */ if (softc->minimum_command_size < 6) softc->minimum_command_size = 6; else if (softc->minimum_command_size > 6) softc->minimum_command_size = 10; /* * We need to register the statistics structure for this device, * but we don't have the blocksize yet for it. So, we register * the structure and indicate that we don't have the blocksize * yet. Unlike other SCSI peripheral drivers, we explicitly set * the device type here to be CDROM, rather than just ORing in * the device type. This is because this driver can attach to either * CDROM or WORM devices, and we want this peripheral driver to * show up in the devstat list as a CD peripheral driver, not a * WORM peripheral driver. WORM drives will also have the WORM * driver attached to them. 
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry("cd", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, DEVSTAT_TYPE_CDROM | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_CD); softc->disk->d_open = cdopen; softc->disk->d_close = cdclose; softc->disk->d_strategy = cdstrategy; softc->disk->d_gone = cddiskgonecb; softc->disk->d_ioctl = cdioctl; softc->disk->d_name = "cd"; cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_unit = periph->unit_number; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->disk->d_maxsize = DFLTPHYS; /* traditional default */ else if (cpi.maxio > maxphys) softc->disk->d_maxsize = maxphys; /* for safety */ else softc->disk->d_maxsize = cpi.maxio; softc->disk->d_flags = 0; softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment), "%s%d", cpi.dev_name, cpi.unit_number); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * cddiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_SCSI_AEN | AC_UNIT_ATTENTION, cdasync, periph, periph->path); /* * Schedule periodic media polling events. */ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && cd_poll_period != 0) callout_reset(&softc->mediapoll_c, cd_poll_period * hz, cdmediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int cdopen(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; if (cam_periph_acquire(periph) != 0) return(ENXIO); cam_periph_lock(periph); if (softc->flags & CD_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdopen\n")); /* * Check for media, and set the appropriate flags. We don't bail * if we don't have media, but then we don't allow anything but the * CDIOCEJECT/CDIOCCLOSE ioctls if there is no media. */
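/*
 * Editor's note: the d_maxsize selection in cdregister() above, in
 * isolation (standalone sketch; the DFLTPHYS and maxphys stand-ins use
 * typical values, not the kernel's actual configuration): the SIM's
 * cpi.maxio wins unless it is 0 (no opinion) or exceeds maxphys.
 */
#include <stdio.h>

#define DFLTPHYS	(64 * 1024)		/* traditional default */
static const unsigned maxphys = 1024 * 1024;	/* stand-in value */

static unsigned pick_maxsize(unsigned maxio)
{
	if (maxio == 0)
		return (DFLTPHYS);	/* traditional default */
	if (maxio > maxphys)
		return (maxphys);	/* for safety */
	return (maxio);
}

int main(void)
{
	printf("%u %u %u\n", pick_maxsize(0),
	    pick_maxsize(128 * 1024), pick_maxsize(8 * 1024 * 1024));
	return (0);
}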
*/ cdcheckmedia(periph, /*do_wait*/ 1); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdopen\n")); cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int cdclose(struct disk *dp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)dp->d_drv1; softc = (struct cd_softc *)periph->softc; cam_periph_lock(periph); if (cam_periph_hold(periph, PRIBIO) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (0); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("cdclose\n")); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0) cdprevent(periph, PR_ALLOW); /* * Since we're closing this CD, mark the blocksize as unavailable. * It will be marked as available when the CD is opened again. */ softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; /* * We'll check the media and toc again at the next open(). */ softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags, softc->disk->d_devstat); return(error); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void cdstrategy(struct bio *bp) { struct cam_periph *periph; struct cd_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdstrategy(%p)\n", bp)); softc = (struct cd_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & CD_FLAG_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_disksort(&softc->bio_queue, bp); /* * If we don't know that we have valid media, schedule the media * check first. The I/O will get executed after the media check. 
*/
*/ if ((softc->flags & CD_FLAG_VALID_MEDIA) == 0) cdcheckmedia(periph, /*do_wait*/ 0); else xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void cdstart(struct cam_periph *periph, union ccb *start_ccb) { struct cd_softc *softc; struct bio *bp; struct ccb_scsiio *csio; cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstart\n")); switch (softc->state) { case CD_STATE_NORMAL: { bp = bioq_first(&softc->bio_queue); if (bp == NULL) { if (softc->tur) { softc->tur = 0; csio = &start_ccb->csio; scsi_test_unit_ready(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, cd_timeout); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); } else { if (softc->tur) { softc->tur = 0; cam_periph_release_locked(periph); } bioq_remove(&softc->bio_queue, bp); if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE)) { biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); return; } scsi_read_write(&start_ccb->csio, /*retries*/ cd_retry_count, /* cbfcnp */ cddone, MSG_SIMPLE_Q_TAG, /* read */bp->bio_cmd == BIO_READ ? SCSI_RW_READ : SCSI_RW_WRITE, /* byte2 */ 0, /* minimum_cmd_size */ 10, /* lba */ bp->bio_offset / softc->params.blksize, bp->bio_bcount / softc->params.blksize, /* data_ptr */ bp->bio_data, /* dxfer_len */ bp->bio_bcount, /* sense_len */ cd_retry_count ? SSD_FULL_SIZE : SF_NO_PRINT, /* timeout */ cd_timeout); /* Use READ CD command for audio tracks. */ if (softc->params.blksize == 2352) { start_ccb->csio.cdb_io.cdb_bytes[0] = READ_CD; start_ccb->csio.cdb_io.cdb_bytes[9] = 0xf8; start_ccb->csio.cdb_io.cdb_bytes[10] = 0; start_ccb->csio.cdb_io.cdb_bytes[11] = 0; start_ccb->csio.cdb_len = 12; } start_ccb->ccb_h.ccb_state = CD_CCB_BUFFER_IO; LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); softc->outstanding_cmds++; /* We expect a unit attention from this device */ if ((softc->flags & CD_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= CD_CCB_RETRY_UA; softc->flags &= ~CD_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL || softc->tur) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case CD_STATE_PROBE: case CD_STATE_MEDIA_SIZE: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap == NULL) { xpt_print(periph->path, "%s: Couldn't malloc read_capacity data\n", __func__); xpt_release_ccb(start_ccb); /* * We can't probe because we can't allocate memory, * so invalidate the peripheral. The system probably * has larger problems at this stage. If we've * already probed (and are re-probing capacity), we * don't need to invalidate. * * XXX KDM need to reset probe state and kick out * pending I/O. */ if (softc->state == CD_STATE_PROBE) cam_periph_invalidate(periph); break; } /* * Set the default capacity and sector size to something that * GEOM can handle. This will get reset when a read capacity * completes successfully. 
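/*
 * Editor's note: the scsi_read_write() call above converts the
 * byte-addressed bio into logical blocks with plain division; a
 * standalone restatement for the two block sizes this driver uses
 * (2048-byte data sectors, 2352-byte raw audio frames):
 */
#include <stdio.h>

static void bio_to_lba(long long offset, long long bcount,
    unsigned blksize, long long *lba, long long *nblocks)
{
	*lba = offset / blksize;	/* bp->bio_offset / blksize */
	*nblocks = bcount / blksize;	/* bp->bio_bcount / blksize */
}

int main(void)
{
	long long lba, n;

	bio_to_lba(1024LL * 2048, 16 * 2048, 2048, &lba, &n);
	printf("data:  lba %lld, %lld blocks\n", lba, n);
	bio_to_lba(75LL * 2352, 75 * 2352, 2352, &lba, &n);
	printf("audio: lba %lld, %lld frames\n", lba, n);
	return (0);
}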
*/ softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; csio = &start_ccb->csio; scsi_read_capacity(csio, /*retries*/ cd_retry_count, cddone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/20000); start_ccb->ccb_h.ccb_bp = NULL; if (softc->state == CD_STATE_PROBE) start_ccb->ccb_h.ccb_state = CD_CCB_PROBE; else start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_SIZE; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_ALLOW: case CD_STATE_MEDIA_PREVENT: { /* * If the CD is already locked, we don't need to do this. * Move on to the capacity check. */ if (softc->state == CD_STATE_MEDIA_PREVENT && (softc->flags & CD_FLAG_DISC_LOCKED) != 0) { softc->state = CD_STATE_MEDIA_SIZE; xpt_release_ccb(start_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } scsi_prevent(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*action*/ (softc->state == CD_STATE_MEDIA_ALLOW) ? PR_ALLOW : PR_PREVENT, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 60000); start_ccb->ccb_h.ccb_bp = NULL; if (softc->state == CD_STATE_MEDIA_ALLOW) start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_ALLOW; else start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_PREVENT; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_TOC_HDR: { struct ioc_toc_header *toch; bzero(&softc->toc, sizeof(softc->toc)); toch = &softc->toc.header; scsi_read_toc(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*byte1_flags*/ 0, /*format*/ SRTOC_FORMAT_TOC, /*track*/ 0, /*data_ptr*/ (uint8_t *)toch, /*dxfer_len*/ sizeof(*toch), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 50000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_TOC_HDR; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_TOC_FULL: { bzero(&softc->toc, sizeof(softc->toc)); scsi_read_toc(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*byte1_flags*/ 0, /*format*/ SRTOC_FORMAT_TOC, /*track*/ 0, /*data_ptr*/ (uint8_t *)&softc->toc, /*dxfer_len*/ softc->toc_read_len ? softc->toc_read_len : sizeof(softc->toc), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 50000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_TOC_FULL; xpt_action(start_ccb); break; } case CD_STATE_MEDIA_TOC_LEAD: { struct cd_toc_single *leadout; leadout = &softc->leadout; bzero(leadout, sizeof(*leadout)); scsi_read_toc(&start_ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/ cddone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*byte1_flags*/ CD_MSF, /*format*/ SRTOC_FORMAT_TOC, /*track*/ LEADOUT, /*data_ptr*/ (uint8_t *)leadout, /*dxfer_len*/ sizeof(*leadout), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 50000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_MEDIA_TOC_LEAD; xpt_action(start_ccb); break; } } } static void cddone(struct cam_periph *periph, union ccb *done_ccb) { struct cd_softc *softc; struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cddone\n")); cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state & CD_CCB_TYPE_MASK) { case CD_CCB_BUFFER_IO: { struct bio *bp; int error; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int sf; if ((done_ccb->ccb_h.ccb_state & CD_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = cderror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. 
*/ return; } } if (error != 0) { xpt_print(periph->path, "cddone: got error %#x back\n", error); bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* * Short transfer ??? * XXX: not sure this is correct for partial * transfers at EOM */ bp->bio_flags |= BIO_ERROR; } } LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); softc->outstanding_cmds--; biofinish(bp, NULL, 0); break; } case CD_CCB_PROBE: { struct scsi_read_capacity_data *rdcap; char *announce_buf; struct cd_params *cdp; int error; cdp = &softc->params; announce_buf = softc->announce_temp; bzero(announce_buf, CD_ANNOUNCETMP_SZ); rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; cdp->disksize = scsi_4btoul (rdcap->addr) + 1; cdp->blksize = scsi_4btoul (rdcap->length); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP || (error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT)) == 0) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors)", ((uintmax_t)cdp->disksize * cdp->blksize) / (1024 * 1024), (uintmax_t)cdp->disksize, cdp->blksize); } else { if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); status = done_ccb->ccb_h.status; bzero(&cgd, sizeof(cgd)); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * Attach to anything that claims to be a * CDROM or WORM device, as long as it * doesn't return a "Logical unit not * supported" (0x25) error. */ if ((have_sense) && (asc != 0x25) && (error_code == SSD_CURRENT_ERROR || error_code == SSD_DESC_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else if ((have_sense == 0) && ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (csio->scsi_status == SCSI_STATUS_BUSY)) { snprintf(announce_buf, CD_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: SCSI Status: %s", scsi_status_string(csio)); } else if (SID_TYPE(&cgd.inq_data) == T_CDROM) { /* * We only print out an error for * CDROM type devices. For WORM * devices, we don't print out an * error since a few WORM devices * don't support CDROM commands. * If we have sense information, go * ahead and print it out. * Otherwise, just say that we * couldn't attach. */ /* * Just print out the error, not * the full probe message, when we * don't attach. */
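/*
 * Editor's note: the announcement math from the CD_CCB_PROBE path above,
 * standalone: capacity in MB is (disksize * blksize) / (1024 * 1024),
 * done in uintmax_t so large media cannot overflow 32-bit arithmetic.
 * The sample numbers are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintmax_t disksize = 359844;	/* sectors (example disc) */
	uint32_t blksize = 2048;	/* bytes per sector */

	printf("%juMB (%ju %u byte sectors)\n",
	    (disksize * blksize) / (1024 * 1024), disksize, blksize);
	return (0);
}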
*/ if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } else { /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf = NULL; } } } free(rdcap, M_SCSICD); if (announce_buf != NULL) { struct sbuf sb; sbuf_new(&sb, softc->announce_buf, CD_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, CD_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ taskqueue_enqueue(taskqueue_thread,&softc->sysctl_task); } softc->state = CD_STATE_NORMAL; /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } case CD_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } case CD_CCB_MEDIA_ALLOW: case CD_CCB_MEDIA_PREVENT: { int error; int is_prevent; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); } if (error == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * Note that just like the original cdcheckmedia(), we do * a prevent without failing the whole operation if the * prevent fails. We try, but keep going if it doesn't * work. */ if ((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_PREVENT) is_prevent = 1; else is_prevent = 0; xpt_release_ccb(done_ccb); if (is_prevent != 0) { if (error == 0) softc->flags |= CD_FLAG_DISC_LOCKED; else softc->flags &= ~CD_FLAG_DISC_LOCKED; softc->state = CD_STATE_MEDIA_SIZE; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } else { if (error == 0) softc->flags &= ~CD_FLAG_DISC_LOCKED; softc->state = CD_STATE_NORMAL; if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } return; } case CD_CCB_MEDIA_SIZE: { struct scsi_read_capacity_data *rdcap; int error; error = 0; if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); } if (error == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; if (error == 0) { softc->params.disksize =scsi_4btoul(rdcap->addr) + 1; softc->params.blksize = scsi_4btoul(rdcap->length); /* Make sure we got at least some block size. */ if (softc->params.blksize == 0) error = EIO; /* * SCSI-3 mandates that the reported blocksize shall be * 2048. 
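/*
 * Editor's note: a standalone sketch of the scsi_4btoul() decode used
 * above. READ CAPACITY returns the LAST addressable LBA and the block
 * length as 4-byte big-endian fields, hence the "+ 1" when computing
 * the sector count. The example bytes are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t be32_decode(const uint8_t b[4])
{
	return ((uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
	    (uint32_t)b[2] << 8 | b[3]);
}

int main(void)
{
	uint8_t addr[4]   = { 0x00, 0x05, 0x7d, 0xa3 };	/* last LBA 359843 */
	uint8_t length[4] = { 0x00, 0x00, 0x08, 0x00 };	/* 2048 */
	uint64_t disksize = (uint64_t)be32_decode(addr) + 1;

	printf("%llu sectors of %u bytes\n",
	    (unsigned long long)disksize, be32_decode(length));
	return (0);
}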
Older drives sometimes report funny values, * trim it down to 2048, or other parts of the kernel * will get confused. * * XXX we leave drives alone that might report 512 * bytes, as well as drives reporting more weird * sizes like perhaps 4K. */ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; } free(rdcap, M_SCSICD); if (error == 0) { softc->disk->d_sectorsize = softc->params.blksize; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; softc->flags |= CD_FLAG_SAW_MEDIA | CD_FLAG_VALID_MEDIA; softc->state = CD_STATE_MEDIA_TOC_HDR; } else { softc->flags &= ~(CD_FLAG_VALID_MEDIA | CD_FLAG_VALID_TOC); bioq_flush(&softc->bio_queue, NULL, EINVAL); softc->state = CD_STATE_MEDIA_ALLOW; cdmediaprobedone(periph); } xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } case CD_CCB_MEDIA_TOC_HDR: case CD_CCB_MEDIA_TOC_FULL: case CD_CCB_MEDIA_TOC_LEAD: { int error; struct ioc_toc_header *toch; int num_entries; int cdindex; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); } if (error == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * We will get errors here for media that doesn't have a table * of contents. According to the MMC-3 spec: "When a Read * TOC/PMA/ATIP command is presented for a DDCD/CD-R/RW media, * where the first TOC has not been recorded (no complete * session) and the Format codes 0000b, 0001b, or 0010b are * specified, this command shall be rejected with an INVALID * FIELD IN CDB. Devices that are not capable of reading an * incomplete session on DDC/CD-R/RW media shall report * CANNOT READ MEDIUM - INCOMPATIBLE FORMAT." * * So this isn't fatal if we can't read the table of contents, * it just means that the user won't be able to issue the * play tracks ioctl, and likely lots of other stuff won't * work either. They need to burn the CD before we can do * a whole lot with it. So we don't print anything here if * we get an error back. * * We also bail out if the drive doesn't at least give us * the full TOC header. */ if ((error != 0) || ((csio->dxfer_len - csio->resid) < sizeof(struct ioc_toc_header))) { softc->flags &= ~CD_FLAG_VALID_TOC; bzero(&softc->toc, sizeof(softc->toc)); /* * Failing the TOC read is not an error. */ softc->state = CD_STATE_NORMAL; xpt_release_ccb(done_ccb); cdmediaprobedone(periph); /* * Go ahead and schedule I/O execution if there is * anything in the queue. It'll probably get * kicked out with an error. */ if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } /* * Note that this is NOT the storage location used for the * leadout! */ toch = &softc->toc.header; if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* Number of TOC entries, plus leadout */ num_entries = toch->ending_track - toch->starting_track + 2; cdindex = toch->starting_track + num_entries - 1; if ((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_TOC_HDR) { if (num_entries <= 0 || num_entries > nitems(softc->toc.entries)) { softc->flags &= ~CD_FLAG_VALID_TOC; bzero(&softc->toc, sizeof(softc->toc)); /* * Failing the TOC read is not an error. 
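/*
 * Editor's note: the TOC sizing rule the CD_CCB_MEDIA_TOC_HDR completion
 * applies here, standalone: one entry per track between the header's
 * starting and ending track numbers, plus one more for the lead-out,
 * plus the header itself (entry and header sizes of 8 and 4 bytes are
 * assumed for illustration).
 */
#include <stdio.h>

int main(void)
{
	int starting_track = 1, ending_track = 12;
	int entry_size = 8, hdr_size = 4;	/* assumed wire sizes */

	/* Number of TOC entries, plus leadout */
	int num_entries = ending_track - starting_track + 2;
	int toc_read_len = num_entries * entry_size + hdr_size;

	printf("%d entries, %d byte read\n", num_entries, toc_read_len);
	return (0);
}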
*/ softc->state = CD_STATE_NORMAL; xpt_release_ccb(done_ccb); cdmediaprobedone(periph); /* * Go ahead and schedule I/O execution if * there is anything in the queue. It'll * probably get kicked out with an error. */ if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } else { softc->toc_read_len = num_entries * sizeof(struct cd_toc_entry); softc->toc_read_len += sizeof(*toch); softc->state = CD_STATE_MEDIA_TOC_FULL; xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } return; } else if ((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_TOC_LEAD) { struct cd_toc_single *leadout; leadout = (struct cd_toc_single *)csio->data_ptr; softc->toc.entries[cdindex - toch->starting_track] = leadout->entry; } else if (((done_ccb->ccb_h.ccb_state & CD_CCB_TYPE_MASK) == CD_CCB_MEDIA_TOC_FULL) && (cdindex == toch->ending_track + 1)) { /* * XXX KDM is this necessary? Probably only if the * drive doesn't return leadout information with the * table of contents. */ softc->state = CD_STATE_MEDIA_TOC_LEAD; xpt_release_ccb(done_ccb); xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (cdindex = 0; cdindex < num_entries - 1; cdindex++){ softc->toc.entries[cdindex].track = bcd2bin(softc->toc.entries[cdindex].track); } } softc->flags |= CD_FLAG_VALID_TOC; /* If the first track is audio, correct sector size. */ if ((softc->toc.entries[0].control & 4) == 0) { softc->disk->d_sectorsize =softc->params.blksize = 2352; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } softc->state = CD_STATE_NORMAL; /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? 
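/*
 * Editor's note: drives with the CD_Q_BCD_TRACKS quirk report track
 * numbers in binary-coded decimal (track 12 arrives as 0x12), which is
 * why the loops above rewrite every track number with bcd2bin().
 * Standalone versions of both directions:
 */
#include <stdio.h>

static unsigned char my_bcd2bin(unsigned char v)
{
	return ((v >> 4) * 10 + (v & 0x0f));
}

static unsigned char my_bin2bcd(unsigned char v)
{
	return (((v / 10) << 4) | (v % 10));
}

int main(void)
{
	printf("0x12 -> %u, 21 -> %#x\n", my_bcd2bin(0x12),
	    my_bin2bcd(21));	/* 12 and 0x21 */
	return (0);
}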
*/ if ((softc->disk->d_devstat->flags & DEVSTAT_BS_UNAVAILABLE)!=0) softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->disk->d_devstat->block_size = softc->params.blksize; xpt_release_ccb(done_ccb); cdmediaprobedone(periph); if (bioq_first(&softc->bio_queue) != NULL) xpt_schedule(periph, CAM_PRIORITY_NORMAL); return; } default: break; } xpt_release_ccb(done_ccb); } static union cd_pages * cdgetpage(struct cd_mode_params *mode_params) { union cd_pages *page; if (mode_params->cdb_size == 10) page = (union cd_pages *)find_mode_page_10( (struct scsi_mode_header_10 *)mode_params->mode_buf); else page = (union cd_pages *)find_mode_page_6( (struct scsi_mode_header_6 *)mode_params->mode_buf); return (page); } static int cdgetpagesize(int page_num) { u_int i; for (i = 0; i < nitems(cd_page_size_table); i++) { if (cd_page_size_table[i].page == page_num) return (cd_page_size_table[i].page_size); } return (-1); } static struct cd_toc_entry * te_data_get_ptr(void *irtep, u_long cmd) { union { struct ioc_read_toc_entry irte; #ifdef COMPAT_FREEBSD32 struct ioc_read_toc_entry32 irte32; #endif } *irteup; irteup = irtep; switch (IOCPARM_LEN(cmd)) { case sizeof(irteup->irte): return (irteup->irte.data); #ifdef COMPAT_FREEBSD32 case sizeof(irteup->irte32): return ((struct cd_toc_entry *)(uintptr_t)irteup->irte32.data); #endif default: panic("Unhandled ioctl command %ld", cmd); } } static int cdioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; int error = 0; periph = (struct cam_periph *)dp->d_drv1; cam_periph_lock(periph); softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cdioctl(%#lx)\n", cmd)); if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * If we don't have media loaded, check for it. If still don't * have media loaded, we can only do a load or eject. * * We only care whether media is loaded if this is a cd-specific ioctl * (thus the IOCGROUP check below). Note that this will break if * anyone adds any ioctls into the switch statement below that don't * have their ioctl group set to 'c'. */ if (((softc->flags & CD_FLAG_VALID_MEDIA) == 0) && ((cmd != CDIOCCLOSE) && (cmd != CDIOCEJECT)) && (IOCGROUP(cmd) == 'c')) { error = cdcheckmedia(periph, /*do_wait*/ 1); if (error != 0) { cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } } /* * Drop the lock here so later mallocs can use WAITOK. The periph * is essentially locked still with the cam_periph_hold call above. */ cam_periph_unlock(periph); switch (cmd) { case CDIOCPLAYTRACKS: { struct ioc_play_track *args = (struct ioc_play_track *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYTRACKS\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } /* * This was originally implemented with the PLAY * AUDIO TRACK INDEX command, but that command was * deprecated after SCSI-2. Most (all?) 
SCSI CDROM * drives support it but ATAPI and ATAPI-derivative * drives don't seem to support it. So we keep a * cache of the table of contents and translate * track numbers to MSF format. */ if (softc->flags & CD_FLAG_VALID_TOC) { union msf_lba *sentry, *eentry; struct ioc_toc_header *th; int st, et; th = &softc->toc.header; if (args->end_track < th->ending_track + 1) args->end_track++; if (args->end_track > th->ending_track + 1) args->end_track = th->ending_track + 1; st = args->start_track - th->starting_track; et = args->end_track - th->starting_track; if (st < 0 || et < 0 || st > th->ending_track - th->starting_track || et > th->ending_track - th->starting_track) { error = EINVAL; cam_periph_unlock(periph); break; } sentry = &softc->toc.entries[st].addr; eentry = &softc->toc.entries[et].addr; error = cdplaymsf(periph, sentry->msf.minute, sentry->msf.second, sentry->msf.frame, eentry->msf.minute, eentry->msf.second, eentry->msf.frame); } else { /* * If we don't have a valid TOC, try the * play track index command. It is part of * the SCSI-2 spec, but was removed in the * MMC specs. ATAPI and ATAPI-derived * drives don't support it. */ if (softc->quirks & CD_Q_BCD_TRACKS) { args->start_track = bin2bcd(args->start_track); args->end_track = bin2bcd(args->end_track); } error = cdplaytracks(periph, args->start_track, args->start_index, args->end_track, args->end_index); } cam_periph_unlock(periph); } break; case CDIOCPLAYMSF: { struct ioc_play_msf *args = (struct ioc_play_msf *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYMSF\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplaymsf(periph, args->start_m, args->start_s, args->start_f, args->end_m, args->end_s, args->end_f); cam_periph_unlock(periph); } break; case CDIOCPLAYBLOCKS: { struct ioc_play_blocks *args = (struct ioc_play_blocks *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYBLOCKS\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.flags &= ~CD_PA_SOTC; page->audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); if (error) { cam_periph_unlock(periph); break; } error = cdplay(periph, args->blk, args->len); cam_periph_unlock(periph); } break; case CDIOCREADSUBCHANNEL: { struct ioc_read_subchannel *args = (struct ioc_read_subchannel *) addr; struct cd_sub_channel_info *data; u_int32_t len = args->data_len; data = malloc(sizeof(struct cd_sub_channel_info), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCREADSUBCHANNEL\n")); if ((len > sizeof(struct cd_sub_channel_info)) || (len < sizeof(struct cd_sub_channel_header))) { printf( "scsi_cd: cdioctl: " "cdioreadsubchannel: 
error, len=%d\n", len); error = EINVAL; free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) args->track = bin2bcd(args->track); error = cdreadsubchannel(periph, args->address_format, args->data_format, args->track, data, len); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->what.track_info.track_number = bcd2bin(data->what.track_info.track_number); len = min(len, ((data->header.data_len[0] << 8) + data->header.data_len[1] + sizeof(struct cd_sub_channel_header))); cam_periph_unlock(periph); error = copyout(data, args->data, len); free(data, M_SCSICD); } break; case CDIOREADTOCHEADER: { struct ioc_toc_header *th; th = malloc(sizeof(struct ioc_toc_header), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCHEADER\n")); error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/SF_NO_PRINT); if (error) { free(th, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } th->len = ntohs(th->len); bcopy(th, addr, sizeof(*th)); free(th, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOREADTOCENTRYS: #ifdef COMPAT_FREEBSD32 case CDIOREADTOCENTRYS_32: #endif { struct cd_tocdata *data; struct cd_toc_single *lead; struct ioc_read_toc_entry *te = (struct ioc_read_toc_entry *) addr; struct ioc_toc_header *th; u_int32_t len, readlen, idx, num; u_int32_t starting_track = te->starting_track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); lead = malloc(sizeof(*lead), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRYS\n")); if (te->data_len < sizeof(struct cd_toc_entry) || (te->data_len % sizeof(struct cd_toc_entry)) != 0 || (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT)) { error = EINVAL; printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } if (starting_track == 0) starting_track = th->starting_track; else if (starting_track == LEADOUT) starting_track = th->ending_track + 1; else if (starting_track < th->starting_track || starting_track > th->ending_track + 1) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); error = EINVAL; break; } /* calculate reading length without leadout entry */ readlen = (th->ending_track - starting_track + 1) * sizeof(struct cd_toc_entry); /* and with leadout entry */ len = readlen + sizeof(struct cd_toc_entry); if (te->data_len < len) { len = te->data_len; if (readlen > len) readlen = len; } if (len > sizeof(data->entries)) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); error = EINVAL; free(data, M_SCSICD); free(lead, M_SCSICD); 
cam_periph_unlock(periph); break; } num = len / sizeof(struct cd_toc_entry); if (readlen > 0) { error = cdreadtoc(periph, te->address_format, starting_track, (u_int8_t *)data, readlen + sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } } /* make leadout entry if needed */ idx = starting_track + num - 1; if (softc->quirks & CD_Q_BCD_TRACKS) th->ending_track = bcd2bin(th->ending_track); if (idx == th->ending_track + 1) { error = cdreadtoc(periph, te->address_format, LEADOUT, (u_int8_t *)lead, sizeof(*lead), /*sense_flags*/0); if (error) { free(data, M_SCSICD); free(lead, M_SCSICD); cam_periph_unlock(periph); break; } data->entries[idx - starting_track] = lead->entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (idx = 0; idx < num - 1; idx++) { data->entries[idx].track = bcd2bin(data->entries[idx].track); } } cam_periph_unlock(periph); error = copyout(data->entries, te_data_get_ptr(te, cmd), len); free(data, M_SCSICD); free(lead, M_SCSICD); } break; case CDIOREADTOCENTRY: { struct cd_toc_single *data; struct ioc_read_toc_single_entry *te = (struct ioc_read_toc_single_entry *) addr; struct ioc_toc_header *th; u_int32_t track; data = malloc(sizeof(*data), M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRY\n")); if (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (u_int8_t *)th, sizeof (*th), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } track = te->track; if (track == 0) track = th->starting_track; else if (track == LEADOUT) /* OK */; else if (track < th->starting_track || track > th->ending_track + 1) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_SCSICD); error = EINVAL; cam_periph_unlock(periph); break; } error = cdreadtoc(periph, te->address_format, track, (u_int8_t *)data, sizeof(*data), /*sense_flags*/0); if (error) { free(data, M_SCSICD); cam_periph_unlock(periph); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->entry.track = bcd2bin(data->entry.track); bcopy(&data->entry, &te->entry, sizeof(struct cd_toc_entry)); free(data, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETPATCH: { struct ioc_patch *arg = (struct ioc_patch *)addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETPATCH\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = arg->patch[0]; page->audio.port[RIGHT_PORT].channels = arg->patch[1]; page->audio.port[2].channels = arg->patch[2]; page->audio.port[3].channels = arg->patch[3]; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCGETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct 
cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCGETVOL\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); arg->vol[LEFT_PORT] = page->audio.port[LEFT_PORT].volume; arg->vol[RIGHT_PORT] = page->audio.port[RIGHT_PORT].volume; arg->vol[2] = page->audio.port[2].volume; arg->vol[3] = page->audio.port[3].volume; free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETVOL\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = CHANNEL_0; page->audio.port[LEFT_PORT].volume = arg->vol[LEFT_PORT]; page->audio.port[RIGHT_PORT].channels = CHANNEL_1; page->audio.port[RIGHT_PORT].volume = arg->vol[RIGHT_PORT]; page->audio.port[2].volume = arg->vol[2]; page->audio.port[3].volume = arg->vol[3]; error = cdsetmode(periph, ¶ms); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETMONO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMONO\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); cam_periph_unlock(periph); free(params.mode_buf, M_SCSICD); } break; case CDIOCSETSTEREO: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETSTEREO\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, ¶ms); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETMUTE: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMUTE\n")); error = cdgetmode(periph, ¶ms, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(¶ms); 
page->audio.port[LEFT_PORT].channels = 0; page->audio.port[RIGHT_PORT].channels = 0; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETLEFT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETLEFT\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = LEFT_CHANNEL; page->audio.port[RIGHT_PORT].channels = LEFT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCSETRIGHT: { struct cd_mode_params params; union cd_pages *page; params.alloc_len = sizeof(union cd_mode_data_6_10); params.mode_buf = malloc(params.alloc_len, M_SCSICD, M_WAITOK | M_ZERO); cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETRIGHT\n")); error = cdgetmode(periph, &params, AUDIO_PAGE); if (error) { free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); break; } page = cdgetpage(&params); page->audio.port[LEFT_PORT].channels = RIGHT_CHANNEL; page->audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; page->audio.port[2].channels = 0; page->audio.port[3].channels = 0; error = cdsetmode(periph, &params); free(params.mode_buf, M_SCSICD); cam_periph_unlock(periph); } break; case CDIOCRESUME: cam_periph_lock(periph); error = cdpause(periph, 1); cam_periph_unlock(periph); break; case CDIOCPAUSE: cam_periph_lock(periph); error = cdpause(periph, 0); cam_periph_unlock(periph); break; case CDIOCSTART: cam_periph_lock(periph); error = cdstartunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCCLOSE: cam_periph_lock(periph); error = cdstartunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCSTOP: cam_periph_lock(periph); error = cdstopunit(periph, 0); cam_periph_unlock(periph); break; case CDIOCEJECT: cam_periph_lock(periph); error = cdstopunit(periph, 1); cam_periph_unlock(periph); break; case CDIOCALLOW: cam_periph_lock(periph); cdprevent(periph, PR_ALLOW); cam_periph_unlock(periph); break; case CDIOCPREVENT: cam_periph_lock(periph); cdprevent(periph, PR_PREVENT); cam_periph_unlock(periph); break; case CDIOCSETDEBUG: /* sc_link->flags |= (SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCCLRDEBUG: /* sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCRESET: /* return (cd_reset(periph)); */ error = ENOTTY; break; case CDRIOCREADSPEED: cam_periph_lock(periph); error = cdsetspeed(periph, *(u_int32_t *)addr, CDR_MAX_SPEED); cam_periph_unlock(periph); break; case CDRIOCWRITESPEED: cam_periph_lock(periph); error = cdsetspeed(periph, CDR_MAX_SPEED, *(u_int32_t *)addr); cam_periph_unlock(periph); break; case CDRIOCGETBLOCKSIZE: *(int *)addr = softc->params.blksize; break; case CDRIOCSETBLOCKSIZE: if (*(int *)addr <= 0) { error = EINVAL; break; } softc->disk->d_sectorsize = softc->params.blksize = *(int *)addr; break; case DVDIOCSENDKEY: case DVDIOCREPORTKEY: { struct dvd_authinfo *authinfo; authinfo = (struct dvd_authinfo *)addr; if (cmd == DVDIOCREPORTKEY) error = cdreportkey(periph, authinfo); else error = cdsendkey(periph, authinfo); break;
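/*
 * A hedged sketch of how userland typically drives this pair for DVD
 * CSS authentication (fd is an open cd(4) descriptor; error handling
 * omitted; illustrative only, not part of this driver):
 *
 *	#include <sys/dvdio.h>
 *	#include <sys/ioctl.h>
 *
 *	struct dvd_authinfo ai = { 0 };
 *	ai.format = DVD_REPORT_AGID;		// obtain an AGID
 *	ioctl(fd, DVDIOCREPORTKEY, &ai);
 *	ai.format = DVD_SEND_CHALLENGE;		// exchange challenge/key
 *	ioctl(fd, DVDIOCSENDKEY, &ai);		// material via ai.keychal[]
 *	ai.format = DVD_REPORT_KEY1;
 *	ioctl(fd, DVDIOCREPORTKEY, &ai);
 */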
} case DVDIOCREADSTRUCTURE: { struct dvd_struct *dvdstruct; dvdstruct = (struct dvd_struct *)addr; error = cdreaddvdstructure(periph, dvdstruct); break; } default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, addr, cderror); cam_periph_unlock(periph); break; } cam_periph_lock(periph); cam_periph_unhold(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdioctl\n")); if (error && bootverbose) { printf("scsi_cd.c::ioctl cmd=%08lx error=%d\n", cmd, error); } cam_periph_unlock(periph); return (error); } static void cdprevent(struct cam_periph *periph, int action) { union ccb *ccb; struct cd_softc *softc; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdprevent\n")); cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & CD_FLAG_DISC_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & CD_FLAG_DISC_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, /* timeout */60000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~CD_FLAG_DISC_LOCKED; else softc->flags |= CD_FLAG_DISC_LOCKED; } } static void cdmediaprobedone(struct cam_periph *periph) { struct cd_softc *softc; cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; softc->flags &= ~CD_FLAG_MEDIA_SCAN_ACT; if ((softc->flags & CD_FLAG_MEDIA_WAIT) != 0) { softc->flags &= ~CD_FLAG_MEDIA_WAIT; wakeup(&softc->toc); } } /* * XXX: the disk media and sector size is only really able to change * XXX: while the device is closed. */ static int cdcheckmedia(struct cam_periph *periph, int do_wait) { struct cd_softc *softc; int error; cam_periph_assert(periph, MA_OWNED); softc = (struct cd_softc *)periph->softc; error = 0; if ((do_wait != 0) && ((softc->flags & CD_FLAG_MEDIA_WAIT) == 0)) { softc->flags |= CD_FLAG_MEDIA_WAIT; } if ((softc->flags & CD_FLAG_MEDIA_SCAN_ACT) == 0) { softc->state = CD_STATE_MEDIA_PREVENT; softc->flags |= CD_FLAG_MEDIA_SCAN_ACT; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } if (do_wait == 0) goto bailout; error = msleep(&softc->toc, cam_periph_mtx(periph), PRIBIO,"cdmedia",0); if (error != 0) goto bailout; /* * Check to see whether we have a valid size from the media. We * may or may not have a valid TOC. */ if ((softc->flags & CD_FLAG_VALID_MEDIA) == 0) error = EINVAL; bailout: return (error); } #if 0 static int cdcheckmedia(struct cam_periph *periph) { struct cd_softc *softc; struct ioc_toc_header *toch; struct cd_toc_single leadout; u_int32_t size, toclen; int error, num_entries, cdindex; softc = (struct cd_softc *)periph->softc; cdprevent(periph, PR_PREVENT); softc->disk->d_sectorsize = 2048; softc->disk->d_mediasize = 0; /* * Get the disc size and block size. If we can't get it, we don't * have media, most likely. */ if ((error = cdsize(periph, &size)) != 0) { softc->flags &= ~(CD_FLAG_VALID_MEDIA|CD_FLAG_VALID_TOC); cdprevent(periph, PR_ALLOW); return (error); } else { softc->flags |= CD_FLAG_SAW_MEDIA | CD_FLAG_VALID_MEDIA; softc->disk->d_sectorsize = softc->params.blksize; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } /* * Now we check the table of contents. This (currently) is only * used for the CDIOCPLAYTRACKS ioctl. 
It may be used later to do * things like present a separate entry in /dev for each track, * like that acd(4) driver does. */ bzero(&softc->toc, sizeof(softc->toc)); toch = &softc->toc.header; /* * We will get errors here for media that doesn't have a table of * contents. According to the MMC-3 spec: "When a Read TOC/PMA/ATIP * command is presented for a DDCD/CD-R/RW media, where the first TOC * has not been recorded (no complete session) and the Format codes * 0000b, 0001b, or 0010b are specified, this command shall be rejected * with an INVALID FIELD IN CDB. Devices that are not capable of * reading an incomplete session on DDC/CD-R/RW media shall report * CANNOT READ MEDIUM - INCOMPATIBLE FORMAT." * * So this isn't fatal if we can't read the table of contents, it * just means that the user won't be able to issue the play tracks * ioctl, and likely lots of other stuff won't work either. They * need to burn the CD before we can do a whole lot with it. So * we don't print anything here if we get an error back. */ error = cdreadtoc(periph, 0, 0, (u_int8_t *)toch, sizeof(*toch), SF_NO_PRINT); /* * Errors in reading the table of contents aren't fatal, we just * won't have a valid table of contents cached. */ if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* Number of TOC entries, plus leadout */ num_entries = (toch->ending_track - toch->starting_track) + 2; if (num_entries <= 0) goto bailout; toclen = num_entries * sizeof(struct cd_toc_entry); error = cdreadtoc(periph, CD_MSF_FORMAT, toch->starting_track, (u_int8_t *)&softc->toc, toclen + sizeof(*toch), SF_NO_PRINT); if (error != 0) { error = 0; bzero(&softc->toc, sizeof(softc->toc)); goto bailout; } if (softc->quirks & CD_Q_BCD_TRACKS) { toch->starting_track = bcd2bin(toch->starting_track); toch->ending_track = bcd2bin(toch->ending_track); } /* * XXX KDM is this necessary? Probably only if the drive doesn't * return leadout information with the table of contents. */ cdindex = toch->starting_track + num_entries -1; if (cdindex == toch->ending_track + 1) { error = cdreadtoc(periph, CD_MSF_FORMAT, LEADOUT, (u_int8_t *)&leadout, sizeof(leadout), SF_NO_PRINT); if (error != 0) { error = 0; goto bailout; } softc->toc.entries[cdindex - toch->starting_track] = leadout.entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (cdindex = 0; cdindex < num_entries - 1; cdindex++) { softc->toc.entries[cdindex].track = bcd2bin(softc->toc.entries[cdindex].track); } } softc->flags |= CD_FLAG_VALID_TOC; /* If the first track is audio, correct sector size. */ if ((softc->toc.entries[0].control & 4) == 0) { softc->disk->d_sectorsize = softc->params.blksize = 2352; softc->disk->d_mediasize = (off_t)softc->params.blksize * softc->params.disksize; } bailout: /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? 
*/ if ((softc->disk->d_devstat->flags & DEVSTAT_BS_UNAVAILABLE) != 0) softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->disk->d_devstat->block_size = softc->params.blksize; return (error); } static int cdsize(struct cam_periph *periph, u_int32_t *size) { struct cd_softc *softc; union ccb *ccb; struct scsi_read_capacity_data *rcap_buf; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdsize\n")); softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); /* XXX Should be M_WAITOK */ rcap_buf = malloc(sizeof(struct scsi_read_capacity_data), M_SCSICD, M_NOWAIT | M_ZERO); if (rcap_buf == NULL) return (ENOMEM); scsi_read_capacity(&ccb->csio, /*retries*/ cd_retry_count, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, rcap_buf, SSD_FULL_SIZE, /* timeout */20000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); softc->params.disksize = scsi_4btoul(rcap_buf->addr) + 1; softc->params.blksize = scsi_4btoul(rcap_buf->length); /* Make sure we got at least some block size. */ if (error == 0 && softc->params.blksize == 0) error = EIO; /* * SCSI-3 mandates that the reported blocksize shall be 2048. * Older drives sometimes report funny values, trim it down to * 2048, or other parts of the kernel will get confused. * * XXX we leave drives alone that might report 512 bytes, as * well as drives reporting more weird sizes like perhaps 4K. */ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; free(rcap_buf, M_SCSICD); *size = softc->params.disksize; return (error); } #endif static int cd6byteworkaround(union ccb *ccb) { u_int8_t *cdb; struct cam_periph *periph; struct cd_softc *softc; struct cd_mode_params *params; int frozen, found; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; cdb = ccb->csio.cdb_io.cdb_bytes; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) || ((cdb[0] != MODE_SENSE_6) && (cdb[0] != MODE_SELECT_6))) return (0); /* * Because there is no convenient place to stash the overall * cd_mode_params structure pointer, we have to grab it like this. * This means that ALL MODE_SENSE and MODE_SELECT requests in the * cd(4) driver MUST go through cdgetmode() and cdsetmode()! * * XXX It would be nice if, at some point, we could increase the * number of available peripheral private pointers. Both pointers * are currently used in most every peripheral driver. */ found = 0; STAILQ_FOREACH(params, &softc->mode_queue, links) { if (params->mode_buf == ccb->csio.data_ptr) { found = 1; break; } } /* * This shouldn't happen. All mode sense and mode select * operations in the cd(4) driver MUST go through cdgetmode() and * cdsetmode()! */ if (found == 0) { xpt_print(periph->path, "mode buffer not found in mode queue!\n"); return (0); } params->cdb_size = 10; softc->minimum_command_size = 10; xpt_print(ccb->ccb_h.path, "%s(6) failed, increasing minimum CDB size to 10 bytes\n", (cdb[0] == MODE_SENSE_6) ? 
"MODE_SENSE" : "MODE_SELECT"); if (cdb[0] == MODE_SENSE_6) { struct scsi_mode_sense_10 ms10; struct scsi_mode_sense_6 *ms6; int len; ms6 = (struct scsi_mode_sense_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SENSE_10; ms10.byte2 = ms6->byte2; ms10.page = ms6->page; /* * 10 byte mode header, block descriptor, * sizeof(union cd_pages) */ len = sizeof(struct cd_mode_data_10); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } else { struct scsi_mode_select_10 ms10; struct scsi_mode_select_6 *ms6; struct scsi_mode_header_6 *header6; struct scsi_mode_header_10 *header10; struct scsi_mode_page_header *page_header; int blk_desc_len, page_num, page_size, len; ms6 = (struct scsi_mode_select_6 *)cdb; bzero(&ms10, sizeof(ms10)); ms10.opcode = MODE_SELECT_10; ms10.byte2 = ms6->byte2; header6 = (struct scsi_mode_header_6 *)params->mode_buf; header10 = (struct scsi_mode_header_10 *)params->mode_buf; page_header = find_mode_page_6(header6); page_num = page_header->page_code; blk_desc_len = header6->blk_desc_len; page_size = cdgetpagesize(page_num); if (page_size != (page_header->page_length + sizeof(*page_header))) page_size = page_header->page_length + sizeof(*page_header); len = sizeof(*header10) + blk_desc_len + page_size; len = min(params->alloc_len, len); /* * Since the 6 byte parameter header is shorter than the 10 * byte parameter header, we need to copy the actual mode * page data, and the block descriptor, if any, so things wind * up in the right place. The regions will overlap, but * bcopy() does the right thing. */ bcopy(params->mode_buf + sizeof(*header6), params->mode_buf + sizeof(*header10), len - sizeof(*header10)); /* Make sure these fields are set correctly. */ scsi_ulto2b(0, header10->data_length); header10->medium_type = 0; scsi_ulto2b(blk_desc_len, header10->blk_desc_len); ccb->csio.dxfer_len = len; scsi_ulto2b(len, ms10.length); ms10.control = ms6->control; bcopy(&ms10, cdb, 10); ccb->csio.cdb_len = 10; } frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); /* * We use a status of CAM_REQ_INVALID as shorthand -- if a 6 byte * CDB comes back with this particular error, try transforming it * into the 10 byte version. */ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cd6byteworkaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cd6byteworkaround(ccb); else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) disk_media_changed(softc->disk, M_NOWAIT); else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & CD_FLAG_SAW_MEDIA)) { softc->flags &= ~CD_FLAG_SAW_MEDIA; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (error); /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. 
*/ sense_flags |= SF_RETRY_UA; if (softc->quirks & CD_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return (cam_periph_error(ccb, cam_flags, sense_flags)); } static void cdmediapoll(void *arg) { struct cam_periph *periph = arg; struct cd_softc *softc = periph->softc; if (softc->state == CD_STATE_NORMAL && !softc->tur && softc->outstanding_cmds == 0) { if (cam_periph_acquire(periph) == 0) { softc->tur = 1; xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } /* Queue us up again */ if (cd_poll_period != 0) callout_schedule(&softc->mediapoll_c, cd_poll_period * hz); } /* * Read table of contents */ static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, u_int8_t *data, u_int32_t len, u_int32_t sense_flags) { - u_int32_t ntoc; struct ccb_scsiio *csio; union ccb *ccb; int error; - ntoc = len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; scsi_read_toc(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte1_flags */ (mode == CD_MSF_FORMAT) ? CD_MSF : 0, /* format */ SRTOC_FORMAT_TOC, /* track*/ start, /* data_ptr */ data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA | sense_flags); xpt_release_ccb(ccb); return(error); } static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len) { struct scsi_read_subchannel *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_subchannel), /* timeout */ 50000); scsi_cmd = (struct scsi_read_subchannel *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_SUBCHANNEL; if (mode == CD_MSF_FORMAT) scsi_cmd->byte1 |= CD_MSF; scsi_cmd->byte2 = SRS_SUBQ; scsi_cmd->subchan_format = format; scsi_cmd->track = track; scsi_ulto2b(len, (u_int8_t *)scsi_cmd->data_len); scsi_cmd->control = 0; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } /* * All MODE_SENSE requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. 
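 * (cdgetmode() briefly queues its cd_mode_params on softc->mode_queue
 * so that cd6byteworkaround() can map the CCB's data pointer back to
 * the full parameter structure if the command must be reissued with a
 * 10 byte CDB.)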
*/ static int cdgetmode(struct cam_periph *periph, struct cd_mode_params *data, u_int32_t page) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; data->cdb_size = softc->minimum_command_size; if (data->cdb_size < 10) param_len = sizeof(struct cd_mode_data); else param_len = sizeof(struct cd_mode_data_10); /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_sense_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ 0, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ page, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ softc->minimum_command_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* * It would be nice not to have to do this, but there's no * available pointer in the CCB that would allow us to stuff the * mode params structure in there and retrieve it in * cd6byteworkaround(), so we can set the cdb size. The cdb size * lets the caller know what CDB size we ended up using, so they * can find the actual mode page offset. */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); /* * This is a bit of belt-and-suspenders checking, but if we run * into a situation where the target sends back multiple block * descriptors, we might not have enough space in the buffer to * see the whole mode page. Better to return an error than * potentially access memory beyond our malloced region. */ if (error == 0) { u_int32_t data_len; if (data->cdb_size == 10) { struct scsi_mode_header_10 *hdr10; hdr10 = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(hdr10->data_length); data_len += sizeof(hdr10->data_length); } else { struct scsi_mode_header_6 *hdr6; hdr6 = (struct scsi_mode_header_6 *)data->mode_buf; data_len = hdr6->data_length; data_len += sizeof(hdr6->data_length); } /* * Complain if there is more mode data available than we * allocated space for. This could potentially happen if * we miscalculated the page length for some reason, if the * drive returns multiple block descriptors, or if it sets * the data length incorrectly. */ if (data_len > data->alloc_len) { xpt_print(periph->path, "allocated modepage %d length " "%d < returned length %d\n", page, data->alloc_len, data_len); error = ENOSPC; } } return (error); } /* * All MODE_SELECT requests in the cd(4) driver MUST go through this * routine. See comments in cd6byteworkaround() for details. */ static int cdsetmode(struct cam_periph *periph, struct cd_mode_params *data) { struct ccb_scsiio *csio; struct cd_softc *softc; union ccb *ccb; int cdb_size, param_len; int error; softc = (struct cd_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = 0; /* * If the data is formatted for the 10 byte version of the mode * select parameter list, we need to use the 10 byte CDB. * Otherwise, we use whatever the stored minimum command size. 
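 * (The distinction matters because the 6 and 10 byte parameter lists
 * have different mode header layouts; a buffer filled in by a 10 byte
 * MODE SENSE has to be sent back out with a 10 byte MODE SELECT.)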
*/ if (data->cdb_size == 10) cdb_size = data->cdb_size; else cdb_size = softc->minimum_command_size; if (cdb_size >= 10) { struct scsi_mode_header_10 *mode_header; u_int32_t data_len; mode_header = (struct scsi_mode_header_10 *)data->mode_buf; data_len = scsi_2btoul(mode_header->data_length); scsi_ulto2b(0, mode_header->data_length); /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; /* * Pass back whatever the drive passed to us, plus the size * of the data length field. */ param_len = data_len + sizeof(mode_header->data_length); } else { struct scsi_mode_header_6 *mode_header; mode_header = (struct scsi_mode_header_6 *)data->mode_buf; param_len = mode_header->data_length + 1; mode_header->data_length = 0; /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ mode_header->medium_type = 0; } /* Don't say we've got more room than we actually allocated */ param_len = min(param_len, data->alloc_len); scsi_mode_select_len(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ 0, /* param_buf */ data->mode_buf, /* param_len */ param_len, /* minimum_cmd_size */ cdb_size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); /* See comments in cdgetmode() and cd6byteworkaround(). */ STAILQ_INSERT_TAIL(&softc->mode_queue, data, links); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); STAILQ_REMOVE(&softc->mode_queue, data, cd_mode_params, links); return (error); } static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len) { struct ccb_scsiio *csio; union ccb *ccb; int error; u_int8_t cdb_len; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* * Use the smallest possible command to perform the operation. */ if ((len & 0xffff0000) == 0) { /* * We can fit in a 10 byte cdb. 
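 * (PLAY AUDIO(10) only carries a 16 bit transfer length, hence the
 * (len & 0xffff0000) test above; longer requests fall through to
 * PLAY AUDIO(12), which has a 32 bit length field.)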
*/ struct scsi_play_10 *scsi_cmd; scsi_cmd = (struct scsi_play_10 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_10; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto2b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } else { struct scsi_play_12 *scsi_cmd; scsi_cmd = (struct scsi_play_12 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_12; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto4b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, /*retries*/ cd_retry_count, /*cbfcnp*/NULL, /*flags*/CAM_DIR_NONE, MSG_SIMPLE_Q_TAG, /*dataptr*/NULL, /*datalen*/0, /*sense_len*/SSD_FULL_SIZE, cdb_len, /*timeout*/50 * 1000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf) { struct scsi_play_msf *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_msf), /* timeout */ 50000); scsi_cmd = (struct scsi_play_msf *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_MSF; scsi_cmd->start_m = startm; scsi_cmd->start_s = starts; scsi_cmd->start_f = startf; scsi_cmd->end_m = endm; scsi_cmd->end_s = ends; scsi_cmd->end_f = endf; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex) { struct scsi_play_track *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_track), /* timeout */ 50000); scsi_cmd = (struct scsi_play_track *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_TRACK; scsi_cmd->start_track = strack; scsi_cmd->start_index = sindex; scsi_cmd->end_track = etrack; scsi_cmd->end_index = eindex; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdpause(struct cam_periph *periph, u_int32_t go) { struct scsi_pause *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_pause), /* timeout */ 50000); scsi_cmd = (struct scsi_pause *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PAUSE; scsi_cmd->resume = go; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); 
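/*
 * The resume bit in the PAUSE/RESUME CDB selects the direction: go == 1
 * resumes playback and go == 0 pauses it, which is why CDIOCRESUME and
 * CDIOCPAUSE above call cdpause() with 1 and 0 respectively.
 */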
xpt_release_ccb(ccb); return(error); } static int cdstartunit(struct cam_periph *periph, int load) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ TRUE, /* load_eject */ load, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstopunit(struct cam_periph *periph, u_int32_t eject) { union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_start_stop(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ FALSE, /* load_eject */ eject, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed) { struct scsi_set_speed *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; /* Preserve old behavior: units in multiples of CDROM speed */ if (rdspeed < 177) rdspeed *= 177; if (wrspeed < 177) wrspeed *= 177; cam_fill_csio(csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_set_speed), /* timeout */ 50000); scsi_cmd = (struct scsi_set_speed *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CD_SPEED; scsi_ulto2b(rdspeed, scsi_cmd->readspeed); scsi_ulto2b(wrspeed, scsi_cmd->writespeed); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; u_int32_t lba; int error; int length; error = 0; databuf = NULL; lba = 0; switch (authinfo->format) { case DVD_REPORT_AGID: length = sizeof(struct scsi_report_key_data_agid); break; case DVD_REPORT_CHALLENGE: length = sizeof(struct scsi_report_key_data_challenge); break; case DVD_REPORT_KEY1: length = sizeof(struct scsi_report_key_data_key1_key2); break; case DVD_REPORT_TITLE_KEY: length = sizeof(struct scsi_report_key_data_title); /* The lba field is only set for the title key */ lba = authinfo->lba; break; case DVD_REPORT_ASF: length = sizeof(struct scsi_report_key_data_asf); break; case DVD_REPORT_RPC: length = sizeof(struct scsi_report_key_data_rpc); break; case DVD_INVALIDATE_AGID: length = 0; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_report_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ lba, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; if (ccb->csio.resid != 0) { 
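/*
 * A nonzero residual just means the drive returned fewer key bytes
 * than we asked for; warn and keep going, since the format-specific
 * parsing below only reads its own structure's worth of data.
 */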
xpt_print(periph->path, "warning, residual for report key " "command is %d\n", ccb->csio.resid); } switch(authinfo->format) { case DVD_REPORT_AGID: { struct scsi_report_key_data_agid *agid_data; agid_data = (struct scsi_report_key_data_agid *)databuf; authinfo->agid = (agid_data->agid & RKD_AGID_MASK) >> RKD_AGID_SHIFT; break; } case DVD_REPORT_CHALLENGE: { struct scsi_report_key_data_challenge *chal_data; chal_data = (struct scsi_report_key_data_challenge *)databuf; bcopy(chal_data->challenge_key, authinfo->keychal, min(sizeof(chal_data->challenge_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_KEY1: { struct scsi_report_key_data_key1_key2 *key1_data; key1_data = (struct scsi_report_key_data_key1_key2 *)databuf; bcopy(key1_data->key1, authinfo->keychal, min(sizeof(key1_data->key1), sizeof(authinfo->keychal))); break; } case DVD_REPORT_TITLE_KEY: { struct scsi_report_key_data_title *title_data; title_data = (struct scsi_report_key_data_title *)databuf; authinfo->cpm = (title_data->byte0 & RKD_TITLE_CPM) >> RKD_TITLE_CPM_SHIFT; authinfo->cp_sec = (title_data->byte0 & RKD_TITLE_CP_SEC) >> RKD_TITLE_CP_SEC_SHIFT; authinfo->cgms = (title_data->byte0 & RKD_TITLE_CMGS_MASK) >> RKD_TITLE_CMGS_SHIFT; bcopy(title_data->title_key, authinfo->keychal, min(sizeof(title_data->title_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_ASF: { struct scsi_report_key_data_asf *asf_data; asf_data = (struct scsi_report_key_data_asf *)databuf; authinfo->asf = asf_data->success & RKD_ASF_SUCCESS; break; } case DVD_REPORT_RPC: { struct scsi_report_key_data_rpc *rpc_data; rpc_data = (struct scsi_report_key_data_rpc *)databuf; authinfo->reg_type = (rpc_data->byte4 & RKD_RPC_TYPE_MASK) >> RKD_RPC_TYPE_SHIFT; authinfo->vend_rsts = (rpc_data->byte4 & RKD_RPC_VENDOR_RESET_MASK) >> RKD_RPC_VENDOR_RESET_SHIFT; authinfo->user_rsts = rpc_data->byte4 & RKD_RPC_USER_RESET_MASK; authinfo->region = rpc_data->region_mask; authinfo->rpc_scheme = rpc_data->rpc_scheme1; break; } case DVD_INVALIDATE_AGID: break; default: /* This should be impossible, since we checked above */ error = EINVAL; goto bailout; break; /* NOTREACHED */ } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; int length; int error; error = 0; databuf = NULL; switch(authinfo->format) { case DVD_SEND_CHALLENGE: { struct scsi_report_key_data_challenge *challenge_data; length = sizeof(*challenge_data); challenge_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)challenge_data; scsi_ulto2b(length - sizeof(challenge_data->data_len), challenge_data->data_len); bcopy(authinfo->keychal, challenge_data->challenge_key, min(sizeof(authinfo->keychal), sizeof(challenge_data->challenge_key))); break; } case DVD_SEND_KEY2: { struct scsi_report_key_data_key1_key2 *key2_data; length = sizeof(*key2_data); key2_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)key2_data; scsi_ulto2b(length - sizeof(key2_data->data_len), key2_data->data_len); bcopy(authinfo->keychal, key2_data->key1, min(sizeof(authinfo->keychal), sizeof(key2_data->key1))); break; } case DVD_SEND_RPC: { struct scsi_send_key_data_rpc *rpc_data; length = sizeof(*rpc_data); rpc_data = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); databuf = (u_int8_t *)rpc_data; scsi_ulto2b(length - sizeof(rpc_data->data_len), rpc_data->data_len); rpc_data->region_code = 
authinfo->region; break; } default: return (EINVAL); } cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_send_key(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct) { union ccb *ccb; u_int8_t *databuf; u_int32_t address; int error; int length; error = 0; databuf = NULL; /* The address is reserved for many of the formats */ address = 0; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: length = sizeof(struct scsi_read_dvd_struct_data_physical); break; case DVD_STRUCT_COPYRIGHT: length = sizeof(struct scsi_read_dvd_struct_data_copyright); break; case DVD_STRUCT_DISCKEY: length = sizeof(struct scsi_read_dvd_struct_data_disc_key); break; case DVD_STRUCT_BCA: length = sizeof(struct scsi_read_dvd_struct_data_bca); break; case DVD_STRUCT_MANUFACT: length = sizeof(struct scsi_read_dvd_struct_data_manufacturer); break; case DVD_STRUCT_CMI: return (ENODEV); case DVD_STRUCT_PROTDISCID: length = sizeof(struct scsi_read_dvd_struct_data_prot_discid); break; case DVD_STRUCT_DISCKEYBLOCK: length = sizeof(struct scsi_read_dvd_struct_data_disc_key_blk); break; case DVD_STRUCT_DDS: length = sizeof(struct scsi_read_dvd_struct_data_dds); break; case DVD_STRUCT_MEDIUM_STAT: length = sizeof(struct scsi_read_dvd_struct_data_medium_status); break; case DVD_STRUCT_SPARE_AREA: length = sizeof(struct scsi_read_dvd_struct_data_spare_area); break; case DVD_STRUCT_RMD_LAST: return (ENODEV); case DVD_STRUCT_RMD_RMA: return (ENODEV); case DVD_STRUCT_PRERECORDED: length = sizeof(struct scsi_read_dvd_struct_data_leadin); break; case DVD_STRUCT_UNIQUEID: length = sizeof(struct scsi_read_dvd_struct_data_disc_id); break; case DVD_STRUCT_DCB: return (ENODEV); case DVD_STRUCT_LIST: /* * This is the maximum allocation length for the READ DVD * STRUCTURE command. There's nothing in the MMC3 spec * that indicates a limit in the amount of data that can * be returned from this call, other than the limits * imposed by the 2-byte length variables. 
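 * (Hence the 65535 below: the largest count that the CDB's 2-byte
 * allocation length field can encode.)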
*/ length = 65535; break; default: return (EINVAL); } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_WAITOK | M_ZERO); } else databuf = NULL; cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_read_dvd_structure(&ccb->csio, /* retries */ cd_retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ address, /* layer_number */ dvdstruct->layer_num, /* key_format */ dvdstruct->format, /* agid */ dvdstruct->agid, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: { struct scsi_read_dvd_struct_data_layer_desc *inlayer; struct dvd_layer *outlayer; struct scsi_read_dvd_struct_data_physical *phys_data; phys_data = (struct scsi_read_dvd_struct_data_physical *)databuf; inlayer = &phys_data->layer_desc; outlayer = (struct dvd_layer *)&dvdstruct->data; dvdstruct->length = sizeof(*inlayer); outlayer->book_type = (inlayer->book_type_version & RDSD_BOOK_TYPE_MASK) >> RDSD_BOOK_TYPE_SHIFT; outlayer->book_version = (inlayer->book_type_version & RDSD_BOOK_VERSION_MASK); outlayer->disc_size = (inlayer->disc_size_max_rate & RDSD_DISC_SIZE_MASK) >> RDSD_DISC_SIZE_SHIFT; outlayer->max_rate = (inlayer->disc_size_max_rate & RDSD_MAX_RATE_MASK); outlayer->nlayers = (inlayer->layer_info & RDSD_NUM_LAYERS_MASK) >> RDSD_NUM_LAYERS_SHIFT; outlayer->track_path = (inlayer->layer_info & RDSD_TRACK_PATH_MASK) >> RDSD_TRACK_PATH_SHIFT; outlayer->layer_type = (inlayer->layer_info & RDSD_LAYER_TYPE_MASK); outlayer->linear_density = (inlayer->density & RDSD_LIN_DENSITY_MASK) >> RDSD_LIN_DENSITY_SHIFT; outlayer->track_density = (inlayer->density & RDSD_TRACK_DENSITY_MASK); outlayer->bca = (inlayer->bca & RDSD_BCA_MASK) >> RDSD_BCA_SHIFT; outlayer->start_sector = scsi_3btoul(inlayer->main_data_start); outlayer->end_sector = scsi_3btoul(inlayer->main_data_end); outlayer->end_sector_l0 = scsi_3btoul(inlayer->end_sector_layer0); break; } case DVD_STRUCT_COPYRIGHT: { struct scsi_read_dvd_struct_data_copyright *copy_data; copy_data = (struct scsi_read_dvd_struct_data_copyright *) databuf; dvdstruct->cpst = copy_data->cps_type; dvdstruct->rmi = copy_data->region_info; dvdstruct->length = 0; break; } default: /* * Tell the user what the overall length is, no matter * what we can actually fit in the data buffer. */ dvdstruct->length = length - ccb->csio.resid - sizeof(struct scsi_read_dvd_struct_data_header); /* * But only actually copy out the smaller of what we read * in or what the structure can take. 
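 * (The min() below keeps an oversized answer from overflowing the
 * fixed-size dvdstruct->data array.)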
*/ bcopy(databuf + sizeof(struct scsi_read_dvd_struct_data_header), dvdstruct->data, min(sizeof(dvdstruct->data), dvdstruct->length)); break; } bailout: xpt_release_ccb(ccb); cam_periph_unlock(periph); if (databuf != NULL) free(databuf, M_DEVBUF); return(error); } void scsi_report_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t lba, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_key *scsi_cmd; scsi_cmd = (struct scsi_report_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_KEY; scsi_ulto4b(lba, scsi_cmd->lba); scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len == 0) ? CAM_DIR_NONE : CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_key *scsi_cmd; scsi_cmd = (struct scsi_send_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_KEY; scsi_ulto2b(dxfer_len, scsi_cmd->param_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_dvd_structure(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t address, u_int8_t layer_number, u_int8_t format, u_int8_t agid, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_dvd_structure *scsi_cmd; scsi_cmd = (struct scsi_read_dvd_structure *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_DVD_STRUCTURE; scsi_ulto4b(address, scsi_cmd->address); scsi_cmd->layer_number = layer_number; scsi_cmd->format = format; scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); /* The AGID is the top two bits of this byte */ scsi_cmd->agid = agid << 6; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_toc(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t byte1_flags, uint8_t format, uint8_t track, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_read_toc *scsi_cmd; scsi_cmd = (struct scsi_read_toc *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_TOC; /* * The structure counts from 1, the function counts from 0. * The spec counts from 0. In MMC-6, there is only one flag, the * MSF flag. But we put the whole byte in for a bit of future-proofing.
*/ scsi_cmd->byte2 = byte1_flags; scsi_cmd->format = format; scsi_cmd->from_track = track; scsi_ulto2b(dxfer_len, scsi_cmd->data_len); cam_fill_csio(csio, /* retries */ retries, /* cbfcnp */ cbfcnp, /* flags */ CAM_DIR_IN, /* tag_action */ tag_action, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ sense_len, sizeof(*scsi_cmd), /* timeout */ timeout); } diff --git a/sys/cam/scsi/scsi_ch.c b/sys/cam/scsi/scsi_ch.c index d440526a7913..bccca0e87726 100644 --- a/sys/cam/scsi/scsi_ch.c +++ b/sys/cam/scsi/scsi_ch.c @@ -1,1929 +1,1924 @@ /*- * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-4-Clause) * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1996, 1997 Jason R. Thorpe * All rights reserved. * * Partially based on an autochanger driver written by Stefan Grefen * and on an autochanger driver written by the Systems Programming Group * at the University of Utah Computer Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgements: * This product includes software developed by Jason R. Thorpe * for And Communications, http://www.and.com/ * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $NetBSD: ch.c,v 1.34 1998/08/31 22:28:06 cgd Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Timeout definitions for various changer related commands. They may * be too short for some devices (especially the timeout for INITIALIZE * ELEMENT STATUS). */ static const u_int32_t CH_TIMEOUT_MODE_SENSE = 6000; static const u_int32_t CH_TIMEOUT_MOVE_MEDIUM = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_EXCHANGE_MEDIUM = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_POSITION_TO_ELEMENT = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_READ_ELEMENT_STATUS = 5 * 60 * 1000; static const u_int32_t CH_TIMEOUT_SEND_VOLTAG = 10000; static const u_int32_t CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS = 500000; typedef enum { CH_FLAG_INVALID = 0x001 } ch_flags; typedef enum { CH_STATE_PROBE, CH_STATE_NORMAL } ch_state; typedef enum { CH_CCB_PROBE } ch_ccb_types; typedef enum { CH_Q_NONE = 0x00, CH_Q_NO_DBD = 0x01, CH_Q_NO_DVCID = 0x02 } ch_quirks; #define CH_Q_BIT_STRING \ "\020" \ "\001NO_DBD" \ "\002NO_DVCID" #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct scsi_mode_sense_data { struct scsi_mode_header_6 header; struct scsi_mode_blk_desc blk_desc; union { struct page_element_address_assignment ea; struct page_transport_geometry_parameters tg; struct page_device_capabilities cap; } pages; }; struct ch_softc { ch_flags flags; ch_state state; ch_quirks quirks; struct devstat *device_stats; struct cdev *dev; int open_count; int sc_picker; /* current picker */ /* * The following information is obtained from the * element address assignment page. */ int sc_firsts[CHET_MAX + 1]; /* firsts */ int sc_counts[CHET_MAX + 1]; /* counts */ /* * The following mask defines the legal combinations * of elements for the MOVE MEDIUM command. */ u_int8_t sc_movemask[CHET_MAX + 1]; /* * As above, but for EXCHANGE MEDIUM. */ u_int8_t sc_exchangemask[CHET_MAX + 1]; /* * Quirks; see below. 
XXX KDM not implemented yet */ int sc_settledelay; /* delay for settle */ }; static d_open_t chopen; static d_close_t chclose; static d_ioctl_t chioctl; static periph_init_t chinit; static periph_ctor_t chregister; static periph_oninv_t choninvalidate; static periph_dtor_t chcleanup; static periph_start_t chstart; static void chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void chdone(struct cam_periph *periph, union ccb *done_ccb); static int cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int chmove(struct cam_periph *periph, struct changer_move *cm); static int chexchange(struct cam_periph *periph, struct changer_exchange *ce); static int chposition(struct cam_periph *periph, struct changer_position *cp); static int chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd, struct changer_element_status_request *csr); static int chsetvoltag(struct cam_periph *periph, struct changer_set_voltag_request *csvr); static int chielem(struct cam_periph *periph, unsigned int timeout); static int chgetparams(struct cam_periph *periph); static int chscsiversion(struct cam_periph *periph); static struct periph_driver chdriver = { chinit, "ch", TAILQ_HEAD_INITIALIZER(chdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(ch, chdriver); static struct cdevsw ch_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = chopen, .d_close = chclose, .d_ioctl = chioctl, .d_name = "ch", }; static MALLOC_DEFINE(M_SCSICH, "scsi_ch", "scsi_ch buffers"); static void chinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, chasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ch: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void chdevgonecb(void *arg) { struct ch_softc *softc; struct cam_periph *periph; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct ch_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void choninvalidate(struct cam_periph *periph) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, chasync, periph, periph->path); softc->flags |= CH_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. 
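 * (destroy_dev_sched_cb() tears the devfs node down asynchronously and
 * invokes chdevgonecb() once devfs holds no more references; that
 * callback then drops the per-open references and the reference held
 * for the device node itself.)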
*/ destroy_dev_sched_cb(softc->dev, chdevgonecb, periph); } static void chcleanup(struct cam_periph *periph) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; devstat_remove_entry(softc->device_stats); free(softc, M_DEVBUF); } static void chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data)!= T_CHANGER) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(chregister, choninvalidate, chcleanup, chstart, "ch", CAM_PERIPH_BIO, path, chasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("chasync: Unable to probe new device " "due to status 0x%x\n", status); break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status chregister(struct cam_periph *periph, void *arg) { struct ch_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("chregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ch_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("chregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = CH_STATE_PROBE; periph->softc = softc; softc->quirks = CH_Q_NONE; /* * The DVCID and CURDATA bits were not introduced until the SMC * spec. If this device claims SCSI-2 or earlier support, then it * very likely does not support these bits. */ if (cgd->inq_data.version <= SCSI_REV_2) softc->quirks |= CH_Q_NO_DVCID; xpt_path_inq(&cpi, periph->path); /* * Changers don't have a blocksize, and obviously don't support * tagged queueing. */ cam_periph_unlock(periph); softc->device_stats = devstat_new_entry("ch", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | DEVSTAT_NO_ORDERED_TAGS, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_OTHER); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &ch_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); cam_periph_lock(periph); if (error != 0) { cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, chasync, periph, periph->path); /* * Lock this periph until we are setup. 
* This first call can't block */ (void)cam_periph_hold(periph, PRIBIO); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int chopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != 0) return (ENXIO); softc = (struct ch_softc *)periph->softc; cam_periph_lock(periph); if (softc->flags & CH_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * Load information about this changer device into the softc. */ if ((error = chgetparams(periph)) != 0) { cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } cam_periph_unhold(periph); softc->open_count++; cam_periph_unlock(periph); return(error); } static int chclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct ch_softc *)periph->softc; softc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return(0); } static void chstart(struct cam_periph *periph, union ccb *start_ccb) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; switch (softc->state) { case CH_STATE_NORMAL: { xpt_release_ccb(start_ccb); break; } case CH_STATE_PROBE: { int mode_buffer_len; void *mode_buffer; /* * Include the block descriptor when calculating the mode * buffer length, */ mode_buffer_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct page_element_address_assignment); mode_buffer = malloc(mode_buffer_len, M_SCSICH, M_NOWAIT); if (mode_buffer == NULL) { printf("chstart: couldn't malloc mode sense data\n"); break; } bzero(mode_buffer, mode_buffer_len); /* * Get the element address assignment page. */ scsi_mode_sense(&start_ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ (softc->quirks & CH_Q_NO_DBD) ? 
FALSE : TRUE, /* pc */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CH_CCB_PROBE; xpt_action(start_ccb); break; } } } static void chdone(struct cam_periph *periph, union ccb *done_ccb) { struct ch_softc *softc; struct ccb_scsiio *csio; softc = (struct ch_softc *)periph->softc; csio = &done_ccb->csio; switch(done_ccb->ccb_h.ccb_state) { case CH_CCB_PROBE: { struct scsi_mode_header_6 *mode_header; struct page_element_address_assignment *ea; char announce_buf[80]; mode_header = (struct scsi_mode_header_6 *)csio->data_ptr; ea = (struct page_element_address_assignment *) find_mode_page_6(mode_header); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP){ softc->sc_firsts[CHET_MT] = scsi_2btoul(ea->mtea); softc->sc_counts[CHET_MT] = scsi_2btoul(ea->nmte); softc->sc_firsts[CHET_ST] = scsi_2btoul(ea->fsea); softc->sc_counts[CHET_ST] = scsi_2btoul(ea->nse); softc->sc_firsts[CHET_IE] = scsi_2btoul(ea->fieea); softc->sc_counts[CHET_IE] = scsi_2btoul(ea->niee); softc->sc_firsts[CHET_DT] = scsi_2btoul(ea->fdtea); softc->sc_counts[CHET_DT] = scsi_2btoul(ea->ndte); softc->sc_picker = softc->sc_firsts[CHET_MT]; #define PLURAL(c) (c) == 1 ? "" : "s" snprintf(announce_buf, sizeof(announce_buf), "%d slot%s, %d drive%s, " "%d picker%s, %d portal%s", softc->sc_counts[CHET_ST], PLURAL(softc->sc_counts[CHET_ST]), softc->sc_counts[CHET_DT], PLURAL(softc->sc_counts[CHET_DT]), softc->sc_counts[CHET_MT], PLURAL(softc->sc_counts[CHET_MT]), softc->sc_counts[CHET_IE], PLURAL(softc->sc_counts[CHET_IE])); #undef PLURAL if (announce_buf[0] != '\0') { xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, CH_Q_BIT_STRING); } } else { int error; error = cherror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { struct scsi_mode_sense_6 *sms; int frozen, retry_scheduled; sms = (struct scsi_mode_sense_6 *) done_ccb->csio.cdb_io.cdb_bytes; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; /* * Check to see if block descriptors were * disabled. Some devices don't like that. * We're taking advantage of the fact that * the first few bytes of the 6 and 10 byte * mode sense commands are the same. If * block descriptors were disabled, enable * them and re-send the command. */ if ((sms->byte2 & SMS_DBD) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) { sms->byte2 &= ~SMS_DBD; xpt_action(done_ccb); softc->quirks |= CH_Q_NO_DBD; retry_scheduled = 1; } else retry_scheduled = 0; /* Don't wedge this device's queue */ if (frozen) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (retry_scheduled) return; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) scsi_sense_print(&done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, failed " "to attach to device\n"); cam_periph_invalidate(periph); } } softc->state = CH_STATE_NORMAL; free(mode_header, M_SCSICH); /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. 
* The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static int cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { - struct ch_softc *softc; - struct cam_periph *periph; - - periph = xpt_path_periph(ccb->ccb_h.path); - softc = (struct ch_softc *)periph->softc; return (cam_periph_error(ccb, cam_flags, sense_flags)); } static int chioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering chioctl\n")); softc = (struct ch_softc *)periph->softc; error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, we must * have the device open for writing. */ switch (cmd) { case CHIOGPICKER: case CHIOGPARAMS: case OCHIOGSTATUS: case CHIOGSTATUS: break; default: if ((flag & FWRITE) == 0) { cam_periph_unlock(periph); return (EBADF); } } switch (cmd) { case CHIOMOVE: error = chmove(periph, (struct changer_move *)addr); break; case CHIOEXCHANGE: error = chexchange(periph, (struct changer_exchange *)addr); break; case CHIOPOSITION: error = chposition(periph, (struct changer_position *)addr); break; case CHIOGPICKER: *(int *)addr = softc->sc_picker - softc->sc_firsts[CHET_MT]; break; case CHIOSPICKER: { int new_picker = *(int *)addr; if (new_picker > (softc->sc_counts[CHET_MT] - 1)) { error = EINVAL; break; } softc->sc_picker = softc->sc_firsts[CHET_MT] + new_picker; break; } case CHIOGPARAMS: { struct changer_params *cp = (struct changer_params *)addr; cp->cp_npickers = softc->sc_counts[CHET_MT]; cp->cp_nslots = softc->sc_counts[CHET_ST]; cp->cp_nportals = softc->sc_counts[CHET_IE]; cp->cp_ndrives = softc->sc_counts[CHET_DT]; break; } case CHIOIELEM: error = chielem(periph, *(unsigned int *)addr); break; case OCHIOGSTATUS: { error = chgetelemstatus(periph, SCSI_REV_2, cmd, (struct changer_element_status_request *)addr); break; } case CHIOGSTATUS: { int scsi_version; scsi_version = chscsiversion(periph); if (scsi_version >= SCSI_REV_0) { error = chgetelemstatus(periph, scsi_version, cmd, (struct changer_element_status_request *)addr); } else { /* unable to determine the SCSI version */ cam_periph_unlock(periph); return (ENXIO); } break; } case CHIOSETVOLTAG: { error = chsetvoltag(periph, (struct changer_set_voltag_request *) addr); break; } /* Implement prevent/allow? */ default: error = cam_periph_ioctl(periph, cmd, addr, cherror); break; } cam_periph_unlock(periph); return (error); } static int chmove(struct cam_periph *periph, struct changer_move *cm) { struct ch_softc *softc; u_int16_t fromelem, toelem; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if ((cm->cm_fromtype > CHET_DT) || (cm->cm_totype > CHET_DT)) return (EINVAL); if ((cm->cm_fromunit > (softc->sc_counts[cm->cm_fromtype] - 1)) || (cm->cm_tounit > (softc->sc_counts[cm->cm_totype] - 1))) return (ENODEV); /* * Check the request against the changer's capabilities. */ if ((softc->sc_movemask[cm->cm_fromtype] & (1 << cm->cm_totype)) == 0) return (ENODEV); /* * Calculate the source and destination elements. 
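 *
 * A worked example (values hypothetical, not from the source): with
 * sc_firsts[CHET_ST] == 1 and cm_fromunit == 2, the source element
 * address computed below is 1 + 2 == 3; the destination element is
 * mapped with the same offset arithmetic.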
*/ fromelem = softc->sc_firsts[cm->cm_fromtype] + cm->cm_fromunit; toelem = softc->sc_firsts[cm->cm_totype] + cm->cm_tounit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_move_medium(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* src */ fromelem, /* dst */ toelem, /* invert */ (cm->cm_flags & CM_INVERT) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MOVE_MEDIUM); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chexchange(struct cam_periph *periph, struct changer_exchange *ce) { struct ch_softc *softc; u_int16_t src, dst1, dst2; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if ((ce->ce_srctype > CHET_DT) || (ce->ce_fdsttype > CHET_DT) || (ce->ce_sdsttype > CHET_DT)) return (EINVAL); if ((ce->ce_srcunit > (softc->sc_counts[ce->ce_srctype] - 1)) || (ce->ce_fdstunit > (softc->sc_counts[ce->ce_fdsttype] - 1)) || (ce->ce_sdstunit > (softc->sc_counts[ce->ce_sdsttype] - 1))) return (ENODEV); /* * Check the request against the changer's capabilities. */ if (((softc->sc_exchangemask[ce->ce_srctype] & (1 << ce->ce_fdsttype)) == 0) || ((softc->sc_exchangemask[ce->ce_fdsttype] & (1 << ce->ce_sdsttype)) == 0)) return (ENODEV); /* * Calculate the source and destination elements. */ src = softc->sc_firsts[ce->ce_srctype] + ce->ce_srcunit; dst1 = softc->sc_firsts[ce->ce_fdsttype] + ce->ce_fdstunit; dst2 = softc->sc_firsts[ce->ce_sdsttype] + ce->ce_sdstunit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_exchange_medium(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* src */ src, /* dst1 */ dst1, /* dst2 */ dst2, /* invert1 */ (ce->ce_flags & CE_INVERT1) ? TRUE : FALSE, /* invert2 */ (ce->ce_flags & CE_INVERT2) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_EXCHANGE_MEDIUM); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chposition(struct cam_periph *periph, struct changer_position *cp) { struct ch_softc *softc; u_int16_t dst; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if (cp->cp_type > CHET_DT) return (EINVAL); if (cp->cp_unit > (softc->sc_counts[cp->cp_type] - 1)) return (ENODEV); /* * Calculate the destination element. */ dst = softc->sc_firsts[cp->cp_type] + cp->cp_unit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_position_to_element(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* dst */ dst, /* invert */ (cp->cp_flags & CP_INVERT) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_POSITION_TO_ELEMENT); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } /* * Copy a volume tag to a volume_tag struct, converting SCSI byte order * to host native byte order in the volume serial number. The volume * label as returned by the changer is transferred to user mode as * nul-terminated string. Volume labels are truncated at the first * space, as suggested by SCSI-2. 
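 *
 * Illustrative sketch (hypothetical tag values): a raw label of
 * "TAPE01  " copies out as the C string "TAPE01", and serial bytes
 * {0x00, 0x2a} become cv_serial == 42 via scsi_2btoul().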
*/
static void
copy_voltag(struct changer_voltag *uvoltag, struct volume_tag *voltag)
{
	int i;

	for (i = 0; i < CH_VOLTAG_MAXLEN; i++) {
		char c = voltag->vif[i];
		if (c && c != ' ')
			uvoltag->cv_volid[i] = c;
		else
			break;
	}
	uvoltag->cv_serial = scsi_2btoul(voltag->vsn);
}

/*
 * Copy an element status descriptor to a user-mode
 * changer_element_status structure.
 */
static void
copy_element_status(struct ch_softc *softc, u_int16_t flags,
    struct read_element_status_descriptor *desc,
    struct changer_element_status *ces, int scsi_version)
{
	u_int16_t eaddr = scsi_2btoul(desc->eaddr);
	u_int16_t et;
	struct volume_tag *pvol_tag = NULL, *avol_tag = NULL;
	struct read_element_status_device_id *devid = NULL;

	ces->ces_int_addr = eaddr;
	/* set up logical address in element status */
	for (et = CHET_MT; et <= CHET_DT; et++) {
		if ((softc->sc_firsts[et] <= eaddr) &&
		    ((softc->sc_firsts[et] + softc->sc_counts[et]) > eaddr)) {
			ces->ces_addr = eaddr - softc->sc_firsts[et];
			ces->ces_type = et;
			break;
		}
	}

	ces->ces_flags = desc->flags1;
	ces->ces_sensecode = desc->sense_code;
	ces->ces_sensequal = desc->sense_qual;

	if (desc->flags2 & READ_ELEMENT_STATUS_INVERT)
		ces->ces_flags |= CES_INVERT;

	if (desc->flags2 & READ_ELEMENT_STATUS_SVALID) {
		eaddr = scsi_2btoul(desc->ssea);
		/* convert source address to logical format */
		for (et = CHET_MT; et <= CHET_DT; et++) {
			if ((softc->sc_firsts[et] <= eaddr) &&
			    ((softc->sc_firsts[et] + softc->sc_counts[et]) >
			    eaddr)) {
				ces->ces_source_addr =
				    eaddr - softc->sc_firsts[et];
				ces->ces_source_type = et;
				ces->ces_flags |= CES_SOURCE_VALID;
				break;
			}
		}
		if (!(ces->ces_flags & CES_SOURCE_VALID))
			printf("ch: warning: could not map element source "
			       "address %u to a valid element type\n", eaddr);
	}

	/*
	 * pvoltag and avoltag are common between SCSI-2 and later versions
	 */
	if (flags & READ_ELEMENT_STATUS_PVOLTAG)
		pvol_tag = &desc->voltag_devid.pvoltag;
	if (flags & READ_ELEMENT_STATUS_AVOLTAG)
		avol_tag = (flags & READ_ELEMENT_STATUS_PVOLTAG) ?
		    &desc->voltag_devid.voltag[1] : &desc->voltag_devid.pvoltag;
	/*
	 * For SCSI-3 and later, element status can carry designator and
	 * other information.
	 */
	if (scsi_version >= SCSI_REV_SPC) {
		if ((flags & READ_ELEMENT_STATUS_PVOLTAG) ^
		    (flags & READ_ELEMENT_STATUS_AVOLTAG))
			devid = &desc->voltag_devid.pvol_and_devid.devid;
		else if (!(flags & READ_ELEMENT_STATUS_PVOLTAG) &&
			 !(flags & READ_ELEMENT_STATUS_AVOLTAG))
			devid = &desc->voltag_devid.devid;
		else /* Have both PVOLTAG and AVOLTAG */
			devid = &desc->voltag_devid.vol_tags_and_devid.devid;
	}

	if (pvol_tag)
		copy_voltag(&(ces->ces_pvoltag), pvol_tag);
	if (avol_tag)
		copy_voltag(&(ces->ces_avoltag), avol_tag);
	if (devid != NULL) {
		if (devid->designator_length > 0) {
			bcopy((void *)devid->designator,
			      (void *)ces->ces_designator,
			      devid->designator_length);
			ces->ces_designator_length = devid->designator_length;
			/*
			 * Make sure we are always NUL terminated.  This
			 * won't matter for the binary code set, since the
			 * user will only pay attention to the length field.
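 *
 * Sketch of the effect (hypothetical length): for an 8-byte UTF-8
 * designator, ces_designator[8] is set to '\0' below, so string
 * consumers see a terminated buffer while binary consumers rely
 * solely on ces_designator_length.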
*/ ces->ces_designator[devid->designator_length]= '\0'; } if (devid->piv_assoc_designator_type & READ_ELEMENT_STATUS_PIV_SET) { ces->ces_flags |= CES_PIV; ces->ces_protocol_id = READ_ELEMENT_STATUS_PROTOCOL_ID( devid->prot_code_set); } ces->ces_code_set = READ_ELEMENT_STATUS_CODE_SET(devid->prot_code_set); ces->ces_assoc = READ_ELEMENT_STATUS_ASSOCIATION( devid->piv_assoc_designator_type); ces->ces_designator_type = READ_ELEMENT_STATUS_DESIGNATOR_TYPE( devid->piv_assoc_designator_type); } else if (scsi_version > SCSI_REV_2) { /* SCSI-SPC and No devid, no designator */ ces->ces_designator_length = 0; ces->ces_designator[0] = '\0'; ces->ces_protocol_id = CES_PROTOCOL_ID_FCP_4; } if (scsi_version <= SCSI_REV_2) { if (desc->dt_or_obsolete.scsi_2.dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) { ces->ces_flags |= CES_SCSIID_VALID; ces->ces_scsi_id = desc->dt_or_obsolete.scsi_2.dt_scsi_addr; } if (desc->dt_or_obsolete.scsi_2.dt_scsi_addr & READ_ELEMENT_STATUS_DT_LUVALID) { ces->ces_flags |= CES_LUN_VALID; ces->ces_scsi_lun = desc->dt_or_obsolete.scsi_2.dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK; } } } static int chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd, struct changer_element_status_request *cesr) { struct read_element_status_header *st_hdr; struct read_element_status_page_header *pg_hdr; struct read_element_status_descriptor *desc; caddr_t data = NULL; size_t size, desclen; u_int avail, i; int curdata, dvcid, sense_flags; int try_no_dvcid = 0; struct changer_element_status *user_data = NULL; struct ch_softc *softc; union ccb *ccb; int chet = cesr->cesr_element_type; int error = 0; int want_voltags = (cesr->cesr_flags & CESR_VOLTAGS) ? 1 : 0; softc = (struct ch_softc *)periph->softc; /* perform argument checking */ /* * Perform a range check on the cesr_element_{base,count} * request argument fields. */ if ((softc->sc_counts[chet] - cesr->cesr_element_base) <= 0 || (cesr->cesr_element_base + cesr->cesr_element_count) > softc->sc_counts[chet]) return (EINVAL); /* * Request one descriptor for the given element type. This * is used to determine the size of the descriptor so that * we can allocate enough storage for all of them. We assume * that the first one can fit into 1k. */ cam_periph_unlock(periph); data = (caddr_t)malloc(1024, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); sense_flags = SF_RETRY_UA; if (softc->quirks & CH_Q_NO_DVCID) { dvcid = 0; curdata = 0; } else { dvcid = 1; curdata = 1; /* * Don't print anything for an Illegal Request, because * these flags can cause some changers to complain. We'll * retry without them if we get an error. */ sense_flags |= SF_QUIET_IR; } retry_einval: scsi_read_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* voltag */ want_voltags, /* sea */ softc->sc_firsts[chet], /* curdata */ curdata, /* dvcid */ dvcid, /* count */ 1, /* data_ptr */ data, /* dxfer_len */ 1024, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ sense_flags, softc->device_stats); /* * An Illegal Request sense key (only used if there is no asc/ascq) * or 0x24,0x00 for an ASC/ASCQ both map to EINVAL. If dvcid or * curdata are set (we set both or neither), try turning them off * and see if the command is successful. 
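 *
 * For example (hypothetical device): a changer that predates
 * DVCID/CURDATA may fail the first READ ELEMENT STATUS with
 * ILLEGAL REQUEST 0x24,0x00; the retry below clears both bits,
 * typically succeeds, and latches CH_Q_NO_DVCID so later requests
 * skip the failing variant.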
*/ if ((error == EINVAL) && (dvcid || curdata)) { dvcid = 0; curdata = 0; error = 0; /* At this point we want to report any Illegal Request */ sense_flags &= ~SF_QUIET_IR; try_no_dvcid = 1; goto retry_einval; } /* * In this case, we tried a read element status with dvcid and * curdata set, and it failed. We retried without those bits, and * it succeeded. Suggest to the user that he set a quirk, so we * don't go through the retry process the first time in the future. * This should only happen on changers that claim SCSI-3 or higher, * but don't support these bits. */ if ((try_no_dvcid != 0) && (error == 0)) softc->quirks |= CH_Q_NO_DVCID; if (error) goto done; cam_periph_unlock(periph); st_hdr = (struct read_element_status_header *)data; pg_hdr = (struct read_element_status_page_header *)((uintptr_t)st_hdr + sizeof(struct read_element_status_header)); desclen = scsi_2btoul(pg_hdr->edl); size = sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header) + (desclen * cesr->cesr_element_count); /* * Reallocate storage for descriptors and get them from the * device. */ free(data, M_DEVBUF); data = (caddr_t)malloc(size, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); scsi_read_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* voltag */ want_voltags, /* sea */ softc->sc_firsts[chet] + cesr->cesr_element_base, /* curdata */ curdata, /* dvcid */ dvcid, /* count */ cesr->cesr_element_count, /* data_ptr */ data, /* dxfer_len */ size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); if (error) goto done; cam_periph_unlock(periph); /* * Fill in the user status array. */ st_hdr = (struct read_element_status_header *)data; pg_hdr = (struct read_element_status_page_header *)((uintptr_t)st_hdr + sizeof(struct read_element_status_header)); avail = scsi_2btoul(st_hdr->count); if (avail != cesr->cesr_element_count) { xpt_print(periph->path, "warning, READ ELEMENT STATUS avail != count\n"); } user_data = (struct changer_element_status *) malloc(avail * sizeof(struct changer_element_status), M_DEVBUF, M_WAITOK | M_ZERO); desc = (struct read_element_status_descriptor *)((uintptr_t)data + sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header)); /* * Set up the individual element status structures */ for (i = 0; i < avail; ++i) { struct changer_element_status *ces; /* * In the changer_element_status structure, fields from * the beginning to the field of ces_scsi_lun are common * between SCSI-2 and SCSI-3, while all the rest are new * from SCSI-3. In order to maintain backward compatibility * of the chio command, the ces pointer, below, is computed * such that it lines up with the structure boundary * corresponding to the SCSI version. */ ces = cmd == OCHIOGSTATUS ? (struct changer_element_status *) ((unsigned char *)user_data + i * (offsetof(struct changer_element_status,ces_scsi_lun)+1)): &user_data[i]; copy_element_status(softc, pg_hdr->flags, desc, ces, scsi_version); desc = (struct read_element_status_descriptor *) ((unsigned char *)desc + desclen); } /* Copy element status structures out to userspace. 
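 *
 * Sketch of the two strides used below: OCHIOGSTATUS copies only the
 * SCSI-2-compatible prefix, offsetof(struct changer_element_status,
 * ces_scsi_lun) + 1 bytes per element, while CHIOGSTATUS copies the
 * full sizeof(struct changer_element_status) per element.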
*/
	if (cmd == OCHIOGSTATUS)
		error = copyout(user_data, cesr->cesr_element_status,
		    avail * (offsetof(struct changer_element_status,
		    ces_scsi_lun) + 1));
	else
		error = copyout(user_data, cesr->cesr_element_status,
		    avail * sizeof(struct changer_element_status));
	cam_periph_lock(periph);

done:
	xpt_release_ccb(ccb);
	if (data != NULL)
		free(data, M_DEVBUF);
	if (user_data != NULL)
		free(user_data, M_DEVBUF);
	return (error);
}

static int
chielem(struct cam_periph *periph, unsigned int timeout)
{
	union ccb *ccb;
	struct ch_softc *softc;
	int error;

	if (!timeout) {
		timeout = CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS;
	} else {
		timeout *= 1000;
	}

	error = 0;
	softc = (struct ch_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);

	scsi_initialize_element_status(&ccb->csio,
				       /* retries */ 1,
				       /* cbfcnp */ chdone,
				       /* tag_action */ MSG_SIMPLE_Q_TAG,
				       /* sense_len */ SSD_FULL_SIZE,
				       /* timeout */ timeout);

	error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO,
				  /*sense_flags*/ SF_RETRY_UA,
				  softc->device_stats);

	xpt_release_ccb(ccb);

	return(error);
}

static int
chsetvoltag(struct cam_periph *periph,
	    struct changer_set_voltag_request *csvr)
{
	union ccb *ccb;
	struct ch_softc *softc;
	u_int16_t ea;
	u_int8_t sac;
	struct scsi_send_volume_tag_parameters ssvtp;
	int error;
	int i;

	error = 0;
	softc = (struct ch_softc *)periph->softc;

	bzero(&ssvtp, sizeof(ssvtp));
	for (i = 0; i < sizeof(ssvtp.vitf); i++) {
		ssvtp.vitf[i] = ' ';
	}

	/*
	 * Check arguments.
	 */
	if (csvr->csvr_type > CHET_DT)
		return EINVAL;
	if (csvr->csvr_addr > (softc->sc_counts[csvr->csvr_type] - 1))
		return ENODEV;

	ea = softc->sc_firsts[csvr->csvr_type] + csvr->csvr_addr;

	if (csvr->csvr_flags & CSVR_ALTERNATE) {
		switch (csvr->csvr_flags & CSVR_MODE_MASK) {
		case CSVR_MODE_SET:
			sac = SEND_VOLUME_TAG_ASSERT_ALTERNATE;
			break;
		case CSVR_MODE_REPLACE:
			sac = SEND_VOLUME_TAG_REPLACE_ALTERNATE;
			break;
		case CSVR_MODE_CLEAR:
			sac = SEND_VOLUME_TAG_UNDEFINED_ALTERNATE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
	} else {
		switch (csvr->csvr_flags & CSVR_MODE_MASK) {
		case CSVR_MODE_SET:
			sac = SEND_VOLUME_TAG_ASSERT_PRIMARY;
			break;
		case CSVR_MODE_REPLACE:
			sac = SEND_VOLUME_TAG_REPLACE_PRIMARY;
			break;
		case CSVR_MODE_CLEAR:
			sac = SEND_VOLUME_TAG_UNDEFINED_PRIMARY;
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	memcpy(ssvtp.vitf, csvr->csvr_voltag.cv_volid,
	       min(strlen(csvr->csvr_voltag.cv_volid), sizeof(ssvtp.vitf)));
	scsi_ulto2b(csvr->csvr_voltag.cv_serial, ssvtp.minvsn);

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);

	scsi_send_volume_tag(&ccb->csio,
			     /* retries */ 1,
			     /* cbfcnp */ chdone,
			     /* tag_action */ MSG_SIMPLE_Q_TAG,
			     /* element_address */ ea,
			     /* send_action_code */ sac,
			     /* parameters */ &ssvtp,
			     /* sense_len */ SSD_FULL_SIZE,
			     /* timeout */ CH_TIMEOUT_SEND_VOLTAG);

	error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO,
				  /*sense_flags*/ SF_RETRY_UA,
				  softc->device_stats);

	xpt_release_ccb(ccb);

out:
	return error;
}

static int
chgetparams(struct cam_periph *periph)
{
	union ccb *ccb;
	struct ch_softc *softc;
	void *mode_buffer;
	int mode_buffer_len;
	struct page_element_address_assignment *ea;
	struct page_device_capabilities *cap;
	int error, from, dbd;
	u_int8_t *moves, *exchanges;

	error = 0;
	softc = (struct ch_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);

	/*
	 * The scsi_mode_sense_data structure is just a convenience
	 * structure that allows us to easily calculate the worst-case
	 * storage size of the mode sense buffer.
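 *
 * A rough sizing sketch: the worst case is a 6-byte mode header plus
 * a block descriptor plus the largest mode page we request, which is
 * what scsi_mode_sense_data bundles into a single allocation.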
*/ mode_buffer_len = sizeof(struct scsi_mode_sense_data); mode_buffer = malloc(mode_buffer_len, M_SCSICH, M_NOWAIT); if (mode_buffer == NULL) { printf("chgetparams: couldn't malloc mode sense data\n"); xpt_release_ccb(ccb); return(ENOSPC); } bzero(mode_buffer, mode_buffer_len); if (softc->quirks & CH_Q_NO_DBD) dbd = FALSE; else dbd = TRUE; /* * Get the element address assignment page. */ scsi_mode_sense(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* pc */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA|SF_NO_PRINT, softc->device_stats); if (error) { if (dbd) { struct scsi_mode_sense_6 *sms; sms = (struct scsi_mode_sense_6 *) ccb->csio.cdb_io.cdb_bytes; sms->byte2 &= ~SMS_DBD; error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); } else { /* * Since we disabled sense printing above, print * out the sense here since we got an error. */ scsi_sense_print(&ccb->csio); } if (error) { xpt_print(periph->path, "chgetparams: error getting element " "address page\n"); xpt_release_ccb(ccb); free(mode_buffer, M_SCSICH); return(error); } } ea = (struct page_element_address_assignment *) find_mode_page_6((struct scsi_mode_header_6 *)mode_buffer); softc->sc_firsts[CHET_MT] = scsi_2btoul(ea->mtea); softc->sc_counts[CHET_MT] = scsi_2btoul(ea->nmte); softc->sc_firsts[CHET_ST] = scsi_2btoul(ea->fsea); softc->sc_counts[CHET_ST] = scsi_2btoul(ea->nse); softc->sc_firsts[CHET_IE] = scsi_2btoul(ea->fieea); softc->sc_counts[CHET_IE] = scsi_2btoul(ea->niee); softc->sc_firsts[CHET_DT] = scsi_2btoul(ea->fdtea); softc->sc_counts[CHET_DT] = scsi_2btoul(ea->ndte); bzero(mode_buffer, mode_buffer_len); /* * Now get the device capabilities page. */ scsi_mode_sense(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* pc */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_DEVICE_CAP_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); if (error) { if (dbd) { struct scsi_mode_sense_6 *sms; sms = (struct scsi_mode_sense_6 *) ccb->csio.cdb_io.cdb_bytes; sms->byte2 &= ~SMS_DBD; error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); } else { /* * Since we disabled sense printing above, print * out the sense here since we got an error. 
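 *
 * (A reading of the fallback above, not new behavior: the CDB is
 * still loaded in the CCB, so clearing SMS_DBD in place and
 * re-running the same CCB retries the MODE SENSE without block
 * descriptors.)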
*/ scsi_sense_print(&ccb->csio); } if (error) { xpt_print(periph->path, "chgetparams: error getting device " "capabilities page\n"); xpt_release_ccb(ccb); free(mode_buffer, M_SCSICH); return(error); } } xpt_release_ccb(ccb); cap = (struct page_device_capabilities *) find_mode_page_6((struct scsi_mode_header_6 *)mode_buffer); bzero(softc->sc_movemask, sizeof(softc->sc_movemask)); bzero(softc->sc_exchangemask, sizeof(softc->sc_exchangemask)); moves = cap->move_from; exchanges = cap->exchange_with; for (from = CHET_MT; from <= CHET_MAX; ++from) { softc->sc_movemask[from] = moves[from]; softc->sc_exchangemask[from] = exchanges[from]; } free(mode_buffer, M_SCSICH); return(error); } static int chscsiversion(struct cam_periph *periph) { struct scsi_inquiry_data *inq_data; struct ccb_getdev *cgd; int dev_scsi_version; cam_periph_assert(periph, MA_OWNED); if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) == NULL) return (-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); if (cgd->ccb_h.status != CAM_REQ_CMP) { xpt_free_ccb((union ccb *)cgd); return -1; } inq_data = &cgd->inq_data; dev_scsi_version = inq_data->version; xpt_free_ccb((union ccb *)cgd); return dev_scsi_version; } void scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout) { struct scsi_move_medium *scsi_cmd; scsi_cmd = (struct scsi_move_medium *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MOVE_MEDIUM; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(src, scsi_cmd->src); scsi_ulto2b(dst, scsi_cmd->dst); if (invert) scsi_cmd->invert |= MOVE_MEDIUM_INVERT; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_exchange_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst1, u_int32_t dst2, int invert1, int invert2, u_int8_t sense_len, u_int32_t timeout) { struct scsi_exchange_medium *scsi_cmd; scsi_cmd = (struct scsi_exchange_medium *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = EXCHANGE_MEDIUM; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(src, scsi_cmd->src); scsi_ulto2b(dst1, scsi_cmd->fdst); scsi_ulto2b(dst2, scsi_cmd->sdst); if (invert1) scsi_cmd->invert |= EXCHANGE_MEDIUM_INV1; if (invert2) scsi_cmd->invert |= EXCHANGE_MEDIUM_INV2; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_position_to_element(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout) { struct scsi_position_to_element *scsi_cmd; scsi_cmd = (struct scsi_position_to_element *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = POSITION_TO_ELEMENT; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(dst, scsi_cmd->dst); if (invert) scsi_cmd->invert |= POSITION_TO_ELEMENT_INVERT; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); 
} void scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int voltag, u_int32_t sea, int curdata, int dvcid, u_int32_t count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_element_status *scsi_cmd; scsi_cmd = (struct scsi_read_element_status *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_ELEMENT_STATUS; scsi_ulto2b(sea, scsi_cmd->sea); scsi_ulto2b(count, scsi_cmd->count); scsi_ulto3b(dxfer_len, scsi_cmd->len); if (dvcid) scsi_cmd->flags |= READ_ELEMENT_STATUS_DVCID; if (curdata) scsi_cmd->flags |= READ_ELEMENT_STATUS_CURDATA; if (voltag) scsi_cmd->byte2 |= READ_ELEMENT_STATUS_VOLTAG; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_initialize_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_initialize_element_status *scsi_cmd; scsi_cmd = (struct scsi_initialize_element_status *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = INITIALIZE_ELEMENT_STATUS; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /* data_ptr */ NULL, /* dxfer_len */ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_volume_tag(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t element_address, u_int8_t send_action_code, struct scsi_send_volume_tag_parameters *parameters, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_volume_tag *scsi_cmd; scsi_cmd = (struct scsi_send_volume_tag *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_VOLUME_TAG; scsi_ulto2b(element_address, scsi_cmd->ea); scsi_cmd->sac = send_action_code; scsi_ulto2b(sizeof(*parameters), scsi_cmd->pll); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /* data_ptr */ (u_int8_t *) parameters, sizeof(*parameters), sense_len, sizeof(*scsi_cmd), timeout); } diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c index 31aeb59d9438..83e14eca35ab 100644 --- a/sys/cam/scsi/scsi_da.c +++ b/sys/cam/scsi/scsi_da.c @@ -1,6659 +1,6653 @@ /*- * Implementation of SCSI Direct Access Peripheral driver for CAM. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include

#ifdef _KERNEL
#include "opt_da.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#endif /* _KERNEL */

#ifndef _KERNEL
#include
#include
#endif /* _KERNEL */

#include
#include
#include
#include

#ifdef _KERNEL
#include
#endif /* _KERNEL */

#include
#include
#include
#include

#ifdef _KERNEL
/*
 * Note that there are probe ordering dependencies here.  The order isn't
 * controlled by this enumeration, but by explicit state transitions in
 * dastart() and dadone().  Here are some of the dependencies:
 *
 * 1. RC should come first, before RC16, unless there is evidence that RC16
 *    is supported.
 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
 * 3. The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
typedef enum {
	DA_STATE_PROBE_WP,
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID = 0x000001,
	DA_FLAG_NEW_PACK = 0x000002,
	DA_FLAG_PACK_LOCKED = 0x000004,
	DA_FLAG_PACK_REMOVABLE = 0x000008,
	DA_FLAG_ROTATING = 0x000010,
	DA_FLAG_NEED_OTAG = 0x000020,
	DA_FLAG_WAS_OTAG = 0x000040,
	DA_FLAG_RETRY_UA = 0x000080,
	DA_FLAG_OPEN = 0x000100,
	DA_FLAG_SCTX_INIT = 0x000200,
	DA_FLAG_CAN_RC16 = 0x000400,
	DA_FLAG_PROBED = 0x000800,
	DA_FLAG_DIRTY = 0x001000,
	DA_FLAG_ANNOUNCED = 0x002000,
	DA_FLAG_CAN_ATA_DMA = 0x004000,
	DA_FLAG_CAN_ATA_LOG = 0x008000,
	DA_FLAG_CAN_ATA_IDLOG = 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP = 0x020000,
	DA_FLAG_CAN_ATA_ZONE = 0x040000,
	DA_FLAG_TUR_PENDING = 0x080000,
	DA_FLAG_UNMAPPEDIO = 0x100000
} da_flags;
#define DA_FLAG_STRING		\
	"\020"			\
	"\001PACK_INVALID"	\
	"\002NEW_PACK"		\
	"\003PACK_LOCKED"	\
	"\004PACK_REMOVABLE"	\
	"\005ROTATING"		\
	"\006NEED_OTAG"		\
	"\007WAS_OTAG"		\
	"\010RETRY_UA"		\
	"\011OPEN"		\
	"\012SCTX_INIT"		\
	"\013CAN_RC16"		\
	"\014PROBED"		\
	"\015DIRTY"		\
	"\016ANNOUNCED"		\
	"\017CAN_ATA_DMA"	\
	"\020CAN_ATA_LOG"	\
	"\021CAN_ATA_IDLOG"	\
	"\022CAN_ATA_SUPCAP"	\
	"\023CAN_ATA_ZONE"	\
	"\024TUR_PENDING"	\
	"\025UNMAPPEDIO"

typedef enum {
	DA_Q_NONE = 0x00,
	DA_Q_NO_SYNC_CACHE = 0x01,
	DA_Q_NO_6_BYTE = 0x02,
	DA_Q_NO_PREVENT = 0x04,
	DA_Q_4K = 0x08,
	DA_Q_NO_RC16 = 0x10,
	DA_Q_NO_UNMAP = 0x20,
	DA_Q_RETRY_BUSY = 0x40,
	DA_Q_SMR_DM = 0x80,
	DA_Q_STRICT_UNMAP = 0x100,
	DA_Q_128KB = 0x200
} da_quirks;

#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"

typedef enum {
	DA_CCB_PROBE_RC = 0x01,
	DA_CCB_PROBE_RC16 = 0x02,
	DA_CCB_PROBE_LBP = 0x03,
	DA_CCB_PROBE_BLK_LIMITS = 0x04,
	DA_CCB_PROBE_BDC = 0x05,
	DA_CCB_PROBE_ATA = 0x06,
	DA_CCB_BUFFER_IO = 0x07,
	DA_CCB_DUMP = 0x0A,
	DA_CCB_DELETE = 0x0B,
DA_CCB_TUR = 0x0C,
	DA_CCB_PROBE_ZONE = 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR = 0x0E,
	DA_CCB_PROBE_ATA_IDDIR = 0x0F,
	DA_CCB_PROBE_ATA_SUP = 0x10,
	DA_CCB_PROBE_ATA_ZONE = 0x11,
	DA_CCB_PROBE_WP = 0x12,
	DA_CCB_TYPE_MASK = 0x1F,
	DA_CCB_RETRY_UA = 0x20
} da_ccb_state;

/*
 * Order here is important for method choice.
 *
 * We prefer ATA_TRIM, as tests run against a Sandforce 2281 SSD attached to
 * an LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker
 * deletes using ATA_TRIM than the corresponding UNMAP results for a
 * real-world MySQL import taking 5 minutes.
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;

/*
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE = 0x00,
	DA_ZONE_DRIVE_MANAGED = 0x01,
	DA_ZONE_HOST_AWARE = 0x02,
	DA_ZONE_HOST_MANAGED = 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,
	DA_ZONE_IF_ATA_PASS,
	DA_ZONE_IF_ATA_SAT,
} da_zone_interface;

typedef enum {
	DA_ZONE_FLAG_RZ_SUP = 0x0001,
	DA_ZONE_FLAG_OPEN_SUP = 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP = 0x0004,
	DA_ZONE_FLAG_FINISH_SUP = 0x0008,
	DA_ZONE_FLAG_RWP_SUP = 0x0010,
	DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP |
				 DA_ZONE_FLAG_OPEN_SUP |
				 DA_ZONE_FLAG_CLOSE_SUP |
				 DA_ZONE_FLAG_FINISH_SUP |
				 DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ = 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
	DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET |
				 DA_ZONE_FLAG_OPT_NONSEQ_SET |
				 DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;

static struct da_zone_desc {
	da_zone_flags value;
	const char *desc;
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};

typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};

static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number of sectors */
	u_int     stripesize;
	u_int     stripeoffset;
};

#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048	/* Protocol Max is
4095 */ #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ UNMAP_HEAD_SIZE) #define WS10_MAX_BLKS 0xffff #define WS16_MAX_BLKS 0xffffffff #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) #define DA_WORK_TUR (1 << 16) typedef enum { DA_REF_OPEN = 1, DA_REF_OPEN_HOLD, DA_REF_CLOSE_HOLD, DA_REF_PROBE_HOLD, DA_REF_TUR, DA_REF_GEOM, DA_REF_SYSCTL, DA_REF_REPROBE, DA_REF_MAX /* KEEP LAST */ } da_ref_token; struct da_softc { struct cam_iosched_softc *cam_iosched; struct bio_queue_head delete_run_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int minimum_cmd_size; int error_inject; int trim_max_ranges; int delete_available; /* Delete methods possibly available */ da_zone_mode zone_mode; da_zone_interface zone_interface; da_zone_flags zone_flags; struct ata_gp_log_dir ata_logdir; int valid_logdir_len; struct ata_identify_log_pages ata_iddir; int valid_iddir_len; uint64_t optimal_seq_zones; uint64_t optimal_nonseq_zones; uint64_t max_seq_zones; u_int maxio; uint32_t unmap_max_ranges; uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */ uint32_t unmap_gran; uint32_t unmap_gran_align; uint64_t ws_max_blks; uint64_t trim_count; uint64_t trim_ranges; uint64_t trim_lbas; da_delete_methods delete_method_pref; da_delete_methods delete_method; da_delete_func_t *delete_func; int p_type; struct disk_params params; struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; uint64_t wwpn; uint8_t unmap_buf[UNMAP_BUF_SIZE]; struct scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; int ref_flags[DA_REF_MAX]; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif #define DA_ANNOUNCETMP_SZ 160 char announce_temp[DA_ANNOUNCETMP_SZ]; #define DA_ANNOUNCE_SZ 400 char announcebuf[DA_ANNOUNCE_SZ]; }; #define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ } static uma_zone_t da_ccb_zone; struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; }; static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP"; static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. */ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). 
*/ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. */ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. * Also VMware returns odd errors on misaligned UNMAPs. */ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP }, /* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. 
audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. * Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory. 
* PR: usb/89196 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative MUVO Slim mp3 player (USB) * PR: usb/86131 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) * PR: usb/80487 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SanDisk Micro Cruzer 128MB * PR: usb/75970 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TOSHIBA TransMemory USB sticks * PR: kern/94660 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * PNY USB 3.0 Flash Drives */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 }, { /* * PNY USB Flash keys * PR: usb/75578, usb/72344, usb/65436 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Genesys GL3224 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 }, { /* * Genesys 6-in-1 Card Reader * PR: usb/94647 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Rekam Digital CAMERA * PR: usb/98713 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver H10 MP3 player * PR: usb/102547 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver U10 MP3 player * PR: usb/92306 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * X-Micro Flash Disk * PR: usb/96901 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * EasyMP3 EM732X USB 2.0 Flash MP3 Player * PR: usb/96546 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Denver MP3 player * PR: usb/107101 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Philips USB Key Audio KEY013 * PR: usb/68412 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { /* * JNC MP3 Player * PR: usb/94439 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SAMSUNG MP0402H * PR: usb/108427 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * I/O Magic USB flash - Giga Bank * PR: usb/108810 */ {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * JoyFly 128mb USB Flash Drive * PR: 96133 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ChipsBnk usb stick * PR: 103702 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A * PR: 129858 */ {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung YP-U3 mp3-player * PR: 125398 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sony Cyber-Shot DSC cameras * PR: 
usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcent USB sticks lie on RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * I-O Data USB Flash Disk * PR: usb/211716 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, { /* * SLC CHIPFANCIER USB drives * PR: usb/234503 (RC10 right, RC16 wrong) * 16GB, 32GB and 128GB confirmed to have same issue */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Sandisk X400 */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" }, /*quirks*/DA_Q_128KB }, { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Micron Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced 
Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1) * PR: usb/97472 */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE }, { /* * Olympus digital cameras (D-370) * PR: usb/97472 */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Olympus digital cameras (E-100RS, E-10). 
* PR: usb/97472 */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Pentax Digital Camera * PR: usb/93389 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB hard drive design by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel S3610 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works
in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 750 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 845 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * Same as for SAMSUNG MZ7* but enable the quirks for SSD * starting with MZ7* too */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * Same as above but enable the quirks for SSD SAMSUNG MZ7* * connected via SATA-to-SAS interposer and because of this * starting without "ATA" */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) * Drive Managed SATA hard drive. This drive doesn't report * in firmware that it is a drive managed SMR drive. 
*/ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" }, /*quirks*/DA_Q_SMR_DM }, { /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); static int dabitsysctl(SYSCTL_HANDLER_ARGS); static int daflagssysctl(SYSCTL_HANDLER_ARGS); static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; static void dazonedone(struct cam_periph *periph, union ccb *ccb); static void dadone(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probewp(struct cam_periph *periph, union ccb *done_ccb); static void dadone_proberc(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeata(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb); static void dadone_probezone(struct cam_periph *periph, union ccb *done_ccb); static void dadone_tur(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); static callout_func_t dasendorderedtag; static void dashutdown(void *arg, int howto); static callout_func_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static int da_disable_wp_detection = 0; static int 
da_enable_biospeedup = 1; static int da_enable_uma_ccbs = 1; static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN, &da_disable_wp_detection, 0, "Disable detection of write-protected disks"); SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN, &da_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing"); SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_uma_ccbs, CTLFLAG_RWTUN, &da_enable_uma_ccbs, 0, "Use UMA for CCBs"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the "don't send an ordered tag" test, so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); /* * This driver takes out references / holds in well defined pairs, never * recursively. These macros / inline functions enforce those rules. They * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is * defined to be 2 or larger, the tracking also includes debug printfs. */ #if defined(DA_TRACK_REFS) || defined(INVARIANTS) #ifndef DA_TRACK_REFS #define DA_TRACK_REFS 1 #endif #if DA_TRACK_REFS > 1 static const char *da_ref_text[] = { "bogus", "open", "open hold", "close hold", "reprobe hold", "Test Unit Ready", "Geom", "sysctl", "reprobe", "max -- also bogus" }; #define DA_PERIPH_PRINT(periph, msg, args...) \ CAM_PERIPH_PRINT(periph, msg, ##args) #else #define DA_PERIPH_PRINT(periph, msg, args...)
#endif static inline void token_sanity(da_ref_token token) { if ((unsigned)token >= DA_REF_MAX) panic("Bad token value passed in %d\n", token); } static inline int da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token) { int err = cam_periph_hold(periph, priority); token_sanity(token); DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n", da_ref_text[token], token, err); if (err == 0) { int cnt; struct da_softc *softc = periph->softc; cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); if (cnt != 0) panic("Re-holding for reason %d, cnt = %d", token, cnt); } return (err); } static inline void da_periph_unhold(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Unholding %d with cnt = %d", token, cnt); cam_periph_unhold(periph); } static inline int da_periph_acquire(struct cam_periph *periph, da_ref_token token) { int err = cam_periph_acquire(periph); token_sanity(token); DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n", da_ref_text[token], token, err); if (err == 0) { int cnt; struct da_softc *softc = periph->softc; cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); if (cnt != 0) panic("Re-refing for reason %d, cnt = %d", token, cnt); } return (err); } static inline void da_periph_release(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("Releasing %d with cnt = %d", token, cnt); cam_periph_release(periph); } static inline void da_periph_release_locked(struct cam_periph *periph, da_ref_token token) { int cnt; struct da_softc *softc = periph->softc; token_sanity(token); DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n", da_ref_text[token], token); cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); if (cnt != 1) panic("releasing (locked) %d with cnt = %d", token, cnt); cam_periph_release_locked(periph); } #define cam_periph_hold POISON #define cam_periph_unhold POISON #define cam_periph_acquire POISON #define cam_periph_release POISON #define cam_periph_release_locked POISON #else #define da_periph_hold(periph, prio, token) cam_periph_hold((periph), (prio)) #define da_periph_unhold(periph, token) cam_periph_unhold((periph)) #define da_periph_acquire(periph, token) cam_periph_acquire((periph)) #define da_periph_release(periph, token) cam_periph_release((periph)) #define da_periph_release_locked(periph, token) cam_periph_release_locked((periph)) #endif static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (da_periph_acquire(periph, DA_REF_OPEN) != 0) { return (ENXIO); } cam_periph_lock(periph); if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) { cam_periph_unlock(periph); da_periph_release(periph, DA_REF_OPEN); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. 
*/ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } da_periph_unhold(periph, DA_REF_OPEN_HOLD); cam_periph_unlock(periph); if (error != 0) da_periph_release(periph, DA_REF_OPEN); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) { /* Flush disk cache. */ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); da_periph_unhold(periph, DA_REF_CLOSE_HOLD); } /* * If we've got removable media, mark the blocksize as * unavailable, since it could change when new media is * inserted. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); da_periph_release(periph, DA_REF_OPEN); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* * Zone commands must be ordered, because they can depend on the * effects of previously issued commands, and they may affect * commands after them. */ if (bp->bio_cmd == BIO_ZONE) bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
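 * (daschedule() only asks the I/O scheduler to request a CCB from the
 * XPT; the bio queued above is turned into an actual command later, in
 * dastart(), when the XPT calls us back with one.)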
*/ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) return (ENXIO); memset(&csio, 0, sizeof(csio)); if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, /*cbfcnp*/NULL, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) printf("Aborting dump due to I/O error.\n"); return (error); } /* * Sync the disk cache contents to the physical media. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; if (g_handleattr_int(bp, "GEOM::canspeedup", da_enable_biospeedup)) return (EJUSTRETURN); periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; da_ccb_zone = uma_zcreate("da_ccb", sizeof(struct ccb_scsiio), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; da_periph_release(periph, DA_REF_GEOM); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. 
*/ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away; we'll get a callback when it is * done cleaning up its resources. */ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: /* callback to create periph, no locking yet */ { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC && SID_TYPE(&cgd->inq_data) != T_OPTICAL && SID_TYPE(&cgd->inq_data) != T_ZBC_HM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: /* Doesn't touch periph */ { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, as they will be * handled by daerror(). Since this comes from a different periph, * that periph's lock is held, not ours, so we have to take ours * out to touch softc flags. */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); cam_periph_lock(periph); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); cam_periph_unlock(periph); } else if (asc == 0x28 && ascq == 0x00) { cam_periph_lock(periph); softc->flags &= ~DA_FLAG_PROBED; cam_periph_unlock(periph); disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); cam_periph_lock(periph); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); cam_periph_unlock(periph); } } break; } case AC_SCSI_AEN: /* Called for this path: periph locked */ /* * Appears to be currently unused for SCSI devices; only ata SIMs * generate this.
*/ cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && (softc->flags & DA_FLAG_TUR_PENDING) == 0) { if (da_periph_acquire(periph, DA_REF_TUR) == 0) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: /* Called for this path: periph locked */ case AC_BUS_RESET: /* Called for this path: periph locked */ { struct ccb_hdr *ccbh; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } case AC_INQ_CHANGED: /* Called for this path: periph locked */ cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[32], tmpstr2[16]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { da_periph_release(periph, DA_REF_SYSCTL); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); cam_periph_lock(periph); softc->flags |= DA_FLAG_SCTX_INIT; cam_periph_unlock(periph); softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index"); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); da_periph_release(periph, DA_REF_SYSCTL); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. 
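 * Illustrative usage from userland, assuming unit 0 and a device on
 * which UNMAP is available:
 *   sysctl kern.cam.da.0.delete_method=UNMAP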
*/ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_count", CTLFLAG_RD, &softc->trim_count, "Total number of unmap/dsm commands sent"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_ranges", CTLFLAG_RD, &softc->trim_ranges, "Total number of ranges in unmap/dsm commands"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "trim_lbas", CTLFLAG_RD, &softc->trim_lbas, "Total lbas in the unmap/dsm commands sent"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0, dazonemodesysctl, "A", "Zone Mode"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0, dazonesupsysctl, "A", "Zone Support"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, "Optimal Number of Open Sequential Write Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "optimal_nonseq_zones", CTLFLAG_RD, &softc->optimal_nonseq_zones, "Optimal Number of Non-Sequentially Written Sequential Write " "Preferred Zones"); SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, "Maximum Number of Open Sequential Write Required Zones"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "p_type", CTLFLAG_RD, &softc->p_type, 0, "DIF protection type"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0, daflagssysctl, "A", "Flags for drive"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->flags, (u_int)DA_FLAG_ROTATING, dabitsysctl, "I", "Rotating media *DEPRECATED* gone in FreeBSD 14"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->flags, (u_int)DA_FLAG_UNMAPPEDIO, dabitsysctl, "I", "Unmapped I/O support *DEPRECATED* gone in FreeBSD 14"); #ifdef CAM_TEST_FAILURE SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0, cam_periph_invalidate_sysctl, "I", "Write 1 to invalidate the drive immediately"); #endif /* * Add some addressing info. 
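 * (Currently this is just the Fibre Channel world wide port name, when
 * the transport reports a valid one; it is exported below as the
 * "wwpn" sysctl.)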
*/ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { da_periph_release(periph, DA_REF_SYSCTL); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. * XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); da_periph_release(periph, DA_REF_SYSCTL); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values smaller than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. 
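 * Other values are coerced: anything below 6 becomes 6, 7-10 become
 * 10, 11-12 become 12, and anything above 12 becomes 16.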
*/ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available & (1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { printf("%s%d: Write Protected\n", periph->periph_name, periph->unit_number); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; da_periph_unhold(periph, DA_REF_PROBE_HOLD); } else da_periph_release_locked(periph, DA_REF_REPROBE); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. 
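 * (DA_DELETE_ZERO is deliberately skipped in this loop: writing zeroes
 * is typically the slowest method, so it is only used when the user
 * explicitly selects it via delete_method.)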
*/ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. */ dadeletemethodset(softc, default_method); } static int dabitsysctl(SYSCTL_HANDLER_ARGS) { u_int *flags = arg1; u_int test = arg2; int tmpout, error; tmpout = !!(*flags & test); error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout)); if (error || !req->newptr) return (error); return (EPERM); } static int daflagssysctl(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; struct da_softc *softc = arg1; int error; sbuf_new_for_sysctl(&sbuf, NULL, 0, req); if (softc->flags != 0) sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, DA_FLAG_STRING); else sbuf_printf(&sbuf, "0"); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } static int dazonemodesysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[40]; struct da_softc *softc; int error; softc = (struct da_softc *)arg1; switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); break; case DA_ZONE_HOST_AWARE: snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); break; case DA_ZONE_HOST_MANAGED: snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); break; case DA_ZONE_NONE: default: snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); break; } error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); return (error); } static int dazonesupsysctl(SYSCTL_HANDLER_ARGS) { char tmpbuf[180]; struct da_softc *softc; struct sbuf sb; int error, first; unsigned int i; softc = (struct da_softc *)arg1; error = 0; first = 1; sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); for (i = 0; i < sizeof(da_zone_desc_table) / sizeof(da_zone_desc_table[0]); i++) { if (softc->zone_flags & da_zone_desc_table[i].value) { if (first == 0) sbuf_printf(&sb, ", "); else first = 0; sbuf_cat(&sb, da_zone_desc_table[i].desc); } } if (first == 1) sbuf_printf(&sb, "None"); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); return (error); } static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; int quirks; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. 
" "Unable to allocate iosched memory\n"); free(softc, M_DEVBUF); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_WP; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->unmap_gran = 0; softc->unmap_gran_align = 0; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->flags |= DA_FLAG_ROTATING; periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ xpt_path_inq(&cpi, periph->path); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; /* Override quirks if tunable is set */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(tmpstr, &quirks); softc->quirks = quirks; if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM) softc->zone_mode = DA_ZONE_HOST_MANAGED; else if (softc->quirks & DA_Q_SMR_DM) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; else softc->zone_mode = DA_ZONE_NONE; if (softc->zone_mode != DA_ZONE_NONE) { if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) softc->zone_interface = DA_ZONE_IF_ATA_SAT; else softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else softc->zone_interface = DA_ZONE_IF_SCSI; } TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Let XPT know we can use UMA-allocated CCBs. */ if (da_enable_uma_ccbs) { KASSERT(da_ccb_zone != NULL, ("%s: NULL da_ccb_zone", __func__)); periph->ccb_zone = da_ccb_zone; } /* * Take an exclusive section lock on the periph while dastart is called * to finish the probe. The lock will be dropped in dadone at the end * of probe. This locks out daopen and daclose from racing with the * probe. * * XXX if cam_periph_hold returns an error, we don't hold a refcount. */ (void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, periph); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; else if (softc->minimum_cmd_size > 10) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 6) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; } /* * Register this media as a disk. 
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; if (cam_sim_pollable(periph->sim)) softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > maxphys) softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; if (softc->quirks & DA_Q_128KB) softc->maxio = min(softc->maxio, 128 * 1024); softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->flags |= DA_FLAG_UNMAPPEDIO; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment), "%s%d", cpi.dev_name, cpi.unit_number); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ if (da_periph_acquire(periph, DA_REF_GEOM) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule periodic media polling events.
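 * Polling is armed only for removable media on paths that do not
 * advertise asynchronous event notification (SID_AEN), and only while
 * the kern.cam.da.poll_period tunable is non-zero.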
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int da_zone_bio_to_scsi(int disk_zone_cmd) { switch (disk_zone_cmd) { case DISK_ZONE_OPEN: return ZBC_OUT_SA_OPEN; case DISK_ZONE_CLOSE: return ZBC_OUT_SA_CLOSE; case DISK_ZONE_FINISH: return ZBC_OUT_SA_FINISH; case DISK_ZONE_RWP: return ZBC_OUT_SA_RWP; } return -1; } static int da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb) { struct da_softc *softc; int error; error = 0; if (bp->bio_cmd != BIO_ZONE) { error = EINVAL; goto bailout; } softc = periph->softc; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: { int zone_flags; int zone_sa; uint64_t lba; zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); if (zone_sa == -1) { xpt_print(periph->path, "Cannot translate zone " "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); error = EINVAL; goto bailout; } zone_flags = 0; lba = bp->bio_zone.zone_params.rwp.id; if (bp->bio_zone.zone_params.rwp.flags & DISK_ZONE_RWP_FLAG_ALL) zone_flags |= ZBC_OUT_ALL; if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. 
*/ error = scsi_ata_zac_mgmt_out(&ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ zone_sa, /*zone_id*/ lba, /*zone_flags*/ zone_flags, /*data_ptr*/ NULL, /*dxfer_len*/ 0, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_out() returned an " "error!"); goto bailout; } } *queue_ccb = 1; break; } case DISK_ZONE_REPORT_ZONES: { uint8_t *rz_ptr; uint32_t num_entries, alloc_size; struct disk_zone_report *rep; rep = &bp->bio_zone.zone_params.report; num_entries = rep->entries_allocated; if (num_entries == 0) { xpt_print(periph->path, "No entries allocated for " "Report Zones request\n"); error = EINVAL; goto bailout; } alloc_size = sizeof(struct scsi_report_zones_hdr) + (sizeof(struct scsi_report_zones_desc) * num_entries); alloc_size = min(alloc_size, softc->disk->d_maxsize); rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); if (rz_ptr == NULL) { xpt_print(periph->path, "Unable to allocate memory " "for Report Zones request\n"); error = ENOMEM; goto bailout; } if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { scsi_zbc_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*service_action*/ ZBC_IN_SA_REPORT_ZONES, /*zone_start_lba*/ rep->starting_id, /*zone_options*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); } else { /* * Note that in this case, even though we can * technically use NCQ, we don't bother for several * reasons: * 1. It hasn't been tested on a SAT layer that * supports it. This is new as of SAT-4. * 2. Even when there is a SAT layer that supports * it, that SAT layer will also probably support * ZBC -> ZAC translation, since they are both * in the SAT-4 spec. * 3. Translation will likely be preferable to ATA * passthrough. LSI / Avago at least single * steps ATA passthrough commands in the HBA, * regardless of protocol, so unless that * changes, there is a performance penalty for * doing ATA passthrough no matter whether * you're using NCQ/FPDMA, DMA or PIO. * 4. It requires a 32-byte CDB, which at least at * this point in CAM requires a CDB pointer, which * would require us to allocate an additional bit * of storage separate from the CCB. */ error = scsi_ata_zac_mgmt_in(&ccb->csio, /*retries*/ da_retry_count, /*cbcfnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*use_ncq*/ 0, /*zm_action*/ ATA_ZM_REPORT_ZONES, /*zone_id*/ rep->starting_id, /*zone_flags*/ rep->rep_options, /*data_ptr*/ rz_ptr, /*dxfer_len*/ alloc_size, /*cdb_storage*/ NULL, /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (error != 0) { error = EINVAL; xpt_print(periph->path, "scsi_ata_zac_mgmt_in() returned an " "error!"); goto bailout; } } /* * For BIO_ZONE, this isn't normally needed. However, it * is used by devstat_end_transaction_bio() to determine * how much data was transferred. */ /* * XXX KDM we have a problem. But I'm not sure how to fix * it. devstat uses bio_bcount - bio_resid to calculate * the amount of data transferred. The GEOM disk code * uses bio_length - bio_resid to calculate the amount of * data in bio_completed. We have different structure * sizes above and below the ada(4) driver. So, if we * use the sizes above, the amount transferred won't be * quite accurate for devstat. 
If we use different sizes * for bio_bcount and bio_length (above and below * respectively), then the residual needs to match one or * the other. Everything is calculated after the bio * leaves the driver, so changing the values around isn't * really an option. For now, just set the count to the * passed in length. This means that the calculations * above (e.g. bio_completed) will be correct, but the * amount of data reported to devstat will be slightly * under or overstated. */ bp->bio_bcount = bp->bio_length; *queue_ccb = 1; break; } case DISK_ZONE_GET_PARAMS: { struct disk_zone_disk_params *params; params = &bp->bio_zone.zone_params.disk_params; bzero(params, sizeof(*params)); switch (softc->zone_mode) { case DA_ZONE_DRIVE_MANAGED: params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; break; case DA_ZONE_HOST_AWARE: params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; break; case DA_ZONE_HOST_MANAGED: params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; break; default: case DA_ZONE_NONE: params->zone_mode = DISK_ZONE_MODE_NONE; break; } if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) params->flags |= DISK_ZONE_DISK_URSWRZ; if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { params->optimal_seq_zones = softc->optimal_seq_zones; params->flags |= DISK_ZONE_OPT_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { params->optimal_nonseq_zones = softc->optimal_nonseq_zones; params->flags |= DISK_ZONE_OPT_NONSEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { params->max_seq_zones = softc->max_seq_zones; params->flags |= DISK_ZONE_MAX_SEQ_SET; } if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) params->flags |= DISK_ZONE_RZ_SUP; if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) params->flags |= DISK_ZONE_OPEN_SUP; if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) params->flags |= DISK_ZONE_CLOSE_SUP; if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) params->flags |= DISK_ZONE_FINISH_SUP; if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) params->flags |= DISK_ZONE_RWP_SUP; break; } default: break; } bailout: return (error); } static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { softc->flags |= DA_FLAG_TUR_PENDING; cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone_tur, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* * Not sure this is possible, but failsafe by * lying and saying "sure, done." 
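 * (delete_func should always be non-NULL whenever DISKFLAG_CANDELETE
 * was advertised, so this path is not expected to be reached in
 * practice.)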
*/ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); da_periph_release_locked(periph, DA_REF_TUR); } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; biotrack(bp, __func__); if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) start_ccb->csio.bio = bp; #endif break; } case BIO_FLUSH: /* * If we don't support sync cache, or the disk * isn't dirty, FLUSH is a no-op. Use the * allocated CCB for the next bio if one is * available. */ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || (softc->flags & DA_FLAG_DIRTY) == 0) { biodone(bp); goto skipstate; } /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. */ scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, /*tag_action*/tag_code, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); /* * Clear the dirty flag before sending the command. * Either this sync cache will be successful, or it * will fail after a retry. If it fails, it is * unlikely to be successful if retried later, so * we'll save ourselves time by just marking the * device clean. 
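* (DA_FLAG_DIRTY is set again on the next BIO_WRITE, so a later * flush will still be issued if new writes arrive.)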
*/ softc->flags &= ~DA_FLAG_DIRTY; break; case BIO_ZONE: { int error, queue_ccb; queue_ccb = 0; error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); if ((error != 0) || (queue_ccb == 0)) { biofinish(bp, NULL, error); xpt_release_ccb(start_ccb); return; } break; } default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); return; } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_WP: { void *mode_buf; int mode_buf_len; if (da_disable_wp_detection) { if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; goto skipstate; } mode_buf_len = 192; mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT); if (mode_buf == NULL) { xpt_print(periph->path, "Unable to send mode sense - " "malloc failure\n"); if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; goto skipstate; } scsi_mode_sense_len(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probewp, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*dbd*/ FALSE, /*pc*/ SMS_PAGE_CTRL_CURRENT, /*page*/ SMS_ALL_PAGES_PAGE, /*param_buf*/ mode_buf, /*param_len*/ mode_buf_len, /*minimum_cmd_size*/ softc->minimum_cmd_size, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone_proberc, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? 
*/ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_proberc, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here we don't support any SBC-3 delete * methods with UNMAP, as Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31. * However, older revisions of the spec don't mandate * this, so we currently don't remove these methods * from the available set. */ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probelbp, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported, skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probeblklimits, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_device_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_device_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probebdc, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * Note that if the ATA VPD page isn't * supported, we aren't talking to an ATA * device anyway.
Support for that VPD * page is mandatory for SCSI to ATA (SAT) * translation layers. */ softc->state = DA_STATE_PROBE_ZONE; goto skipstate; } daprobedone(periph, start_ccb); break; } ata_params = &periph->path->device->ident_data; scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probeata, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_LOGDIR: { struct ata_gp_log_dir *log_dir; int retval; retval = 0; if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { /* * If we don't have log support, not much point in * trying to probe zone support. */ daprobedone(periph, start_ccb); break; } /* * If we have an ATA device (the SCSI ATA Information VPD * page should be present and the ATA identify should have * succeeded) and it supports logs, ask for the log directory. */ log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); if (log_dir == NULL) { xpt_print(periph->path, "Couldn't malloc log_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeatalogdir, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_LOG_DIRECTORY, /*page_number*/ 0, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)log_dir, /*dxfer_len*/ sizeof(*log_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(log_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_IDDIR: { struct ata_identify_log_pages *id_dir; int retval; retval = 0; /* * Check here to see whether the Identify Device log is * supported in the directory of logs. If so, continue * with requesting the log of identify device pages. */ if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { daprobedone(periph, start_ccb); break; } id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); if (id_dir == NULL) { xpt_print(periph->path, "Couldn't malloc id_dir " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeataiddir, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_PAGE_LIST, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)id_dir, /*dxfer_len*/ sizeof(*id_dir), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(id_dir, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_SUP: { struct ata_identify_log_sup_cap *sup_cap; int retval; retval = 0; /* * Check here to see whether the Supported Capabilities log * is in the list of Identify Device logs. 
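* (That list is the set of subpages read from the ATA Identify * Device Data log, page ATA_IDL_PAGE_LIST, in the previous probe * state.)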
*/ if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { daprobedone(periph, start_ccb); break; } sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); if (sup_cap == NULL) { xpt_print(periph->path, "Couldn't malloc sup_cap " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeatasup, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_SUP_CAP, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)sup_cap, /*dxfer_len*/ sizeof(*sup_cap), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(sup_cap, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA_ZONE: { struct ata_zoned_info_log *ata_zone; int retval; retval = 0; /* * Check here to see whether the zoned device information * page is supported. If so, continue on to request it. * If not, skip to DA_STATE_PROBE_LOG or done. */ if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { daprobedone(periph, start_ccb); break; } ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, M_NOWAIT|M_ZERO); if (ata_zone == NULL) { xpt_print(periph->path, "Couldn't malloc ata_zone " "data\n"); daprobedone(periph, start_ccb); break; } retval = scsi_ata_read_log(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone_probeatazone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*log_address*/ ATA_IDENTIFY_DATA_LOG, /*page_number*/ ATA_IDL_ZDI, /*block_count*/ 1, /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? AP_PROTO_DMA : AP_PROTO_PIO_IN, /*data_ptr*/ (uint8_t *)ata_zone, /*dxfer_len*/ sizeof(*ata_zone), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); if (retval != 0) { xpt_print(periph->path, "scsi_ata_read_log() failed!"); free(ata_zone, M_SCSIDA); daprobedone(periph, start_ccb); break; } start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ZONE: { struct scsi_vpd_zoned_bdc *bdc; /* * Note that this page will be supported for SCSI protocol * devices that support ZBC (SMR devices), as well as ATA * protocol devices that are behind a SAT (SCSI to ATA * Translation) layer that supports converting ZBC commands * to their ZAC equivalents. */ if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { daprobedone(periph, start_ccb); break; } bdc = (struct scsi_vpd_zoned_bdc *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { xpt_release_ccb(start_ccb); xpt_print(periph->path, "Couldn't malloc zone VPD " "data\n"); break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone_probezone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_ZONED_BDC, /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; xpt_action(start_ccb); break; } } } /* * In each of the methods below, while it's the caller's * responsibility to ensure the request will fit into a * single device request, we might have changed the delete * method due to the device incorrectly advertising either * its supported methods or limits.
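* (For example, a device may advertise an UNMAP method it then * rejects, or advertise larger limits than it actually honors.)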
* * To prevent this from causing further issues, we validate * requests against the method's limits and warn when we must * issue a short delete, which would otherwise be unnecessary. */ static void da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; uint64_t lba, lastlba = (uint64_t)-1; uint64_t totalcount = 0; uint64_t count; uint32_t c, lastcount = 0, ranges = 0; /* * Currently this doesn't take the UNMAP * Granularity and Granularity Alignment * fields into account. * * This could result in both suboptimal unmap * requests as well as UNMAP calls unmapping * fewer LBAs than requested. */ bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { /* * Note: ada and da are different in how they store the * pending bp's in a trim. ada stores all of them in the * trim_req.bps. da stores all but the first one in the * delete_run_queue. ada then completes all the bps in * its adadone() loop. da completes all the bps in the * delete_run_queue in dadone, and relies on the final * biodone call to complete the first one. This should be * reconciled since there's * no real reason to do it differently. XXX */ if (bp1 != bp) bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, UNMAP_RANGE_MAX - lastcount); lastlba += c; lastcount += c; scsi_ulto4b(lastcount, d[ranges - 1].length); count -= c; lba += c; totalcount += c; } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0) { /* Align length of the previous range. */ if ((c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) { totalcount -= lastcount; lastlba = (uint64_t)-1; lastcount = 0; ranges--; } else { totalcount -= c; lastlba -= c; lastcount -= c; scsi_ulto4b(lastcount, d[ranges - 1].length); } } /* Align beginning of the new range. */ c = (lba - softc->unmap_gran_align) % softc->unmap_gran; if (c != 0) { c = softc->unmap_gran - c; if (count <= c) { count = 0; } else { lba += c; count -= c; } } } while (count > 0) { c = omin(count, UNMAP_RANGE_MAX); if (totalcount + c > softc->unmap_max_lba || ranges >= softc->unmap_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld " "|| %d >= %d\n", da_delete_method_desc[softc->delete_method], totalcount + c, softc->unmap_max_lba, ranges, softc->unmap_max_ranges); break; } scsi_u64to8b(lba, d[ranges].lba); scsi_ulto4b(c, d[ranges].length); lba += c; totalcount += c; ranges++; count -= c; lastlba = lba; lastcount = c; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (ranges >= softc->unmap_max_ranges || totalcount + bp1->bio_bcount / softc->params.secsize > softc->unmap_max_lba) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); /* Align length of the last range.
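* For example (illustrative numbers): with unmap_gran = 8 and a final * range of lastcount = 21 blocks, c = 21 % 8 = 5 and the range is * shortened to 16 blocks; were lastcount only 5, the whole range * would be dropped instead.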
*/ if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && (c = lastcount % softc->unmap_gran) != 0) { if (lastcount <= c) ranges--; else scsi_ulto4b(lastcount - c, d[ranges - 1].length); } scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; softc->trim_count++; softc->trim_ranges += ranges; softc->trim_lbas += totalcount; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range. */ if (lba == lastlba) { c = omin(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * 8; buf[off + 6] = lastcount & 0xff; buf[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = omin(count, ATA_DSM_RANGE_MAX); off = ranges * 8; buf[off + 0] = lba & 0xff; buf[off + 1] = (lba >> 8) & 0xff; buf[off + 2] = (lba >> 16) & 0xff; buf[off + 3] = (lba >> 24) & 0xff; buf[off + 4] = (lba >> 32) & 0xff; buf[off + 5] = (lba >> 40) & 0xff; buf[off + 6] = c & 0xff; buf[off + 7] = (c >> 8) & 0xff; lba += c; ranges++; count -= c; lastcount = c; if (count != 0 && ranges == softc->trim_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], requestcount, (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX); break; } } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); block_count = howmany(ranges, ATA_DSM_BLK_RANGES); scsi_ata_trim(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, block_count, /*data_ptr*/buf, /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } /* * We calculate ws_max_blks here based off d_delmaxsize instead * of using softc->ws_max_blks as it is absolute max for the * device not the protocol max which may well be lower. 
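* For example (illustrative numbers): with 512-byte sectors and a * d_delmaxsize of 32 MB, ws_max_blks comes out to 65536 blocks.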
*/ static void da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc; struct bio *bp1; uint64_t ws_max_blks; uint64_t lba; uint64_t count; /* forward compat with WS32 */ softc = (struct da_softc *)periph->softc; ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; lba = bp->bio_pblkno; count = 0; bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); count += bp1->bio_bcount / softc->params.secsize; if (count > ws_max_blks) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], count, ws_max_blks); count = omin(count, ws_max_blks); break; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (lba + count != bp1->bio_pblkno || count + bp1->bio_bcount / softc->params.secsize > ws_max_blks) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_write_same(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/softc->delete_method == DA_DELETE_ZERO ? 0 : SWS_UNMAP, softc->delete_method == DA_DELETE_WS16 ? 16 : 10, /*lba*/lba, /*block_count*/count, /*data_ptr*/ __DECONST(void *, zero_region), /*dxfer_len*/ softc->params.secsize, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static int cmd6workaround(union ccb *ccb) { struct scsi_rw_6 cmd6; struct scsi_rw_10 *cmd10; struct da_softc *softc; u_int8_t *cdb; struct bio *bp; int frozen; cdb = ccb->csio.cdb_io.cdb_bytes; softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { da_delete_methods old_method = softc->delete_method; /* * Typically there are two reasons for failure here: * 1. Delete method was detected as supported but isn't * 2. Delete failed due to invalid params e.g. too big * * While we will attempt to choose an alternative delete method, * this may result in short deletes if the existing delete * requests from geom are too big for the new method chosen. * * This method assumes that the error which triggered this * will not retry the I/O; otherwise a panic will occur. */ dadeleteflag(softc, old_method, 0); dadeletemethodchoose(softc, DA_DELETE_DISABLE); if (softc->delete_method == DA_DELETE_DISABLE) xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n", da_delete_method_desc[old_method]); else xpt_print(ccb->ccb_h.path, "%s failed, switching to %s BIO_DELETE\n", da_delete_method_desc[old_method], da_delete_method_desc[softc->delete_method]); while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) cam_iosched_queue_work(softc->cam_iosched, bp); cam_iosched_queue_work(softc->cam_iosched, (struct bio *)ccb->ccb_h.ccb_bp); ccb->ccb_h.ccb_bp = NULL; return (0); } /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == PREVENT_ALLOW) && (softc->quirks & DA_Q_NO_PREVENT) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); softc->quirks |= DA_Q_NO_PREVENT; return (0); } /* Detect unsupported SYNCHRONIZE CACHE(10).
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == SYNCHRONIZE_CACHE) && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "SYNCHRONIZE CACHE(10) not supported.\n"); softc->quirks |= DA_Q_NO_SYNC_CACHE; softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; return (0); } /* Translation only possible if CDB is an array and cmd is R/W6 */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || (*cdb != READ_6 && *cdb != WRITE_6)) return 0; xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " "increasing minimum_cmd_size to 10.\n"); softc->minimum_cmd_size = 10; bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); cmd10 = (struct scsi_rw_10 *)cdb; cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; cmd10->byte2 = 0; scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); cmd10->reserved = 0; scsi_ulto2b(cmd6.length, cmd10->length); cmd10->control = cmd6.control; ccb->csio.cdb_len = sizeof(*cmd10); /* Requeue request, unfreezing queue if necessary */ frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static void dazonedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; struct bio *bp; softc = periph->softc; bp = (struct bio *)ccb->ccb_h.ccb_bp; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: case DISK_ZONE_CLOSE: case DISK_ZONE_FINISH: case DISK_ZONE_RWP: break; case DISK_ZONE_REPORT_ZONES: { uint32_t avail_len; struct disk_zone_report *rep; struct scsi_report_zones_hdr *hdr; struct scsi_report_zones_desc *desc; struct disk_zone_rep_entry *entry; uint32_t hdr_len, num_avail; uint32_t num_to_fill, i; int ata; rep = &bp->bio_zone.zone_params.report; avail_len = ccb->csio.dxfer_len - ccb->csio.resid; /* * Note that bio_resid isn't normally used for zone * commands, but it is used by devstat_end_transaction_bio() * to determine how much data was transferred. Because * the size of the SCSI/ATA data structures is different * than the size of the BIO interface structures, the * amount of data actually transferred from the drive will * be different than the amount of data transferred to * the user. */ bp->bio_resid = ccb->csio.resid; hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; if (avail_len < sizeof(*hdr)) { /* * Is there a better error than EIO here? We asked * for at least the header, and we got less than * that. */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; break; } if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) ata = 1; else ata = 0; hdr_len = ata ? le32dec(hdr->length) : scsi_4btoul(hdr->length); if (hdr_len > 0) rep->entries_available = hdr_len / sizeof(*desc); else rep->entries_available = 0; /* * NOTE: using the same values for the BIO version of the * same field as the SCSI/ATA values. This means we could * get some additional values that aren't defined in bio.h * if more values of the same field are defined later. */ rep->header.same = hdr->byte4 & SRZ_SAME_MASK; rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : scsi_8btou64(hdr->maximum_lba); /* * If the drive reports no entries that match the query, * we're done. */ if (hdr_len == 0) { rep->entries_filled = 0; break; } num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), hdr_len / sizeof(*desc)); /* * If the drive didn't return any data, then we're done. 
*/ if (num_avail == 0) { rep->entries_filled = 0; break; } num_to_fill = min(num_avail, rep->entries_allocated); /* * If the user didn't allocate any entries for us to fill, * we're done. */ if (num_to_fill == 0) { rep->entries_filled = 0; break; } for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; i < num_to_fill; i++, desc++, entry++) { /* * NOTE: we're mapping the values here directly * from the SCSI/ATA bit definitions to the bio.h * definitions. There is also a warning in * disk_zone.h, but the impact is that if * additional values are added in the SCSI/ATA * specs these will be visible to consumers of * this interface. */ entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; entry->zone_condition = (desc->zone_flags & SRZ_ZONE_COND_MASK) >> SRZ_ZONE_COND_SHIFT; entry->zone_flags |= desc->zone_flags & (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); entry->zone_length = ata ? le64dec(desc->zone_length) : scsi_8btou64(desc->zone_length); entry->zone_start_lba = ata ? le64dec(desc->zone_start_lba) : scsi_8btou64(desc->zone_start_lba); entry->write_pointer_lba = ata ? le64dec(desc->write_pointer_lba) : scsi_8btou64(desc->write_pointer_lba); } rep->entries_filled = num_to_fill; break; } case DISK_ZONE_GET_PARAMS: default: /* * In theory we should not get a GET_PARAMS bio, since it * should be handled without queueing the command to the * drive. */ panic("%s: Invalid zone command %d", __func__, bp->bio_zone.zone_cmd); break; } if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) free(ccb->csio.data_ptr, M_SCSIDA); } static void dadone(struct cam_periph *periph, union ccb *done_ccb) { struct bio *bp, *bp1; struct da_softc *softc; struct ccb_scsiio *csio; - u_int32_t priority; da_ccb_state state; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); softc = (struct da_softc *)periph->softc; - priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (csio->bio != NULL) biotrack(csio->bio, __func__); #endif state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = daerror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if (error != 0) { int queued_error; /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ queued_error = EIO; if (error == ENXIO && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { /* * Catastrophic error. Mark our pack as * invalid. * * XXX See if this is really a media * XXX change first? 
*/ xpt_print(periph->path, "Invalidating pack\n"); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif queued_error = ENXIO; } cam_iosched_flush(softc->cam_iosched, NULL, queued_error); if (bp != NULL) { bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } } else if (bp != NULL) { if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else if (bp != NULL) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); if (bp->bio_cmd == BIO_ZONE) dazonedone(periph, done_ccb); else if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; if (softc->error_inject != 0) { bp->bio_error = softc->error_inject; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; softc->error_inject = 0; } } if (bp != NULL) biotrack(bp, __func__); LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); if (LIST_EMPTY(&softc->pending_ccbs)) softc->flags |= DA_FLAG_WAS_OTAG; /* * We need to call cam_iosched before we call biodone so that we don't * measure any activity that happens in the completion routine, which in * the case of sendfile can be quite extensive. Release the periph * refcount taken in dastart() for each CCB. */ cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount)); softc->refcount--; if (state == DA_CCB_DELETE) { TAILQ_HEAD(, bio) queue; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); softc->delete_run_queue.insert_point = NULL; /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * delete_running set to 0 before the call above to * allow other I/O to progress when many BIO_DELETE * requests are pushed down. We set delete_running to 0 * and call daschedule again so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. 
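* (In the current code this is expressed through the * cam_iosched_trim_done() call below rather than a separate * delete_running variable.)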
*/ cam_iosched_trim_done(softc->cam_iosched); daschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { daschedule(periph); cam_periph_unlock(periph); } if (bp != NULL) biodone(bp); return; } static void dadone_probewp(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_mode_header_6 *mode_hdr6; struct scsi_mode_header_10 *mode_hdr10; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; uint8_t dev_spec; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); KASSERT(softc->state == DA_STATE_PROBE_WP, ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p", softc->state, periph, done_ccb)); KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP, ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p", (unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph, done_ccb)); if (softc->minimum_cmd_size > 6) { mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr; dev_spec = mode_hdr10->dev_spec; } else { mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr; dev_spec = mode_hdr6->dev_spec; } if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { if ((dev_spec & 0x80) != 0) softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT; else softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((softc->flags & DA_FLAG_CAN_RC16) != 0) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_proberc(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_read_capacity_data *rdcap; struct scsi_read_capacity_data_long *rcaplong; struct da_softc *softc; struct ccb_scsiio *csio; da_ccb_state state; char *announce_buf; u_int32_t priority; int lbp, n; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16, ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p", softc->state, periph, done_ccb)); KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16, ("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p", (unsigned long)state, periph, done_ccb)); lbp = 0; rdcap = NULL; rcaplong = NULL; /* XXX TODO: can this be a malloc? 
*/ announce_buf = softc->announce_temp; bzero(announce_buf, DA_ANNOUNCETMP_SZ); if (state == DA_CCB_PROBE_RC) rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; else rcaplong = (struct scsi_read_capacity_data_long *) csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct disk_params *dp; uint32_t block_size; uint64_t maxsector; u_int lalba; /* Lowest aligned LBA. */ if (state == DA_CCB_PROBE_RC) { block_size = scsi_4btoul(rdcap->length); maxsector = scsi_4btoul(rdcap->addr); lalba = 0; /* * According to SBC-2, if the standard 10 * byte READ CAPACITY command returns 2^32, * we should issue the 16 byte version of * the command, since the device in question * has more sectors than can be represented * with the short version of the command. */ if (maxsector == 0xffffffff) { free(rdcap, M_SCSIDA); softc->state = DA_STATE_PROBE_RC16; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } } else { block_size = scsi_4btoul(rcaplong->length); maxsector = scsi_8btou64(rcaplong->addr); lalba = scsi_2btoul(rcaplong->lalba_lbp); } /* * Because the GEOM code will panic if we * give it an 'illegal' value, we avoid that * here. */ if (block_size == 0) { block_size = 512; if (maxsector == 0) maxsector = -1; } if (block_size >= maxphys) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); announce_buf = NULL; cam_periph_invalidate(periph); } else { /* * We pass rcaplong into dasetgeom(), * because it will only use it if it is * non-NULL. */ dasetgeom(periph, block_size, maxsector, rcaplong, sizeof(*rcaplong)); lbp = (lalba & SRC16_LBPME_A); dp = &softc->params; n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "%juMB (%ju %u byte sectors", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); if (softc->p_type != 0) { n += snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ", DIF type %d", softc->p_type); } snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")"); } } else { int error; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ status = done_ccb->ccb_h.status; if ((status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); memset(&cgd, 0, sizeof(cgd)); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * If we tried READ CAPACITY(16) and failed, * fall back to READ CAPACITY(10).
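* (The fallback below triggers on CAM_REQ_INVALID status or an * ILLEGAL REQUEST sense key, and clears DA_FLAG_CAN_RC16 so the * 16-byte form isn't tried again.)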
*/ if ((state == DA_CCB_PROBE_RC16) && (softc->flags & DA_FLAG_CAN_RC16) && (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) || ((have_sense) && (error_code == SSD_CURRENT_ERROR || error_code == SSD_DESC_CURRENT_ERROR) && (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { cam_periph_assert(periph, MA_OWNED); softc->flags &= ~DA_FLAG_CAN_RC16; free(rdcap, M_SCSIDA); softc->state = DA_STATE_PROBE_RC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } /* * Attach to anything that claims to be a * direct access or optical disk device, * as long as it doesn't return a "Logical * unit not supported" (0x25) error. * "Internal Target Failure" (0x44) is also * special and typically means that the * device is a SATA drive behind a SATL * translation that's fallen into a * terminally fatal state. */ if ((have_sense) && (asc != 0x25) && (asc != 0x44) && (error_code == SSD_CURRENT_ERROR || error_code == SSD_DESC_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; dasetgeom(periph, 512, -1, NULL, 0); scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, DA_ANNOUNCETMP_SZ, "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else { if (have_sense) scsi_sense_print(&done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); announce_buf = NULL; /* * Free up resources. */ cam_periph_invalidate(periph); } } } free(csio->data_ptr, M_SCSIDA); if (announce_buf != NULL && ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { struct sbuf sb; sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, announce_buf); xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, DA_Q_BIT_STRING); sbuf_finish(&sb); sbuf_putbuf(&sb); /* * Create our sysctl variables, now that we know * we have successfully attached. */ /* increase the refcount */ if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); } else { /* XXX This message is useless! */ xpt_print(periph->path, "fatal error, " "could not acquire reference count\n"); } } /* We already probed the device. */ if (softc->flags & DA_FLAG_PROBED) { daprobedone(periph, done_ccb); return; } /* Ensure re-probe doesn't see old delete. */ softc->delete_available = 0; dadeleteflag(softc, DA_DELETE_ZERO, 1); if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { /* * Based on older SBC-3 spec revisions * any of the UNMAP methods "may" be * available via LBP given this flag so * we flag all of them as available and * then remove those which further * probes confirm aren't available * later. 
* * We could also check readcap(16) p_type * flag to exclude one or more invalid * write same (X) types here */ dadeleteflag(softc, DA_DELETE_WS16, 1); dadeleteflag(softc, DA_DELETE_WS10, 1); dadeleteflag(softc, DA_DELETE_UNMAP, 1); softc->state = DA_STATE_PROBE_LBP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } softc->state = DA_STATE_PROBE_BDC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_vpd_logical_block_prov *lbp; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { /* * T10/1799-D Revision 31 states at least one of these * must be supported but we don't currently enforce this. */ dadeleteflag(softc, DA_DELETE_WS16, (lbp->flags & SVPD_LBP_WS16)); dadeleteflag(softc, DA_DELETE_WS10, (lbp->flags & SVPD_LBP_WS10)); dadeleteflag(softc, DA_DELETE_UNMAP, (lbp->flags & SVPD_LBP_UNMAP)); } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure indicates we don't support any SBC-3 * delete methods with UNMAP */ } } free(lbp, M_SCSIDA); softc->state = DA_STATE_PROBE_BLK_LIMITS; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_vpd_block_limits *block_limits; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t max_txfer_len = scsi_4btoul( block_limits->max_txfer_len); uint32_t max_unmap_lba_cnt = scsi_4btoul( block_limits->max_unmap_lba_cnt); uint32_t max_unmap_blk_cnt = scsi_4btoul( block_limits->max_unmap_blk_cnt); uint32_t unmap_gran = scsi_4btoul( block_limits->opt_unmap_grain); uint32_t unmap_gran_align = scsi_4btoul( block_limits->unmap_grain_align); uint64_t ws_max_blks = scsi_8btou64( block_limits->max_write_same_length); if (max_txfer_len != 0) { softc->disk->d_maxsize = MIN(softc->maxio, (off_t)max_txfer_len * softc->params.secsize); } /* * We should already support UNMAP but we check lba * and block count to be sure */ if (max_unmap_lba_cnt != 0x00L && max_unmap_blk_cnt != 0x00L) { softc->unmap_max_lba = max_unmap_lba_cnt; softc->unmap_max_ranges = min(max_unmap_blk_cnt, UNMAP_MAX_RANGES); if (unmap_gran > 1) { softc->unmap_gran = unmap_gran; if (unmap_gran_align & 0x80000000) { softc->unmap_gran_align = unmap_gran_align & 0x7fffffff; } } } else { /* * Unexpected UNMAP limits which means the * device doesn't actually support UNMAP */ dadeleteflag(softc, DA_DELETE_UNMAP, 0); } if (ws_max_blks != 0x00L) softc->ws_max_blks = ws_max_blks; } else 
{ int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure here doesn't mean UNMAP is not * supported as this is an optional page. */ softc->unmap_max_lba = 1; softc->unmap_max_ranges = 1; } } free(block_limits, M_SCSIDA); softc->state = DA_STATE_PROBE_BDC; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb) { struct scsi_vpd_block_device_characteristics *bdc; struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; /* * Disable queue sorting for non-rotational media * by default. */ u_int16_t old_rate = softc->disk->d_rotation_rate; valid_len = csio->dxfer_len - csio->resid; if (SBDC_IS_PRESENT(bdc, valid_len, medium_rotation_rate)) { softc->disk->d_rotation_rate = scsi_2btoul(bdc->medium_rotation_rate); if (softc->disk->d_rotation_rate == SVPD_NON_ROTATING) { cam_iosched_set_sort_queue( softc->cam_iosched, 0); softc->flags &= ~DA_FLAG_ROTATING; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } } if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) && (softc->zone_mode == DA_ZONE_NONE)) { int ata_proto; if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) ata_proto = 1; else ata_proto = 0; /* * The Zoned field will only be set for * Drive Managed and Host Aware drives. If * they are Host Managed, the device type * in the standard INQUIRY data should be * set to T_ZBC_HM (0x14). */ if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_HAW_ZBC) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = (ata_proto) ? DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) == SVPD_DM_ZBC) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = (ata_proto) ? 
DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; } else if ((bdc->flags & SVPD_ZBC_MASK) != SVPD_ZBC_NR) { xpt_print(periph->path, "Unknown zoned " "type %#x", bdc->flags & SVPD_ZBC_MASK); } } } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(bdc, M_SCSIDA); softc->state = DA_STATE_PROBE_ATA; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } static void dadone_probeata(struct cam_periph *periph, union ccb *done_ccb) { struct ata_params *ata_params; struct ccb_scsiio *csio; struct da_softc *softc; u_int32_t priority; int continue_probe; int error; - int16_t *ptr; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; ata_params = (struct ata_params *)csio->data_ptr; - ptr = (uint16_t *)ata_params; continue_probe = 0; error = 0; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint16_t old_rate; ata_param_fixup(ata_params); if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && (softc->quirks & DA_Q_NO_UNMAP) == 0) { dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); if (ata_params->max_dsm_blocks != 0) softc->trim_max_ranges = min( softc->trim_max_ranges, ata_params->max_dsm_blocks * ATA_DSM_BLK_RANGES); } /* * Disable queue sorting for non-rotational media * by default. */ old_rate = softc->disk->d_rotation_rate; softc->disk->d_rotation_rate = ata_params->media_rotation_rate; if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->flags &= ~DA_FLAG_ROTATING; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } cam_periph_assert(periph, MA_OWNED); if (ata_params->capabilities1 & ATA_SUPPORT_DMA) softc->flags |= DA_FLAG_CAN_ATA_DMA; if (ata_params->support.extension & ATA_SUPPORT_GENLOG) softc->flags |= DA_FLAG_CAN_ATA_LOG; /* * At this point, if we have a SATA host aware drive, * we communicate via ATA passthrough unless the * SAT layer supports ZBC -> ZAC translation. In * that case, we prefer to use the translation. * * XXX KDM figure out how to detect a host managed * SATA drive. */ if (softc->zone_mode == DA_ZONE_NONE) { /* * Note that we don't override the zone * mode or interface if it has already been * set. This is because it has either been * set as a quirk, or when we probed the * SCSI Block Device Characteristics page, * the zoned field was set. The latter * means that the SAT layer supports ZBC to * ZAC translation, and we would prefer to * use that if it is available.
*/ if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) { softc->zone_mode = DA_ZONE_HOST_AWARE; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } else if ((ata_params->support3 & ATA_SUPPORT_ZONE_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) { softc->zone_mode =DA_ZONE_DRIVE_MANAGED; softc->zone_interface = DA_ZONE_IF_ATA_PASS; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } if ((softc->zone_mode == DA_ZONE_HOST_AWARE) || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { /* * If the ATA IDENTIFY failed, we could be talking * to a SCSI drive, although that seems unlikely, * since the drive did report that it supported the * ATA Information VPD page. If the ATA IDENTIFY * succeeded, and the SAT layer doesn't support * ZBC -> ZAC translation, continue on to get the * directory of ATA logs, and complete the rest of * the ZAC probe. If the SAT layer does support * ZBC -> ZAC translation, we want to use that, * and we'll probe the SCSI Zoned Block Device * Characteristics VPD page next. */ if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_LOG) && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) softc->state = DA_STATE_PROBE_ATA_LOGDIR; else softc->state = DA_STATE_PROBE_ZONE; continue_probe = 1; } if (continue_probe != 0) { xpt_schedule(periph, priority); xpt_release_ccb(done_ccb); return; } else daprobedone(periph, done_ccb); return; } static void dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { error = 0; softc->valid_logdir_len = 0; bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); softc->valid_logdir_len = csio->dxfer_len - csio->resid; if (softc->valid_logdir_len > 0) bcopy(csio->data_ptr, &softc->ata_logdir, min(softc->valid_logdir_len, sizeof(softc->ata_logdir))); /* * Figure out whether the Identify Device log is * supported. The General Purpose log directory * has a header, and lists the number of pages * available for each GP log identified by the * offset into the list. */ if ((softc->valid_logdir_len >= ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) && (le16dec(softc->ata_logdir.header) == ATA_GP_LOG_DIR_VERSION) && (le16dec(&softc->ata_logdir.num_pages[ (ATA_IDENTIFY_DATA_LOG * sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ softc->flags |= DA_FLAG_CAN_ATA_IDLOG; } else { softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA log directory, * then ATA logs are effectively not * supported even if the bit is set in the * identify data. 
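* (The bit in question is ATA_SUPPORT_GENLOG, which was latched into * DA_FLAG_CAN_ATA_LOG when the identify data was parsed.)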
*/ softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | DA_FLAG_CAN_ATA_IDLOG); if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { softc->state = DA_STATE_PROBE_ATA_IDDIR; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } static void dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { off_t entries_offset, max_entries; error = 0; softc->valid_iddir_len = 0; bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | DA_FLAG_CAN_ATA_ZONE); softc->valid_iddir_len = csio->dxfer_len - csio->resid; if (softc->valid_iddir_len > 0) bcopy(csio->data_ptr, &softc->ata_iddir, min(softc->valid_iddir_len, sizeof(softc->ata_iddir))); entries_offset = __offsetof(struct ata_identify_log_pages, entries); max_entries = softc->valid_iddir_len - entries_offset; if ((softc->valid_iddir_len > (entries_offset + 1)) && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) && (softc->ata_iddir.entry_count > 0)) { int num_entries, i; num_entries = softc->ata_iddir.entry_count; num_entries = min(num_entries, softc->valid_iddir_len - entries_offset); for (i = 0; i < num_entries && i < max_entries; i++) { if (softc->ata_iddir.entries[i] == ATA_IDL_SUP_CAP) softc->flags |= DA_FLAG_CAN_ATA_SUPCAP; else if (softc->ata_iddir.entries[i] == ATA_IDL_ZDI) softc->flags |= DA_FLAG_CAN_ATA_ZONE; if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) break; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data log * directory, then it effectively isn't * supported even if the ATA Log directory * has a non-zero number of pages present for * this log.
*/ softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { softc->state = DA_STATE_PROBE_ATA_SUP; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } static void dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n")); softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_size; struct ata_identify_log_sup_cap *sup_cap; error = 0; sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_identify_log_sup_cap, sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); if (valid_len >= needed_size) { uint64_t zoned, zac_cap; zoned = le64dec(sup_cap->zoned_cap); if (zoned & ATA_ZONED_VALID) { /* * This should have already been * set, because this is also in the * ATA identify data. */ if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_HOST_AWARE) softc->zone_mode = DA_ZONE_HOST_AWARE; else if ((zoned & ATA_ZONED_MASK) == ATA_SUPPORT_ZONE_DEV_MANAGED) softc->zone_mode = DA_ZONE_DRIVE_MANAGED; } zac_cap = le64dec(sup_cap->sup_zac_cap); if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { if (zac_cap & ATA_REPORT_ZONES_SUP) softc->zone_flags |= DA_ZONE_FLAG_RZ_SUP; if (zac_cap & ATA_ND_OPEN_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_OPEN_SUP; if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_CLOSE_SUP; if (zac_cap & ATA_ND_FINISH_ZONE_SUP) softc->zone_flags |= DA_ZONE_FLAG_FINISH_SUP; if (zac_cap & ATA_ND_RWP_SUP) softc->zone_flags |= DA_ZONE_FLAG_RWP_SUP; } else { /* * This field was introduced in * ACS-4, r08 on April 28th, 2015. * If the drive firmware was written * to an earlier spec, it won't have * the field. So, assume all * commands are supported. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { /* * If we can't get the ATA Identify Data * Supported Capabilities page, clear the * flag... */ softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; /* * And clear zone capabilities. 
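* (DA_ZONE_FLAG_SUP_MASK covers the report/open/close/finish/reset * write pointer support flags set above.)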
*/ softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { softc->state = DA_STATE_PROBE_ATA_ZONE; xpt_release_ccb(done_ccb); xpt_schedule(periph, priority); return; } daprobedone(periph, done_ccb); return; } static void dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n")); softc = (struct da_softc *)periph->softc; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct ata_zoned_info_log *zi_log; uint32_t valid_len; size_t needed_size; zi_log = (struct ata_zoned_info_log *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_size = __offsetof(struct ata_zoned_info_log, version_info) + 1 + sizeof(zi_log->version_info); if (valid_len >= needed_size) { uint64_t tmpvar; tmpvar = le64dec(zi_log->zoned_cap); if (tmpvar & ATA_ZDI_CAP_VALID) { if (tmpvar & ATA_ZDI_CAP_URSWRZ) softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; } tmpvar = le64dec(zi_log->optimal_seq_zones); if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = (tmpvar & ATA_ZDI_OPT_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_seq_zones = 0; } tmpvar = le64dec(zi_log->optimal_nonseq_zones); if (tmpvar & ATA_ZDI_OPT_NS_VALID) { softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = (tmpvar & ATA_ZDI_OPT_NS_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->optimal_nonseq_zones = 0; } tmpvar = le64dec(zi_log->max_seq_req_zones); if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = (tmpvar & ATA_ZDI_MAX_SEQ_MASK); } else { softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; softc->max_seq_zones = 0; } } } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; softc->zone_flags &= ~DA_ZONE_FLAG_SET_MASK; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } static void dadone_probezone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n")); softc = (struct da_softc *)periph->softc; csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t valid_len; size_t needed_len; struct scsi_vpd_zoned_bdc *zoned_bdc; error = 0; zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr; valid_len = csio->dxfer_len - csio->resid; needed_len = __offsetof(struct scsi_vpd_zoned_bdc, max_seq_req_zones) + 1 + sizeof(zoned_bdc->max_seq_req_zones); if ((valid_len >= needed_len) && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) softc->zone_flags |=
DA_ZONE_FLAG_URSWRZ; else softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; softc->optimal_seq_zones = scsi_4btoul(zoned_bdc->optimal_seq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; softc->optimal_nonseq_zones = scsi_4btoul( zoned_bdc->optimal_nonseq_zones); softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; softc->max_seq_zones = scsi_4btoul(zoned_bdc->max_seq_req_zones); softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; } /* * All of the zone commands are mandatory for SCSI * devices. * * XXX KDM this is valid as of September 2015. * Re-check this assumption once the SAT spec is * updated to support SCSI ZBC to ATA ZAC mapping. * Since ATA allows zone commands to be reported * as supported or not, this may not necessarily * be true for an ATA device behind a SAT (SCSI to * ATA Translation) layer. */ softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; } else { error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(csio->data_ptr, M_SCSIDA); daprobedone(periph, done_ccb); return; } static void dadone_tur(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; - struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n")); softc = (struct da_softc *)periph->softc; - csio = &done_ccb->csio; cam_periph_assert(periph, MA_OWNED); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; /* Will complete again, keep reference */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } softc->flags &= ~DA_FLAG_TUR_PENDING; xpt_release_ccb(done_ccb); da_periph_release_locked(periph, DA_REF_TUR); return; } static void dareprobe(struct cam_periph *periph) { struct da_softc *softc; int status; softc = (struct da_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); /* Probe in progress; don't interfere. */ if (softc->state != DA_STATE_NORMAL) return; status = da_periph_acquire(periph, DA_REF_REPROBE); KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed")); softc->state = DA_STATE_PROBE_WP; xpt_schedule(periph, CAM_PRIORITY_DEV); } static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct da_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->csio.bio != NULL) biotrack(ccb->csio.bio, __func__); #endif periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct da_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); /* * Automatically detect devices that do not support * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. */ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cmd6workaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cmd6workaround(ccb); /* * If the target replied with CAPACITY DATA HAS CHANGED UA, * query the capacity and notify upper layers. 
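 *
 * As a worked example (editor's illustration of the checks below): a
 * CAPACITY DATA HAS CHANGED unit attention decodes to sense key
 * UNIT ATTENTION with ASC/ASCQ 0x2A/0x09, so once
 * scsi_extract_sense_ccb() has pulled the sense data apart, the
 * "asc == 0x2A && ascq == 0x09" test selects the reprobe path.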
*/ else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x2A && ascq == 0x09) { xpt_print(periph->path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x3F && ascq == 0x03) { xpt_print(periph->path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { softc->flags |= DA_FLAG_PACK_INVALID; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (ERESTART); #ifdef CAM_IO_STATS switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: softc->errors++; break; default: break; } #endif /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. */ sense_flags |= SF_RETRY_UA; if (softc->quirks & DA_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return(cam_periph_error(ccb, cam_flags, sense_flags)); } static void damediapoll(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && (softc->flags & DA_FLAG_TUR_PENDING) == 0 && softc->state == DA_STATE_NORMAL && LIST_EMPTY(&softc->pending_ccbs)) { if (da_periph_acquire(periph, DA_REF_TUR) == 0) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* Queue us up again */ if (da_poll_period != 0) callout_schedule(&softc->mediapoll_c, da_poll_period * hz); } static void daprevent(struct cam_periph *periph, int action) { struct da_softc *softc; union ccb *ccb; int error; cam_periph_assert(periph, MA_OWNED); softc = (struct da_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/1, /*cbcfp*/NULL, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, 5000); error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~DA_FLAG_PACK_LOCKED; else softc->flags |= DA_FLAG_PACK_LOCKED; } xpt_release_ccb(ccb); } static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) { struct ccb_calc_geometry ccg; struct da_softc *softc; struct disk_params *dp; u_int lbppbe, lalba; int error; softc = (struct da_softc *)periph->softc; dp = &softc->params; dp->secsize = block_len; dp->sectors = maxsector + 1; if (rcaplong != NULL) { lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; lalba = scsi_2btoul(rcaplong->lalba_lbp); lalba &= SRC16_LALBA_A; if (rcaplong->prot & SRC16_PROT_EN) softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >> SRC16_P_TYPE_SHIFT) + 1; else softc->p_type = 0; } else { lbppbe = 0; lalba = 0; softc->p_type = 0; } if (lbppbe > 0) { dp->stripesize = block_len << lbppbe; dp->stripeoffset = (dp->stripesize - block_len * lalba) % dp->stripesize; } else if (softc->quirks & DA_Q_4K) { dp->stripesize = 4096; 
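		/*
		 * Editor's note (illustrative, not from the original
		 * source): the lbppbe branch above yields the same
		 * result for a 512e drive that reports lbppbe == 3 and
		 * lalba == 0: stripesize = 512 << 3 == 4096 and
		 * stripeoffset = (4096 - 512 * 0) % 4096 == 0.  The
		 * DA_Q_4K quirk hard-codes that outcome for drives that
		 * fail to report their 4K physical sector size.
		 */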
dp->stripeoffset = 0; } else if (softc->unmap_gran != 0) { dp->stripesize = block_len * softc->unmap_gran; dp->stripeoffset = (dp->stripesize - block_len * softc->unmap_gran_align) % dp->stripesize; } else { dp->stripesize = 0; dp->stripeoffset = 0; } /* * Have the controller provide us with a geometry * for this disk. The only time the geometry * matters is when we boot and the controller * is the only one knowledgeable enough to come * up with something that will make this a bootable * device. */ memset(&ccg, 0, sizeof(ccg)); xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; ccg.block_size = dp->secsize; ccg.volume_size = dp->sectors; ccg.heads = 0; ccg.secs_per_track = 0; ccg.cylinders = 0; xpt_action((union ccb*)&ccg); if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * We don't know what went wrong here- but just pick * a geometry so we don't have nasty things like divide * by zero. */ dp->heads = 255; dp->secs_per_track = 255; dp->cylinders = dp->sectors / (255 * 255); if (dp->cylinders == 0) { dp->cylinders = 1; } } else { dp->heads = ccg.heads; dp->secs_per_track = ccg.secs_per_track; dp->cylinders = ccg.cylinders; } /* * If the user supplied a read capacity buffer, and if it is * different than the previous buffer, update the data in the EDT. * If it's the same, we don't bother. This avoids sending an * update every time someone opens this device. */ if ((rcaplong != NULL) && (bcmp(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)) != 0)) { struct ccb_dev_advinfo cdai; memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = rcap_len; cdai.buf = (uint8_t *)rcaplong; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: failed to set read " "capacity advinfo\n", __func__); /* Use cam_error_print() to decode the status */ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, CAM_EPF_ALL); } else { bcopy(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)); } } softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; softc->disk->d_stripesize = softc->params.stripesize; softc->disk->d_stripeoffset = softc->params.stripeoffset; /* XXX: these are not actually "firmware" values, so they may be wrong */ softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; softc->disk->d_devstat->block_size = softc->params.secsize; softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; error = disk_resize(softc->disk, M_NOWAIT); if (error != 0) xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); } static void dasendorderedtag(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; cam_periph_assert(periph, MA_OWNED); if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) softc->flags |= DA_FLAG_NEED_OTAG; softc->flags &= ~DA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, periph); } /* * Step through all DA peripheral drivers, and if the device is still open, * sync the disk cache to 
physical media. */ static void dashutdown(void * arg, int howto) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &dadriver) { softc = (struct da_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & DA_FLAG_OPEN)) { dadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it. */ if (((softc->flags & DA_FLAG_OPEN) == 0) || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/0, /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /* whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 60 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } #else /* !_KERNEL */ /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. */ void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_format_unit *scsi_cmd; scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = FORMAT_UNIT; scsi_cmd->byte2 = byte2; scsi_ulto2b(ileave, scsi_cmd->interleave); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { uint8_t cdb_len; /* * These conditions allow using the 10 byte command. Otherwise we * need to use the 12 byte command.
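 *
 * For example (editor's illustration): a caller passing
 * minimum_cmd_size <= 10, addr_desc_index == 0, and a dxfer_len no
 * larger than SRDD10_MAX_LENGTH gets the 10 byte CDB; a nonzero
 * address descriptor index or a larger transfer forces the 12 byte
 * form, the only one with an address_descriptor_index field.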
*/ if ((minimum_cmd_size <= 10) && (addr_desc_index == 0) && (dxfer_len <= SRDD10_MAX_LENGTH)) { struct scsi_read_defect_data_10 *cdb10; cdb10 = (struct scsi_read_defect_data_10 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb10); bzero(cdb10, cdb_len); cdb10->opcode = READ_DEFECT_DATA_10; cdb10->format = list_format; scsi_ulto2b(dxfer_len, cdb10->alloc_length); } else { struct scsi_read_defect_data_12 *cdb12; cdb12 = (struct scsi_read_defect_data_12 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb12); bzero(cdb12, cdb_len); cdb12->opcode = READ_DEFECT_DATA_12; cdb12->format = list_format; scsi_ulto4b(dxfer_len, cdb12->alloc_length); scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sanitize *scsi_cmd; scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SANITIZE; scsi_cmd->byte2 = byte2; scsi_cmd->control = control; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } #endif /* _KERNEL */ void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_out *scsi_cmd; scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_OUT; scsi_cmd->service_action = service_action; scsi_u64to8b(zone_id, scsi_cmd->zone_id); scsi_cmd->zone_flags = zone_flags; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout) { struct scsi_zbc_in *scsi_cmd; scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = ZBC_IN; scsi_cmd->service_action = service_action; scsi_ulto4b(dxfer_len, scsi_cmd->length); scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); scsi_cmd->zone_options = zone_options; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_IN : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol, ata_flags; uint16_t features_out; uint32_t sectors_out, auxiliary; int retval; retval = 0; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_OUT; features_out = (zm_action & 0xf) | (zone_flags << 8); ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { protocol = AP_PROTO_NON_DATA; ata_flags |= AP_FLAG_TLEN_NO_DATA; sectors_out = 0; } else { protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV; sectors_out = ((dxfer_len >> 9) & 0xffff); } auxiliary = 0; } else { ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; if (dxfer_len == 0) { command_out = ATA_NCQ_NON_DATA; features_out = ATA_NCQ_ZAC_MGMT_OUT; /* * We're assuming the SCSI to ATA translation layer * will set the NCQ tag number in the tag field. * That isn't clear from the SAT-4 spec (as of rev 05). */ sectors_out = 0; ata_flags |= AP_FLAG_TLEN_NO_DATA; } else { command_out = ATA_SEND_FPDMA_QUEUED; /* * Note that we're defaulting to normal priority, * and assuming that the SCSI to ATA translation * layer will insert the NCQ tag number in the tag * field. That isn't clear in the SAT-4 spec (as * of rev 05). */ sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; ata_flags |= AP_FLAG_TLEN_FEAT | AP_FLAG_TDIR_TO_DEV; /* * For SEND FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ?
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout) { uint8_t command_out, protocol; uint16_t features_out, sectors_out; uint32_t auxiliary; int ata_flags; int retval; retval = 0; ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; if (use_ncq == 0) { command_out = ATA_ZAC_MANAGEMENT_IN; /* XXX KDM put a macro here */ features_out = (zm_action & 0xf) | (zone_flags << 8); sectors_out = dxfer_len >> 9; /* XXX KDM macro */ protocol = AP_PROTO_DMA; ata_flags |= AP_FLAG_TLEN_SECT_CNT; auxiliary = 0; } else { ata_flags |= AP_FLAG_TLEN_FEAT; command_out = ATA_RECV_FPDMA_QUEUED; sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; /* * For RECEIVE FPDMA QUEUED, the transfer length is * encoded in the FEATURE register, and 0 means * that 65536 512 byte blocks are to be transferred. * In practice, it seems unlikely that we'll see * a transfer that large, and it may confuse the * SAT layer, because generally that means that * 0 bytes should be transferred. */ if (dxfer_len == (65536 * 512)) { features_out = 0; } else if (dxfer_len <= (65535 * 512)) { features_out = ((dxfer_len >> 9) & 0xffff); } else { /* The transfer is too big. */ retval = 1; goto bailout; } auxiliary = (zm_action & 0xf) | (zone_flags << 8); protocol = AP_PROTO_FPDMA; } protocol |= AP_EXTEND; retval = scsi_ata_pass(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features_out, /*sector_count*/ sectors_out, /*lba*/ zone_id, /*command*/ command_out, /*device*/ 0, /*icc*/ 0, /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ /*cdb_storage*/ cdb_storage, /*cdb_storage_len*/ cdb_storage_len, /*minimum_cmd_size*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout); bailout: return (retval); } diff --git a/sys/cam/scsi/scsi_enc.c b/sys/cam/scsi/scsi_enc.c index 30cbf108fd44..ad6c09c99f58 100644 --- a/sys/cam/scsi/scsi_enc.c +++ b/sys/cam/scsi/scsi_enc.c @@ -1,1035 +1,1030 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ses.h" MALLOC_DEFINE(M_SCSIENC, "SCSI ENC", "SCSI ENC buffers"); /* Enclosure type independent driver */ static d_open_t enc_open; static d_close_t enc_close; static d_ioctl_t enc_ioctl; static periph_init_t enc_init; static periph_ctor_t enc_ctor; static periph_oninv_t enc_oninvalidate; static periph_dtor_t enc_dtor; static void enc_async(void *, uint32_t, struct cam_path *, void *); static enctyp enc_type(struct ccb_getdev *); SYSCTL_NODE(_kern_cam, OID_AUTO, enc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Enclosure Services driver"); #if defined(DEBUG) || defined(ENC_DEBUG) int enc_verbose = 1; #else int enc_verbose = 0; #endif SYSCTL_INT(_kern_cam_enc, OID_AUTO, verbose, CTLFLAG_RWTUN, &enc_verbose, 0, "Enable verbose logging"); const char *elm_type_names[] = ELM_TYPE_NAMES; CTASSERT(nitems(elm_type_names) - 1 == ELMTYP_LAST); static struct periph_driver encdriver = { enc_init, "ses", TAILQ_HEAD_INITIALIZER(encdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(enc, encdriver); static struct cdevsw enc_cdevsw = { .d_version = D_VERSION, .d_open = enc_open, .d_close = enc_close, .d_ioctl = enc_ioctl, .d_name = "ses", .d_flags = D_TRACKCLOSE, }; static void enc_init(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, enc_async, NULL, NULL); if (status != CAM_REQ_CMP) { printf("enc: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void enc_devgonecb(void *arg) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = (struct enc_softc *)periph->softc; /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < enc->open_count; i++) cam_periph_release_locked(periph); enc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. 
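 *
 * (Editor's note, illustrative: cam_periph_unlock(periph) is
 * essentially mtx_unlock(cam_periph_mtx(periph)), and it is that
 * cam_periph_mtx() dereference of a possibly-freed periph that must
 * be avoided; caching the mutex pointer up front sidesteps it.)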
*/ mtx_unlock(mtx); } static void enc_oninvalidate(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; enc->enc_flags |= ENC_FLAG_INVALID; /* If the sub-driver has an invalidate routine, call it */ if (enc->enc_vec.softc_invalidate != NULL) enc->enc_vec.softc_invalidate(enc); /* * Unregister any async callbacks. */ xpt_register_async(0, enc_async, periph, periph->path); /* * Shutdown our daemon. */ enc->enc_flags |= ENC_FLAG_SHUTDOWN; if (enc->enc_daemon != NULL) { /* Signal the ses daemon to terminate. */ wakeup(enc->enc_daemon); } callout_drain(&enc->status_updater); destroy_dev_sched_cb(enc->enc_dev, enc_devgonecb, periph); } static void enc_dtor(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; /* If the sub-driver has a cleanup routine, call it */ if (enc->enc_vec.softc_cleanup != NULL) enc->enc_vec.softc_cleanup(enc); root_mount_rel(&enc->enc_rootmount); ENC_FREE(enc); } static void enc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; path_id_t path_id; cgd = (struct ccb_getdev *)arg; if (arg == NULL) { break; } if (enc_type(cgd) == ENC_NONE) { /* * Schedule announcement of the ENC bindings for * this device if it is managed by a SEP. */ path_id = xpt_path_path_id(path); xpt_lock_buses(); TAILQ_FOREACH(periph, &encdriver.units, unit_links) { struct enc_softc *softc; softc = (struct enc_softc *)periph->softc; /* Check this SEP is ready. */ if (softc == NULL || (softc->enc_flags & ENC_FLAG_INITIALIZED) == 0 || softc->enc_vec.device_found == NULL) continue; /* Check this SEP may manage this device. */ if (xpt_path_path_id(periph->path) != path_id && (softc->enc_type != ENC_SEMB_SES || cgd->protocol != PROTO_ATA)) continue; softc->enc_vec.device_found(softc); } xpt_unlock_buses(); return; } status = cam_periph_alloc(enc_ctor, enc_oninvalidate, enc_dtor, NULL, "ses", CAM_PERIPH_BIO, path, enc_async, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { printf("enc_async: Unable to probe new device due to " "status 0x%x\n", status); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static int enc_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != 0) return (ENXIO); cam_periph_lock(periph); softc = (struct enc_softc *)periph->softc; if ((softc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { error = ENXIO; goto out; } if (softc->enc_flags & ENC_FLAG_INVALID) { error = ENXIO; goto out; } out: if (error != 0) cam_periph_release_locked(periph); else softc->open_count++; cam_periph_unlock(periph); return (error); } static int enc_close(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = periph->softc; enc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. 
* * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return (0); } int enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags) { - struct enc_softc *softc; - struct cam_periph *periph; - - periph = xpt_path_periph(ccb->ccb_h.path); - softc = (struct enc_softc *)periph->softc; return (cam_periph_error(ccb, cflags, sflags)); } static int enc_ioctl(struct cdev *dev, u_long cmd, caddr_t arg_addr, int flag, struct thread *td) { struct cam_periph *periph; encioc_enc_status_t tmp; encioc_string_t sstr; encioc_elm_status_t elms; encioc_elm_desc_t elmd; encioc_elm_devnames_t elmdn; encioc_element_t *uelm; enc_softc_t *enc; enc_cache_t *cache; void *addr; int error, i; #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) return (ENOTTY); #endif if (arg_addr) addr = *((caddr_t *) arg_addr); else addr = NULL; periph = (struct cam_periph *)dev->si_drv1; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering encioctl\n")); cam_periph_lock(periph); enc = (struct enc_softc *)periph->softc; cache = &enc->enc_cache; /* * Now check to see whether we're initialized or not. * This actually should never fail as we're not supposed * to get past enc_open w/o successfully initializing * things. */ if ((enc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { cam_periph_unlock(periph); return (ENXIO); } cam_periph_unlock(periph); error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, * we must have the device open for writing. * * For commands that get information about the * device- we don't need to lock the peripheral * if we aren't running a command. The periph * also can't go away while a user process has * it open. */ switch (cmd) { case ENCIOC_GETNELM: case ENCIOC_GETELMMAP: case ENCIOC_GETENCSTAT: case ENCIOC_GETELMSTAT: case ENCIOC_GETELMDESC: case ENCIOC_GETELMDEVNAMES: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: break; default: if ((flag & FWRITE) == 0) { return (EBADF); } } /* * XXX The values read here are only valid for the current * configuration generation. We need these ioctls * to also pass in/out a generation number. 
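 *
 * A minimal userland sketch of the read-only path (editor's
 * illustration; the unit number and lack of error handling are
 * assumptions, not part of this driver):
 *
 *	int fd = open("/dev/ses0", O_RDONLY);
 *	unsigned int nelm;
 *
 *	if (fd >= 0 && ioctl(fd, ENCIOC_GETNELM, (caddr_t)&nelm) == 0)
 *		printf("%u elements\n", nelm);
 *
 * ENCIOC_GETNELM is among the commands exempted from the FWRITE check
 * above, so a read-only open suffices.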
*/ sx_slock(&enc->enc_cache_lock); switch (cmd) { case ENCIOC_GETNELM: error = copyout(&cache->nelms, addr, sizeof (cache->nelms)); break; case ENCIOC_GETELMMAP: for (uelm = addr, i = 0; i != cache->nelms; i++) { encioc_element_t kelm; kelm.elm_idx = i; kelm.elm_subenc_id = cache->elm_map[i].subenclosure; kelm.elm_type = cache->elm_map[i].elm_type; error = copyout(&kelm, &uelm[i], sizeof(kelm)); if (error) break; } break; case ENCIOC_GETENCSTAT: cam_periph_lock(periph); error = enc->enc_vec.get_enc_status(enc, 1); if (error) { cam_periph_unlock(periph); break; } tmp = cache->enc_status; cam_periph_unlock(periph); error = copyout(&tmp, addr, sizeof(tmp)); cache->enc_status = tmp; break; case ENCIOC_SETENCSTAT: error = copyin(addr, &tmp, sizeof(tmp)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.set_enc_status(enc, tmp, 1); cam_periph_unlock(periph); break; case ENCIOC_GETSTRING: case ENCIOC_SETSTRING: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: if (enc->enc_vec.handle_string == NULL) { error = EINVAL; break; } error = copyin(addr, &sstr, sizeof(sstr)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.handle_string(enc, &sstr, cmd); cam_periph_unlock(periph); if (error == 0 || error == ENOMEM) (void)copyout(&sstr.bufsiz, &((encioc_string_t *)addr)->bufsiz, sizeof(sstr.bufsiz)); break; case ENCIOC_GETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.get_elm_status(enc, &elms, 1); cam_periph_unlock(periph); if (error) break; error = copyout(&elms, addr, sizeof(elms)); break; case ENCIOC_GETELMDESC: error = copyin(addr, &elmd, sizeof(elmd)); if (error) break; if (elmd.elm_idx >= cache->nelms) { error = EINVAL; break; } if (enc->enc_vec.get_elm_desc != NULL) { error = enc->enc_vec.get_elm_desc(enc, &elmd); if (error) break; } else elmd.elm_desc_len = 0; error = copyout(&elmd, addr, sizeof(elmd)); break; case ENCIOC_GETELMDEVNAMES: if (enc->enc_vec.get_elm_devnames == NULL) { error = EINVAL; break; } error = copyin(addr, &elmdn, sizeof(elmdn)); if (error) break; if (elmdn.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = (*enc->enc_vec.get_elm_devnames)(enc, &elmdn); cam_periph_unlock(periph); if (error) break; error = copyout(&elmdn, addr, sizeof(elmdn)); break; case ENCIOC_SETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.set_elm_status(enc, &elms, 1); cam_periph_unlock(periph); break; case ENCIOC_INIT: cam_periph_lock(periph); error = enc->enc_vec.init_enc(enc); cam_periph_unlock(periph); break; default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, arg_addr, enc_error); cam_periph_unlock(periph); break; } sx_sunlock(&enc->enc_cache_lock); return (error); } int enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp) { int error, dlen, tdlen; ccb_flags ddf; union ccb *ccb; CAM_DEBUG(enc->periph->path, CAM_DEBUG_TRACE, ("entering enc_runcmd\n")); if (dptr) { if ((dlen = *dlenp) < 0) { dlen = -dlen; ddf = CAM_DIR_OUT; } else { ddf = CAM_DIR_IN; } } else { dlen = 0; ddf = CAM_DIR_NONE; } if (cdbl > IOCDBLEN) { cdbl = IOCDBLEN; } ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) { tdlen = min(dlen, 1020); tdlen = (tdlen + 3) & ~3; cam_fill_ataio(&ccb->ataio, 0, 
NULL, ddf, 0, dptr, tdlen, 30 * 1000); if (cdb[0] == RECEIVE_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 0x02, tdlen / 4); else if (cdb[0] == SEND_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x82, tdlen / 4); else if (cdb[0] == READ_BUFFER) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 0x00, tdlen / 4); else ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x80, tdlen / 4); } else { tdlen = dlen; cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG, dptr, dlen, sizeof (struct scsi_sense_data), cdbl, 60 * 1000); bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl); } error = cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS, NULL); if (error) { if (dptr) { *dlenp = dlen; } } else { if (dptr) { if (ccb->ccb_h.func_code == XPT_ATA_IO) *dlenp = ccb->ataio.resid; else *dlenp = ccb->csio.resid; *dlenp += tdlen - dlen; } } xpt_release_ccb(ccb); CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("exiting enc_runcmd: *dlenp = %d\n", *dlenp)); return (error); } void enc_log(struct enc_softc *enc, const char *fmt, ...) { va_list ap; printf("%s%d: ", enc->periph->periph_name, enc->periph->unit_number); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } /* * The code after this point runs on many platforms, * so forgive the slightly awkward and nonconforming * appearance. */ /* * Is this a device that supports enclosure services? * * It's a pretty simple ruleset- if it is device type * 0x0D (13), it's an ENCLOSURE device. */ #define SAFTE_START 44 #define SAFTE_END 50 #define SAFTE_LEN SAFTE_END-SAFTE_START static enctyp enc_type(struct ccb_getdev *cgd) { int buflen; unsigned char *iqd; if (cgd->protocol == PROTO_SEMB) { iqd = (unsigned char *)&cgd->ident_data; if (STRNCMP(iqd + 43, "S-E-S", 5) == 0) return (ENC_SEMB_SES); else if (STRNCMP(iqd + 43, "SAF-TE", 6) == 0) return (ENC_SEMB_SAFT); return (ENC_NONE); } else if (cgd->protocol != PROTO_SCSI) return (ENC_NONE); iqd = (unsigned char *)&cgd->inq_data; buflen = min(sizeof(cgd->inq_data), SID_ADDITIONAL_LENGTH(&cgd->inq_data)); if ((iqd[0] & 0x1f) == T_ENCLOSURE) return (ENC_SES); #ifdef SES_ENABLE_PASSTHROUGH if ((iqd[6] & 0x40) && (iqd[2] & 0x7) >= 2) { /* * PassThrough Device. */ return (ENC_SES_PASSTHROUGH); } #endif /* * The comparison is short for a reason- * some vendors were chopping it short. */ if (buflen < SAFTE_END - 2) { return (ENC_NONE); } if (STRNCMP((char *)&iqd[SAFTE_START], "SAF-TE", SAFTE_LEN - 2) == 0) { return (ENC_SAFT); } return (ENC_NONE); } /*================== Enclosure Monitoring/Processing Daemon ==================*/ /** * \brief Queue an update request for a given action, if needed. * * \param enc SES softc to queue the request for. * \param action Action requested. */ void enc_update_request(enc_softc_t *enc, uint32_t action) { if ((enc->pending_actions & (0x1 << action)) == 0) { enc->pending_actions |= (0x1 << action); ENC_DLOG(enc, "%s: queuing requested action %d\n", __func__, action); if (enc->current_action == ENC_UPDATE_NONE) wakeup(enc->enc_daemon); } else { ENC_DLOG(enc, "%s: ignoring requested action %d - " "Already queued\n", __func__, action); } } /** * \brief Invoke the handler of the highest priority pending * state in the SES state machine. * * \param enc The SES instance invoking the state machine.
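 *
 * Illustrative note: priority here is simply bit position, lowest
 * first.  With pending_actions == 0x14 (actions 2 and 4 pending),
 * ffs(0x14) - 1 == 2, so action 2 is dispatched before action 4.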
*/ static void enc_fsm_step(enc_softc_t *enc) { union ccb *ccb; uint8_t *buf; struct enc_fsm_state *cur_state; int error; uint32_t xfer_len; ENC_DLOG(enc, "%s enter %p\n", __func__, enc); enc->current_action = ffs(enc->pending_actions) - 1; enc->pending_actions &= ~(0x1 << enc->current_action); cur_state = &enc->enc_fsm_states[enc->current_action]; buf = NULL; if (cur_state->buf_size != 0) { cam_periph_unlock(enc->periph); buf = malloc(cur_state->buf_size, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); } error = 0; ccb = NULL; if (cur_state->fill != NULL) { ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); error = cur_state->fill(enc, cur_state, ccb, buf); if (error != 0) goto done; error = cam_periph_runccb(ccb, cur_state->error, ENC_CFLAGS, ENC_FLAGS|SF_QUIET_IR, NULL); } if (ccb != NULL) { if (ccb->ccb_h.func_code == XPT_ATA_IO) xfer_len = ccb->ataio.dxfer_len - ccb->ataio.resid; else xfer_len = ccb->csio.dxfer_len - ccb->csio.resid; } else xfer_len = 0; cam_periph_unlock(enc->periph); cur_state->done(enc, cur_state, ccb, &buf, error, xfer_len); cam_periph_lock(enc->periph); done: ENC_DLOG(enc, "%s exit - result %d\n", __func__, error); ENC_FREE_AND_NULL(buf); if (ccb != NULL) xpt_release_ccb(ccb); } /** * \invariant Called with cam_periph mutex held. */ static void enc_status_updater(void *arg) { enc_softc_t *enc; enc = arg; if (enc->enc_vec.poll_status != NULL) enc->enc_vec.poll_status(enc); } static void enc_daemon(void *arg) { enc_softc_t *enc; enc = arg; cam_periph_lock(enc->periph); while ((enc->enc_flags & ENC_FLAG_SHUTDOWN) == 0) { if (enc->pending_actions == 0) { /* * Reset callout and msleep, or * issue timed task completion * status command. */ enc->current_action = ENC_UPDATE_NONE; /* * We've been through our state machine at least * once. Allow the transition to userland. */ root_mount_rel(&enc->enc_rootmount); callout_reset(&enc->status_updater, 60*hz, enc_status_updater, enc); cam_periph_sleep(enc->periph, enc->enc_daemon, PUSER, "idle", 0); } else { enc_fsm_step(enc); } } enc->enc_daemon = NULL; cam_periph_unlock(enc->periph); cam_periph_release(enc->periph); kproc_exit(0); } static int enc_kproc_init(enc_softc_t *enc) { int result; callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0); if (cam_periph_acquire(enc->periph) != 0) return (ENXIO); result = kproc_create(enc_daemon, enc, &enc->enc_daemon, /*flags*/0, /*stackpgs*/0, "enc_daemon%d", enc->periph->unit_number); if (result == 0) { /* Do an initial load of all page data. */ cam_periph_lock(enc->periph); enc->enc_vec.poll_status(enc); cam_periph_unlock(enc->periph); } else cam_periph_release(enc->periph); return (result); } static cam_status enc_ctor(struct cam_periph *periph, void *arg) { cam_status status = CAM_REQ_CMP_ERR; int err; enc_softc_t *enc; struct ccb_getdev *cgd; char *tname; struct make_dev_args args; struct sbuf sb; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("enc_ctor: no getdev CCB, can't register device\n"); goto out; } enc = ENC_MALLOCZ(sizeof(*enc)); if (enc == NULL) { printf("enc_ctor: Unable to probe new device. 
" "Unable to allocate enc\n"); goto out; } enc->periph = periph; enc->current_action = ENC_UPDATE_INVALID; enc->enc_type = enc_type(cgd); sx_init(&enc->enc_cache_lock, "enccache"); switch (enc->enc_type) { case ENC_SES: case ENC_SES_PASSTHROUGH: case ENC_SEMB_SES: err = ses_softc_init(enc); break; case ENC_SAFT: case ENC_SEMB_SAFT: err = safte_softc_init(enc); break; case ENC_NONE: default: ENC_FREE(enc); return (CAM_REQ_CMP_ERR); } if (err) { xpt_print(periph->path, "error %d initializing\n", err); goto out; } /* * Hold off userland until we have made at least one pass * through our state machine so that physical path data is * present. */ if (enc->enc_vec.poll_status != NULL) { root_mount_hold_token(periph->periph_name, &enc->enc_rootmount); } /* * The softc field is set only once the enc is fully initialized * so that we can rely on this field to detect partially * initialized periph objects in the AC_FOUND_DEVICE handler. */ periph->softc = enc; cam_periph_unlock(periph); if (enc->enc_vec.poll_status != NULL) { err = enc_kproc_init(enc); if (err) { xpt_print(periph->path, "error %d starting enc_daemon\n", err); goto out; } } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } make_dev_args_init(&args); args.mda_devsw = &enc_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; err = make_dev_s(&args, &enc->enc_dev, "%s%d", periph->periph_name, periph->unit_number); cam_periph_lock(periph); if (err != 0) { cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } enc->enc_flags |= ENC_FLAG_INITIALIZED; /* * Add an async callback so that we get notified if this * device goes away. */ xpt_register_async(AC_LOST_DEVICE, enc_async, periph, periph->path); switch (enc->enc_type) { default: case ENC_NONE: tname = "No ENC device"; break; case ENC_SES: tname = "SES Device"; break; case ENC_SES_PASSTHROUGH: tname = "SES Passthrough Device"; break; case ENC_SAFT: tname = "SAF-TE Device"; break; case ENC_SEMB_SES: tname = "SEMB SES Device"; break; case ENC_SEMB_SAFT: tname = "SEMB SAF-TE Device"; break; } sbuf_new(&sb, enc->announce_buf, ENC_ANNOUNCE_SZ, SBUF_FIXEDLEN); xpt_announce_periph_sbuf(periph, &sb, tname); sbuf_finish(&sb); sbuf_putbuf(&sb); status = CAM_REQ_CMP; out: if (status != CAM_REQ_CMP) enc_dtor(periph); return (status); } diff --git a/sys/cam/scsi/scsi_enc_ses.c b/sys/cam/scsi/scsi_enc_ses.c index 39d99fd23eb9..803630f31f54 100644 --- a/sys/cam/scsi/scsi_enc_ses.c +++ b/sys/cam/scsi/scsi_enc_ses.c @@ -1,3057 +1,3053 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Matthew Jacob * Copyright (c) 2010 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * \file scsi_enc_ses.c * * Structures and routines specific && private to SES only */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* SES Native Type Device Support */ /* SES Diagnostic Page Codes */ typedef enum { SesSupportedPages = 0x0, SesConfigPage = 0x1, SesControlPage = 0x2, SesStatusPage = SesControlPage, SesHelpTxt = 0x3, SesStringOut = 0x4, SesStringIn = SesStringOut, SesThresholdOut = 0x5, SesThresholdIn = SesThresholdOut, SesArrayControl = 0x6, /* Obsolete in SES v2 */ SesArrayStatus = SesArrayControl, SesElementDescriptor = 0x7, SesShortStatus = 0x8, SesEnclosureBusy = 0x9, SesAddlElementStatus = 0xa } SesDiagPageCodes; typedef struct ses_type { const struct ses_elm_type_desc *hdr; const char *text; } ses_type_t; typedef struct ses_comstat { uint8_t comstatus; uint8_t comstat[3]; } ses_comstat_t; typedef union ses_addl_data { struct ses_elm_sas_device_phy *sasdev_phys; struct ses_elm_sas_expander_phy *sasexp_phys; struct ses_elm_sas_port_phy *sasport_phys; struct ses_fcobj_port *fc_ports; } ses_add_data_t; typedef struct ses_addl_status { struct ses_elm_addlstatus_base_hdr *hdr; union { union ses_fcobj_hdr *fc; union ses_elm_sas_hdr *sas; struct ses_elm_ata_hdr *ata; } proto_hdr; union ses_addl_data proto_data; /* array sizes stored in header */ } ses_add_status_t; typedef struct ses_element { uint8_t eip; /* eip bit is set */ uint16_t descr_len; /* length of the descriptor */ const char *descr; /* descriptor for this object */ struct ses_addl_status addl; /* additional status info */ } ses_element_t; typedef struct ses_control_request { int elm_idx; ses_comstat_t elm_stat; int result; TAILQ_ENTRY(ses_control_request) links; } ses_control_request_t; TAILQ_HEAD(ses_control_reqlist, ses_control_request); typedef struct ses_control_reqlist ses_control_reqlist_t; enum { SES_SETSTATUS_ENC_IDX = -1 }; static void ses_terminate_control_requests(ses_control_reqlist_t *reqlist, int result) { ses_control_request_t *req; while ((req = TAILQ_FIRST(reqlist)) != NULL) { TAILQ_REMOVE(reqlist, req, links); req->result = result; wakeup(req); } } enum ses_iter_index_values { /** * \brief Value of an initialized but invalid index * in a ses_iterator object. * * This value is used for the individual_element_index of * overall status elements and for all index types when * an iterator is first initialized. */ ITERATOR_INDEX_INVALID = -1, /** * \brief Value of an index in a ses_iterator object * when the iterator has traversed past the last * valid element.
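 *
 * For example (illustrative): once ses_iter_next() has walked past
 * the final element it parks every index at ITERATOR_INDEX_END and
 * returns NULL, so further ses_iter_next() calls keep returning NULL
 * rather than indexing beyond the element map.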
*/ ITERATOR_INDEX_END = INT_MAX }; /** * \brief Structure encapsulating all data necessary to traverse the * elements of a SES configuration. * * The ses_iterator object simplifies the task of iterating through all * elements detected via the SES configuration page by tracking the numerous * element indexes that, instead of memoizing in the softc, we calculate * on the fly during the traversal of the element objects. The various * indexes are necessary due to the varying needs of matching objects in * the different SES pages. Some pages (e.g. Status/Control) contain all * elements, while others (e.g. Additional Element Status) only contain * individual elements (no overall status elements) of particular types. * * To use an iterator, initialize it with ses_iter_init(), and then * use ses_iter_next() to traverse the elements (including the first) in * the configuration. Once an iterator is initialized with ses_iter_init(), * you may also seek to any particular element by either its global or * individual element index via the ses_iter_seek_to() function. You may * also return an iterator to the position just before the first element * (i.e. the same state as after an ses_iter_init()), with ses_iter_reset(). */ struct ses_iterator { /** * \brief Backlink to the overall software configuration structure. * * This is included for convenience so the iteration functions * need only take a single, struct ses_iterator *, argument. */ enc_softc_t *enc; enc_cache_t *cache; /** * \brief Index of the type of the current element within the * ses_cache's ses_types array. */ int type_index; /** * \brief The position (0 based) of this element relative to all other * elements of this type. * * This index resets to zero every time the iterator transitions * to elements of a new type in the configuration. */ int type_element_index; /** * \brief The position (0 based) of this element relative to all * other individual status elements in the configuration. * * This index ranges from 0 through the number of individual * elements in the configuration. When the iterator returns * an overall status element, individual_element_index is * set to ITERATOR_INDEX_INVALID, to indicate that it does * not apply to the current element. */ int individual_element_index; /** * \brief The position (0 based) of this element relative to * all elements in the configuration. * * This index is appropriate for indexing into enc->ses_elm_map. */ int global_element_index; /** * \brief The last valid individual element index of this * iterator. * * When an iterator traverses an overall status element, the * individual element index is reset to ITERATOR_INDEX_INVALID * to prevent unintentional use of the individual_element_index * field. The saved_individual_element_index allows the iterator * to restore its position in the individual elements upon * reaching the next individual element.
*/ int saved_individual_element_index; }; typedef enum { SES_UPDATE_NONE, SES_UPDATE_PAGES, SES_UPDATE_GETCONFIG, SES_UPDATE_GETSTATUS, SES_UPDATE_GETELMDESCS, SES_UPDATE_GETELMADDLSTATUS, SES_PROCESS_CONTROL_REQS, SES_PUBLISH_PHYSPATHS, SES_PUBLISH_CACHE, SES_NUM_UPDATE_STATES } ses_update_action; static enc_softc_cleanup_t ses_softc_cleanup; #define SCSZ 0x8000 static fsm_fill_handler_t ses_fill_rcv_diag_io; static fsm_fill_handler_t ses_fill_control_request; static fsm_done_handler_t ses_process_pages; static fsm_done_handler_t ses_process_config; static fsm_done_handler_t ses_process_status; static fsm_done_handler_t ses_process_elm_descs; static fsm_done_handler_t ses_process_elm_addlstatus; static fsm_done_handler_t ses_process_control_request; static fsm_done_handler_t ses_publish_physpaths; static fsm_done_handler_t ses_publish_cache; static struct enc_fsm_state enc_fsm_states[SES_NUM_UPDATE_STATES] = { { "SES_UPDATE_NONE", 0, 0, 0, NULL, NULL, NULL }, { "SES_UPDATE_PAGES", SesSupportedPages, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_pages, enc_error }, { "SES_UPDATE_GETCONFIG", SesConfigPage, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_config, enc_error }, { "SES_UPDATE_GETSTATUS", SesStatusPage, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_status, enc_error }, { "SES_UPDATE_GETELMDESCS", SesElementDescriptor, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_elm_descs, enc_error }, { "SES_UPDATE_GETELMADDLSTATUS", SesAddlElementStatus, SCSZ, 60 * 1000, ses_fill_rcv_diag_io, ses_process_elm_addlstatus, enc_error }, { "SES_PROCESS_CONTROL_REQS", SesControlPage, SCSZ, 60 * 1000, ses_fill_control_request, ses_process_control_request, enc_error }, { "SES_PUBLISH_PHYSPATHS", 0, 0, 0, NULL, ses_publish_physpaths, NULL }, { "SES_PUBLISH_CACHE", 0, 0, 0, NULL, ses_publish_cache, NULL } }; typedef struct ses_cache { /* Source for all the configuration data pointers */ const struct ses_cfg_page *cfg_page; /* References into the config page. */ int ses_nsubencs; const struct ses_enc_desc * const *subencs; int ses_ntypes; const ses_type_t *ses_types; /* Source for all the status pointers */ const struct ses_status_page *status_page; /* Source for all the object descriptor pointers */ const struct ses_elem_descr_page *elm_descs_page; /* Source for all the additional object status pointers */ const struct ses_addl_elem_status_page *elm_addlstatus_page; } ses_cache_t; typedef struct ses_softc { uint32_t ses_flags; #define SES_FLAG_TIMEDCOMP 0x01 #define SES_FLAG_ADDLSTATUS 0x02 #define SES_FLAG_DESC 0x04 ses_control_reqlist_t ses_requests; ses_control_reqlist_t ses_pending_requests; } ses_softc_t; static int ses_search_globally = 0; SYSCTL_INT(_kern_cam_enc, OID_AUTO, search_globally, CTLFLAG_RWTUN, &ses_search_globally, 0, "Search for disks on other buses"); /** * \brief Reset a SES iterator to just before the first element * in the configuration. * * \param iter The iterator object to reset. * * The indexes within a reset iterator are invalid and will only * become valid upon completion of a ses_iter_seek_to() or a * ses_iter_next(). */ static void ses_iter_reset(struct ses_iterator *iter) { /* * Set our indexes to just before the first valid element * of the first type (ITERATOR_INDEX_INVALID == -1). This * simplifies the implementation of ses_iter_next(). 
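	 *
	 * A typical full traversal then looks like this (editor's
	 * sketch; ses_iter_seek_to() below uses the same pattern, and
	 * visit() merely stands in for per-element work):
	 *
	 *	struct ses_iterator iter;
	 *	enc_element_t *elm;
	 *
	 *	ses_iter_init(enc, cache, &iter);
	 *	while ((elm = ses_iter_next(&iter)) != NULL)
	 *		visit(elm);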
*/ iter->type_index = 0; iter->type_element_index = ITERATOR_INDEX_INVALID; iter->global_element_index = ITERATOR_INDEX_INVALID; iter->individual_element_index = ITERATOR_INDEX_INVALID; iter->saved_individual_element_index = ITERATOR_INDEX_INVALID; } /** * \brief Initialize the storage of a SES iterator and reset it to * the position just before the first element of the * configuration. * * \param enc The SES softc for the SES instance whose configuration * will be enumerated by this iterator. * \param cache The enclosure cache whose elements will be traversed. * \param iter The iterator object to initialize. */ static void ses_iter_init(enc_softc_t *enc, enc_cache_t *cache, struct ses_iterator *iter) { iter->enc = enc; iter->cache = cache; ses_iter_reset(iter); } /** * \brief Traverse the provided SES iterator to the next element * within the configuration. * * \param iter The iterator to move. * * \return If a valid next element exists, a pointer to its enc_element_t. * Otherwise NULL. */ static enc_element_t * ses_iter_next(struct ses_iterator *iter) { ses_cache_t *ses_cache; const ses_type_t *element_type; ses_cache = iter->cache->private; /* * Note: Treat nelms as signed, so we will hit this case * and immediately terminate the iteration if the * configuration has 0 objects. */ if (iter->global_element_index >= (int)iter->cache->nelms - 1) { /* Elements exhausted. */ iter->type_index = ITERATOR_INDEX_END; iter->type_element_index = ITERATOR_INDEX_END; iter->global_element_index = ITERATOR_INDEX_END; iter->individual_element_index = ITERATOR_INDEX_END; iter->saved_individual_element_index = ITERATOR_INDEX_END; return (NULL); } KASSERT((iter->type_index < ses_cache->ses_ntypes), ("Corrupted element iterator. %d not less than %d", iter->type_index, ses_cache->ses_ntypes)); element_type = &ses_cache->ses_types[iter->type_index]; iter->global_element_index++; iter->type_element_index++; /* * There is an object for overall type status in addition * to one for each allowed element, but only if the element * count is non-zero. */ if (iter->type_element_index > element_type->hdr->etype_maxelt) { /* * We've exhausted the elements of this type. * This next element belongs to the next type. */ iter->type_index++; iter->type_element_index = 0; iter->individual_element_index = ITERATOR_INDEX_INVALID; } if (iter->type_element_index > 0) { iter->individual_element_index = ++iter->saved_individual_element_index; } return (&iter->cache->elm_map[iter->global_element_index]); } /** * Element index types tracked by a SES iterator. */ typedef enum { /** * Index relative to all elements (overall and individual) * in the system. */ SES_ELEM_INDEX_GLOBAL, /** * \brief Index relative to all individual elements in the system. * * This index counts only individual elements, skipping overall * status elements. This is the index space of the additional * element status page (page 0xa). */ SES_ELEM_INDEX_INDIVIDUAL } ses_elem_index_type_t; /** * \brief Move the provided iterator forwards or backwards to the object * having the given index. * * \param iter The iterator on which to perform the seek. * \param element_index The index of the element to find. * \param index_type The type (global or individual) of element_index. * * \return If the element is found, a pointer to its enc_element_t. * Otherwise NULL.
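* * A minimal usage sketch (illustrative only; enc and cache are assumed * to be a valid enc_softc_t/enc_cache_t pair): * * struct ses_iterator iter; * enc_element_t *elm; * * ses_iter_init(enc, cache, &iter); * elm = ses_iter_seek_to(&iter, 5, SES_ELEM_INDEX_INDIVIDUAL); * if (elm != NULL) * ... iter now describes individual element 5 ...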
*/ static enc_element_t * ses_iter_seek_to(struct ses_iterator *iter, int element_index, ses_elem_index_type_t index_type) { enc_element_t *element; int *cur_index; if (index_type == SES_ELEM_INDEX_GLOBAL) cur_index = &iter->global_element_index; else cur_index = &iter->individual_element_index; if (*cur_index == element_index) { /* Already there. */ return (&iter->cache->elm_map[iter->global_element_index]); } ses_iter_reset(iter); while ((element = ses_iter_next(iter)) != NULL && *cur_index != element_index) ; if (*cur_index != element_index) return (NULL); return (element); } #if 0 static int ses_encode(enc_softc_t *, uint8_t *, int, int, struct ses_comstat *); #endif static int ses_set_timed_completion(enc_softc_t *, uint8_t); #if 0 static int ses_putstatus(enc_softc_t *, int, struct ses_comstat *); #endif static void ses_poll_status(enc_softc_t *); static void ses_print_addl_data(enc_softc_t *, enc_element_t *); /*=========================== SES cleanup routines ===========================*/ static void ses_cache_free_elm_addlstatus(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *ses_cache; ses_cache_t *other_ses_cache; enc_element_t *cur_elm; enc_element_t *last_elm; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache = cache->private; if (ses_cache->elm_addlstatus_page == NULL) return; for (cur_elm = cache->elm_map, last_elm = &cache->elm_map[cache->nelms]; cur_elm != last_elm; cur_elm++) { ses_element_t *elmpriv; elmpriv = cur_elm->elm_private; /* Clear references to the additional status page. */ bzero(&elmpriv->addl, sizeof(elmpriv->addl)); } other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->elm_addlstatus_page != ses_cache->elm_addlstatus_page) ENC_FREE(ses_cache->elm_addlstatus_page); ses_cache->elm_addlstatus_page = NULL; } static void ses_cache_free_elm_descs(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *ses_cache; ses_cache_t *other_ses_cache; enc_element_t *cur_elm; enc_element_t *last_elm; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache = cache->private; if (ses_cache->elm_descs_page == NULL) return; for (cur_elm = cache->elm_map, last_elm = &cache->elm_map[cache->nelms]; cur_elm != last_elm; cur_elm++) { ses_element_t *elmpriv; elmpriv = cur_elm->elm_private; elmpriv->descr_len = 0; elmpriv->descr = NULL; } other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->elm_descs_page != ses_cache->elm_descs_page) ENC_FREE(ses_cache->elm_descs_page); ses_cache->elm_descs_page = NULL; } static void ses_cache_free_status(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *ses_cache; ses_cache_t *other_ses_cache; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache = cache->private; if (ses_cache->status_page == NULL) return; other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->status_page != ses_cache->status_page) ENC_FREE(ses_cache->status_page); ses_cache->status_page = NULL; } static void ses_cache_free_elm_map(enc_softc_t *enc, enc_cache_t *cache) { enc_element_t *cur_elm; enc_element_t *last_elm; ENC_DLOG(enc, "%s: enter\n", __func__); if (cache->elm_map == NULL) return; ses_cache_free_elm_descs(enc, cache); ses_cache_free_elm_addlstatus(enc, cache); for (cur_elm = cache->elm_map, last_elm = &cache->elm_map[cache->nelms]; cur_elm != last_elm; cur_elm++) { ENC_FREE_AND_NULL(cur_elm->elm_private); } ENC_FREE_AND_NULL(cache->elm_map); cache->nelms = 0; ENC_DLOG(enc, "%s: exit\n", __func__); } static void ses_cache_free(enc_softc_t *enc, enc_cache_t *cache) { ses_cache_t *other_ses_cache; 
ses_cache_t *ses_cache; ENC_DLOG(enc, "%s: enter\n", __func__); ses_cache_free_elm_addlstatus(enc, cache); ses_cache_free_status(enc, cache); ses_cache_free_elm_map(enc, cache); ses_cache = cache->private; ses_cache->ses_ntypes = 0; other_ses_cache = enc_other_cache(enc, cache)->private; if (other_ses_cache->subencs != ses_cache->subencs) ENC_FREE(ses_cache->subencs); ses_cache->subencs = NULL; if (other_ses_cache->ses_types != ses_cache->ses_types) ENC_FREE(ses_cache->ses_types); ses_cache->ses_types = NULL; if (other_ses_cache->cfg_page != ses_cache->cfg_page) ENC_FREE(ses_cache->cfg_page); ses_cache->cfg_page = NULL; ENC_DLOG(enc, "%s: exit\n", __func__); } static void ses_cache_clone(enc_softc_t *enc, enc_cache_t *src, enc_cache_t *dst) { ses_cache_t *dst_ses_cache; ses_cache_t *src_ses_cache; enc_element_t *src_elm; enc_element_t *dst_elm; enc_element_t *last_elm; ses_cache_free(enc, dst); src_ses_cache = src->private; dst_ses_cache = dst->private; /* * The cloned enclosure cache and ses specific cache are * mostly identical to the source. */ *dst = *src; *dst_ses_cache = *src_ses_cache; /* * But the ses cache storage is still independent. Restore * the pointer that was clobbered by the structure copy above. */ dst->private = dst_ses_cache; /* * The element map is independent even though it starts out * pointing to the same constant page data. */ dst->elm_map = malloc(dst->nelms * sizeof(enc_element_t), M_SCSIENC, M_WAITOK); memcpy(dst->elm_map, src->elm_map, dst->nelms * sizeof(enc_element_t)); for (dst_elm = dst->elm_map, src_elm = src->elm_map, last_elm = &src->elm_map[src->nelms]; src_elm != last_elm; src_elm++, dst_elm++) { dst_elm->elm_private = malloc(sizeof(ses_element_t), M_SCSIENC, M_WAITOK); memcpy(dst_elm->elm_private, src_elm->elm_private, sizeof(ses_element_t)); } } /* Structure accessors. These are strongly typed to avoid errors. */ int ses_elm_sas_descr_type(union ses_elm_sas_hdr *obj) { return ((obj)->base_hdr.byte1 >> 6); } int ses_elm_addlstatus_proto(struct ses_elm_addlstatus_base_hdr *hdr) { return ((hdr)->byte0 & 0xf); } int ses_elm_addlstatus_eip(struct ses_elm_addlstatus_base_hdr *hdr) { return ((hdr)->byte0 >> 4) & 0x1; } int ses_elm_addlstatus_invalid(struct ses_elm_addlstatus_base_hdr *hdr) { return ((hdr)->byte0 >> 7); } int ses_elm_sas_type0_not_all_phys(union ses_elm_sas_hdr *hdr) { return ((hdr)->type0_noneip.byte1 & 0x1); } int ses_elm_sas_dev_phy_sata_dev(struct ses_elm_sas_device_phy *phy) { return ((phy)->target_ports & 0x1); } int ses_elm_sas_dev_phy_sata_port(struct ses_elm_sas_device_phy *phy) { return ((phy)->target_ports >> 7); } int ses_elm_sas_dev_phy_dev_type(struct ses_elm_sas_device_phy *phy) { return (((phy)->byte0 >> 4) & 0x7); } /** * \brief Verify that the cached configuration data in our softc * is valid for processing the page data corresponding to * the provided page header. * * \param ses_cache The SES cache to validate. * \param gen_code The 4 byte generation code from a SES diagnostic * page header. * * \return non-zero if true, 0 if false. */ static int ses_config_cache_valid(ses_cache_t *ses_cache, const uint8_t *gen_code) { uint32_t cache_gc; uint32_t cur_gc; if (ses_cache->cfg_page == NULL) return (0); cache_gc = scsi_4btoul(ses_cache->cfg_page->hdr.gen_code); cur_gc = scsi_4btoul(gen_code); return (cache_gc == cur_gc); } /** * Function signature for consumers of the ses_devids_iter() interface. 
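* * A conforming callback might look like this sketch (the name and body * are hypothetical, not part of the driver): * * static void * my_devid_cb(enc_softc_t *enc, enc_element_t *elm, * struct scsi_vpd_id_descriptor *devid, void *arg) * { * ... inspect or copy the generated device ID here ... * }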
*/ typedef void ses_devid_callback_t(enc_softc_t *, enc_element_t *, struct scsi_vpd_id_descriptor *, void *); /** * \brief Iterate over and create VPD device ID records from the * additional element status data for elm, passing that data * to the provided callback. * * \param enc SES instance containing elm * \param elm Element for which to extract device ID data. * \param callback The callback function to invoke on each generated * device ID descriptor for elm. * \param callback_arg Argument passed through to callback on each invocation. */ static void ses_devids_iter(enc_softc_t *enc, enc_element_t *elm, ses_devid_callback_t *callback, void *callback_arg) { ses_element_t *elmpriv; struct ses_addl_status *addl; u_int i; size_t devid_record_size; elmpriv = elm->elm_private; addl = &(elmpriv->addl); devid_record_size = SVPD_DEVICE_ID_DESC_HDR_LEN + sizeof(struct scsi_vpd_id_naa_ieee_reg); for (i = 0; i < addl->proto_hdr.sas->base_hdr.num_phys; i++) { uint8_t devid_buf[devid_record_size]; struct scsi_vpd_id_descriptor *devid; uint8_t *phy_addr; devid = (struct scsi_vpd_id_descriptor *)devid_buf; phy_addr = addl->proto_data.sasdev_phys[i].phy_addr; devid->proto_codeset = (SCSI_PROTO_SAS << SVPD_ID_PROTO_SHIFT) | SVPD_ID_CODESET_BINARY; devid->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA; devid->reserved = 0; devid->length = sizeof(struct scsi_vpd_id_naa_ieee_reg); memcpy(devid->identifier, phy_addr, devid->length); callback(enc, elm, devid, callback_arg); } } /** * Function signature for consumers of the ses_paths_iter() interface. */ typedef void ses_path_callback_t(enc_softc_t *, enc_element_t *, struct cam_path *, void *); /** * Argument package passed through ses_devids_iter() by * ses_paths_iter() to ses_path_iter_devid_callback(). */ typedef struct ses_path_iter_args { ses_path_callback_t *callback; void *callback_arg; } ses_path_iter_args_t; /** * ses_devids_iter() callback function used by ses_paths_iter() * to map device IDs to peripheral driver instances. * * \param enc SES instance containing elm * \param elm Element on which device ID matching is active. * \param devid A device ID corresponding to elm. * \param arg Argument passed through to callback on each invocation.
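* * Note: unless the kern.cam.enc.search_globally tunable is set, the * device match issued below is constrained to the bus hosting this * enclosure, so a matching device ID seen on another controller is * not reported.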
*/ static void ses_path_iter_devid_callback(enc_softc_t *enc, enc_element_t *elem, struct scsi_vpd_id_descriptor *devid, void *arg) { struct ccb_dev_match cdm; struct dev_match_pattern match_pattern; struct dev_match_result match_result; struct device_match_result *device_match; struct device_match_pattern *device_pattern; ses_path_iter_args_t *args; struct cam_path *path; args = (ses_path_iter_args_t *)arg; match_pattern.type = DEV_MATCH_DEVICE; device_pattern = &match_pattern.pattern.device_pattern; device_pattern->flags = DEV_MATCH_DEVID; device_pattern->data.devid_pat.id_len = offsetof(struct scsi_vpd_id_descriptor, identifier) + devid->length; memcpy(device_pattern->data.devid_pat.id, devid, device_pattern->data.devid_pat.id_len); if (!ses_search_globally) { device_pattern->flags |= DEV_MATCH_PATH; device_pattern->path_id = xpt_path_path_id(enc->periph->path); } memset(&cdm, 0, sizeof(cdm)); if (xpt_create_path(&cdm.ccb_h.path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) return; cdm.ccb_h.func_code = XPT_DEV_MATCH; cdm.num_patterns = 1; cdm.patterns = &match_pattern; cdm.pattern_buf_len = sizeof(match_pattern); cdm.match_buf_len = sizeof(match_result); cdm.matches = &match_result; do { xpt_action((union ccb *)&cdm); if ((cdm.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP || (cdm.status != CAM_DEV_MATCH_LAST && cdm.status != CAM_DEV_MATCH_MORE) || cdm.num_matches == 0) break; device_match = &match_result.result.device_result; if (xpt_create_path(&path, /*periph*/NULL, device_match->path_id, device_match->target_id, device_match->target_lun) == CAM_REQ_CMP) { args->callback(enc, elem, path, args->callback_arg); xpt_free_path(path); } } while (cdm.status == CAM_DEV_MATCH_MORE); xpt_free_path(cdm.ccb_h.path); } /** * \brief Iterate over and find the matching periph objects for the * specified element. * * \param enc SES instance containing elm * \param elm Element for which to perform periph object matching. * \param callback The callback function to invoke with each matching * periph object. * \param callback_arg Argument passed through to callback on each invocation. */ static void ses_paths_iter(enc_softc_t *enc, enc_element_t *elm, ses_path_callback_t *callback, void *callback_arg) { ses_element_t *elmpriv; struct ses_addl_status *addl; elmpriv = elm->elm_private; addl = &(elmpriv->addl); if (addl->hdr == NULL) return; switch(ses_elm_addlstatus_proto(addl->hdr)) { case SPSP_PROTO_SAS: if (addl->proto_hdr.sas != NULL && addl->proto_data.sasdev_phys != NULL) { ses_path_iter_args_t args; args.callback = callback; args.callback_arg = callback_arg; ses_devids_iter(enc, elm, ses_path_iter_devid_callback, &args); } break; case SPSP_PROTO_ATA: if (addl->proto_hdr.ata != NULL) { struct cam_path *path; struct ccb_getdev cgd; if (xpt_create_path(&path, /*periph*/NULL, scsi_4btoul(addl->proto_hdr.ata->bus), scsi_4btoul(addl->proto_hdr.ata->target), 0) != CAM_REQ_CMP) return; memset(&cgd, 0, sizeof(cgd)); xpt_setup_ccb(&cgd.ccb_h, path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (cgd.ccb_h.status == CAM_REQ_CMP) callback(enc, elm, path, callback_arg); xpt_free_path(path); } break; } } /** * ses_paths_iter() callback function used by ses_get_elmdevname() * to record periph driver instance strings corresponding to a SES * element. * * \param enc SES instance containing elm * \param elm Element on which periph matching is active. * \param path A path to a periph instance that matches elm.
* \param arg Argument passed through to callback on each invocation. */ static void ses_elmdevname_callback(enc_softc_t *enc, enc_element_t *elem, struct cam_path *path, void *arg) { struct sbuf *sb; sb = (struct sbuf *)arg; cam_periph_list(path, sb); } /** * Argument package passed through ses_paths_iter() to * ses_setphyspath_callback(). */ typedef struct ses_setphyspath_callback_args { struct sbuf *physpath; int num_set; } ses_setphyspath_callback_args_t; /** * \brief ses_paths_iter() callback to set the physical path on the * CAM EDT entries corresponding to a given SES element. * * \param enc SES instance containing elm * \param elm Element on which periph matching is active. * \param path A path to a periph instance that matches elm. * \param arg Argument passed through to callback on each invocation. */ static void ses_setphyspath_callback(enc_softc_t *enc, enc_element_t *elm, struct cam_path *path, void *arg) { struct ccb_dev_advinfo cdai; ses_setphyspath_callback_args_t *args; char *old_physpath; args = (ses_setphyspath_callback_args_t *)arg; old_physpath = malloc(MAXPATHLEN, M_SCSIENC, M_WAITOK|M_ZERO); xpt_path_lock(path); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_PHYS_PATH; cdai.flags = CDAI_FLAG_NONE; cdai.bufsiz = MAXPATHLEN; cdai.buf = old_physpath; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (strcmp(old_physpath, sbuf_data(args->physpath)) != 0) { xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_PHYS_PATH; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = sbuf_len(args->physpath); cdai.buf = sbuf_data(args->physpath); xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status == CAM_REQ_CMP) args->num_set++; } xpt_path_unlock(path); free(old_physpath, M_SCSIENC); } /** * \brief Set a device's physical path string in CAM XPT. * * \param enc SES instance containing elm * \param elm Element to publish physical path string for * \param iter Iterator whose state corresponds to elm * * \return 0 on success, errno otherwise. */ static int ses_set_physpath(enc_softc_t *enc, enc_element_t *elm, struct ses_iterator *iter) { struct ccb_dev_advinfo cdai; ses_setphyspath_callback_args_t args; int i, ret; struct sbuf sb; struct scsi_vpd_id_descriptor *idd; uint8_t *devid; ses_element_t *elmpriv; const char *c; ret = EIO; devid = NULL; elmpriv = elm->elm_private; if (elmpriv->addl.hdr == NULL) goto out; /* * Assemble the components of the physical path starting with * the device ID of the enclosure itself.
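* * The assembled string has the form (example values only): * * id1,enc@n5003048001234567/type@17/slot@3/elmdesc@Slot_04 * * where the elmdesc component is present only when the element has a * descriptor; non-printable, blank, and '/' characters within it are * replaced by '_'.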
*/ memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, enc->periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.buftype = CDAI_TYPE_SCSI_DEVID; cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; cdai.buf = devid = malloc(cdai.bufsiz, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); cam_periph_unlock(enc->periph); if (cdai.ccb_h.status != CAM_REQ_CMP) goto out; idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_naa_ieee_reg); if (idd == NULL) goto out; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) { ret = ENOMEM; goto out; } /* Next, generate the physical path string */ sbuf_printf(&sb, "id1,enc@n%jx/type@%x/slot@%x", scsi_8btou64(idd->identifier), iter->type_index, iter->type_element_index); /* Append the element descriptor if one exists */ if (elmpriv->descr != NULL && elmpriv->descr_len > 0) { sbuf_cat(&sb, "/elmdesc@"); for (i = 0, c = elmpriv->descr; i < elmpriv->descr_len; i++, c++) { if (!isprint(*c) || isspace(*c) || *c == '/') sbuf_putc(&sb, '_'); else sbuf_putc(&sb, *c); } } sbuf_finish(&sb); /* * Set this physical path on any CAM devices with a device ID * descriptor that matches one created from the SES additional * status data for this element. */ args.physpath= &sb; args.num_set = 0; ses_paths_iter(enc, elm, ses_setphyspath_callback, &args); sbuf_delete(&sb); ret = args.num_set == 0 ? ENOENT : 0; out: if (devid != NULL) ENC_FREE(devid); return (ret); } /** * \brief Helper to set the CDB fields appropriately. * * \param cdb Buffer containing the cdb. * \param pagenum SES diagnostic page to query for. * \param dir Direction of query. */ static void ses_page_cdb(char *cdb, int bufsiz, SesDiagPageCodes pagenum, int dir) { /* Ref: SPC-4 r25 Section 6.20 Table 223 */ if (dir == CAM_DIR_IN) { cdb[0] = RECEIVE_DIAGNOSTIC; cdb[1] = 1; /* Set page code valid bit */ cdb[2] = pagenum; } else { cdb[0] = SEND_DIAGNOSTIC; cdb[1] = 0x10; cdb[2] = pagenum; } cdb[3] = bufsiz >> 8; /* high bits */ cdb[4] = bufsiz & 0xff; /* low bits */ cdb[5] = 0; } /** * \brief Discover whether this instance supports timed completion of a * RECEIVE DIAGNOSTIC RESULTS command requesting the Enclosure Status * page, and store the result in the softc, updating if necessary. * * \param enc SES instance to query and update. * \param tc_en Value of timed completion to set (see \return). * * \return 1 if timed completion enabled, 0 otherwise. */ static int ses_set_timed_completion(enc_softc_t *enc, uint8_t tc_en) { union ccb *ccb; struct cam_periph *periph; struct ses_mgmt_mode_page *mgmt; uint8_t *mode_buf; size_t mode_buf_len; ses_softc_t *ses; periph = enc->periph; ses = enc->enc_private; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); mode_buf_len = sizeof(struct ses_mgmt_mode_page); mode_buf = ENC_MALLOCZ(mode_buf_len); if (mode_buf == NULL) goto out; scsi_mode_sense(&ccb->csio, /*retries*/4, NULL, MSG_SIMPLE_Q_TAG, /*dbd*/FALSE, SMS_PAGE_CTRL_CURRENT, SES_MGMT_MODE_PAGE_CODE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000); /* * Ignore illegal request errors, as they are quite common and we * will print something out in that case anyway. 
*/ cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS|SF_QUIET_IR, NULL); if (ccb->ccb_h.status != CAM_REQ_CMP) { ENC_VLOG(enc, "Timed Completion Unsupported\n"); goto release; } /* Skip the mode select if the desired value is already set */ mgmt = (struct ses_mgmt_mode_page *)mode_buf; if ((mgmt->byte5 & SES_MGMT_TIMED_COMP_EN) == tc_en) goto done; /* Value is not what we wanted, set it */ if (tc_en) mgmt->byte5 |= SES_MGMT_TIMED_COMP_EN; else mgmt->byte5 &= ~SES_MGMT_TIMED_COMP_EN; /* SES2r20: a completion time of zero means as long as possible */ bzero(&mgmt->max_comp_time, sizeof(mgmt->max_comp_time)); scsi_mode_select(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG, /*page_fmt*/FALSE, /*save_pages*/TRUE, mode_buf, mode_buf_len, SSD_FULL_SIZE, /*timeout*/60 * 1000); cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS, NULL); if (ccb->ccb_h.status != CAM_REQ_CMP) { ENC_VLOG(enc, "Timed Completion Set Failed\n"); goto release; } done: if ((mgmt->byte5 & SES_MGMT_TIMED_COMP_EN) != 0) { ENC_LOG(enc, "Timed Completion Enabled\n"); ses->ses_flags |= SES_FLAG_TIMEDCOMP; } else { ENC_LOG(enc, "Timed Completion Disabled\n"); ses->ses_flags &= ~SES_FLAG_TIMEDCOMP; } release: ENC_FREE(mode_buf); xpt_release_ccb(ccb); out: return (ses->ses_flags & SES_FLAG_TIMEDCOMP); } /** * \brief Process the list of supported pages and update flags. * * \param enc SES device to query. * \param buf Buffer containing the config page. * \param xfer_len Length of the config page in the buffer. * * \return 0 on success, errno otherwise. */ static int ses_process_pages(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { ses_softc_t *ses; struct scsi_diag_page *page; int err, i, length; CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("entering %s(%p, %d)\n", __func__, bufp, xfer_len)); ses = enc->enc_private; err = -1; if (error != 0) { err = error; goto out; } if (xfer_len < sizeof(*page)) { ENC_VLOG(enc, "Unable to parse Diag Pages List Header\n"); err = EIO; goto out; } page = (struct scsi_diag_page *)*bufp; length = scsi_2btoul(page->length); if (length + offsetof(struct scsi_diag_page, params) > xfer_len) { ENC_VLOG(enc, "Diag Pages List Too Long\n"); goto out; } ENC_DLOG(enc, "%s: page length %d, xfer_len %d\n", __func__, length, xfer_len); err = 0; for (i = 0; i < length; i++) { if (page->params[i] == SesElementDescriptor) ses->ses_flags |= SES_FLAG_DESC; else if (page->params[i] == SesAddlElementStatus) ses->ses_flags |= SES_FLAG_ADDLSTATUS; } out: ENC_DLOG(enc, "%s: exiting with err %d\n", __func__, err); return (err); } /** * \brief Process the config page and update associated structures. * * \param enc SES device to query. * \param buf Buffer containing the config page. * \param xfer_len Length of the config page in the buffer. * * \return 0 on success, errno otherwise. 
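* * For reference, the page parsed here is laid out roughly as follows * (a sketch only; see the SES specification for the authoritative * layout): * * struct ses_cfg_page hdr (includes the generation code) * struct ses_enc_desc subenc[] (one per subenclosure) * struct ses_elm_type_desc type[] (one per element type) * char type_text[] (concatenated, one run per type)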
*/ static int ses_process_config(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct ses_iterator iter; - ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; uint8_t *buf; int length; int err; int nelm; int ntype; struct ses_cfg_page *cfg_page; struct ses_enc_desc *buf_subenc; const struct ses_enc_desc **subencs; const struct ses_enc_desc **cur_subenc; const struct ses_enc_desc **last_subenc; ses_type_t *ses_types; ses_type_t *sestype; const struct ses_elm_type_desc *cur_buf_type; const struct ses_elm_type_desc *last_buf_type; uint8_t *last_valid_byte; enc_element_t *element; const char *type_text; CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("entering %s(%p, %d)\n", __func__, bufp, xfer_len)); - ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; buf = *bufp; err = -1; if (error != 0) { err = error; goto out; } if (xfer_len < sizeof(cfg_page->hdr)) { ENC_VLOG(enc, "Unable to parse SES Config Header\n"); err = EIO; goto out; } cfg_page = (struct ses_cfg_page *)buf; length = ses_page_length(&cfg_page->hdr); if (length > xfer_len) { ENC_VLOG(enc, "Enclosure Config Page Too Long\n"); goto out; } last_valid_byte = &buf[length - 1]; ENC_DLOG(enc, "%s: total page length %d, xfer_len %d\n", __func__, length, xfer_len); err = 0; if (ses_config_cache_valid(ses_cache, cfg_page->hdr.gen_code)) { /* Our cache is still valid. Proceed to fetching status. */ goto out; } /* Cache is no longer valid. Free old data to make way for new. */ ses_cache_free(enc, enc_cache); ENC_VLOG(enc, "Generation Code 0x%x has %d SubEnclosures\n", scsi_4btoul(cfg_page->hdr.gen_code), ses_cfg_page_get_num_subenc(cfg_page)); /* Take ownership of the buffer. */ ses_cache->cfg_page = cfg_page; *bufp = NULL; /* * Now waltz through all the subenclosures summing the number of * types available in each. */ subencs = malloc(ses_cfg_page_get_num_subenc(cfg_page) * sizeof(*subencs), M_SCSIENC, M_WAITOK|M_ZERO); /* * Sub-enclosure data is const after construction (i.e. when * accessed via our cache object). * * The cast here is not required in C++ but C99 is not so * sophisticated (see C99 6.5.16.1(1)). */ ses_cache->ses_nsubencs = ses_cfg_page_get_num_subenc(cfg_page); ses_cache->subencs = subencs; buf_subenc = cfg_page->subencs; cur_subenc = subencs; last_subenc = &subencs[ses_cache->ses_nsubencs - 1]; ntype = 0; while (cur_subenc <= last_subenc) { if (!ses_enc_desc_is_complete(buf_subenc, last_valid_byte)) { ENC_VLOG(enc, "Enclosure %d Beyond End of " "Descriptors\n", cur_subenc - subencs); err = EIO; goto out; } ENC_VLOG(enc, " SubEnclosure ID %d, %d Types With this ID, " "Descriptor Length %d, offset %d\n", buf_subenc->subenc_id, buf_subenc->num_types, buf_subenc->length, &buf_subenc->byte0 - buf); ENC_VLOG(enc, "WWN: %jx\n", (uintmax_t)scsi_8btou64(buf_subenc->logical_id)); ntype += buf_subenc->num_types; *cur_subenc = buf_subenc; cur_subenc++; buf_subenc = ses_enc_desc_next(buf_subenc); } /* Process the type headers. */ ses_types = malloc(ntype * sizeof(*ses_types), M_SCSIENC, M_WAITOK|M_ZERO); /* * Type data is const after construction (i.e. when accessed via * our cache object).
*/ ses_cache->ses_ntypes = ntype; ses_cache->ses_types = ses_types; cur_buf_type = (const struct ses_elm_type_desc *) (&(*last_subenc)->length + (*last_subenc)->length + 1); last_buf_type = cur_buf_type + ntype - 1; type_text = (const char *)(last_buf_type + 1); nelm = 0; sestype = ses_types; while (cur_buf_type <= last_buf_type) { if (&cur_buf_type->etype_txt_len > last_valid_byte) { ENC_VLOG(enc, "Runt Enclosure Type Header %d\n", sestype - ses_types); err = EIO; goto out; } sestype->hdr = cur_buf_type; sestype->text = type_text; type_text += cur_buf_type->etype_txt_len; ENC_VLOG(enc, " Type Desc[%d]: Type 0x%x, MaxElt %d, In Subenc " "%d, Text Length %d: %.*s\n", sestype - ses_types, sestype->hdr->etype_elm_type, sestype->hdr->etype_maxelt, sestype->hdr->etype_subenc, sestype->hdr->etype_txt_len, sestype->hdr->etype_txt_len, sestype->text); nelm += sestype->hdr->etype_maxelt + /*overall status element*/1; sestype++; cur_buf_type++; } /* Create the object map. */ enc_cache->elm_map = malloc(nelm * sizeof(enc_element_t), M_SCSIENC, M_WAITOK|M_ZERO); enc_cache->nelms = nelm; ses_iter_init(enc, enc_cache, &iter); while ((element = ses_iter_next(&iter)) != NULL) { const struct ses_elm_type_desc *thdr; ENC_DLOG(enc, "%s: checking obj %d(%d,%d)\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index); thdr = ses_cache->ses_types[iter.type_index].hdr; element->elm_idx = iter.global_element_index; element->elm_type = thdr->etype_elm_type; element->subenclosure = thdr->etype_subenc; element->type_elm_idx = iter.type_element_index; element->elm_private = malloc(sizeof(ses_element_t), M_SCSIENC, M_WAITOK|M_ZERO); ENC_DLOG(enc, "%s: creating elmpriv %d(%d,%d) subenc %d " "type 0x%x\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index, thdr->etype_subenc, thdr->etype_elm_type); } err = 0; out: if (err) ses_cache_free(enc, enc_cache); else { ses_poll_status(enc); enc_update_request(enc, SES_PUBLISH_CACHE); } ENC_DLOG(enc, "%s: exiting with err %d\n", __func__, err); return (err); } /** * \brief Update the status page and associated structures. * * \param enc SES softc to update for. * \param buf Buffer containing the status page. * \param bufsz Amount of data in the buffer. * * \return 0 on success, errno otherwise. */ static int ses_process_status(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct ses_iterator iter; enc_element_t *element; ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; uint8_t *buf; int err = -1; int length; struct ses_status_page *page; union ses_status_element *cur_stat; union ses_status_element *last_stat; ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; buf = *bufp; ENC_DLOG(enc, "%s: enter (%p, %p, %d)\n", __func__, enc, buf, xfer_len); page = (struct ses_status_page *)buf; length = ses_page_length(&page->hdr); if (error != 0) { err = error; goto out; } /* * Make sure the length fits in the buffer. * * XXX all this means is that the page is larger than the space * we allocated. Since we use a statically sized buffer, this * could happen... Need to use dynamic discovery of the size. */ if (length > xfer_len) { ENC_VLOG(enc, "Enclosure Status Page Too Long\n"); goto out; } /* Check for simple enclosure reporting short enclosure status.
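* A short status page carries its summary directly in the page * header's page_specific_flags, so there are no per-element entries * to parse and any cached element map is simply discarded.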
*/ if (length >= 4 && page->hdr.page_code == SesShortStatus) { ENC_DLOG(enc, "Got Short Enclosure Status page\n"); ses->ses_flags &= ~(SES_FLAG_ADDLSTATUS | SES_FLAG_DESC); ses_cache_free(enc, enc_cache); enc_cache->enc_status = page->hdr.page_specific_flags; enc_update_request(enc, SES_PUBLISH_CACHE); err = 0; goto out; } /* Make sure the length contains at least one header and status */ if (length < (sizeof(*page) + sizeof(*page->elements))) { ENC_VLOG(enc, "Enclosure Status Page Too Short\n"); goto out; } if (!ses_config_cache_valid(ses_cache, page->hdr.gen_code)) { ENC_DLOG(enc, "%s: Generation count change detected\n", __func__); enc_update_request(enc, SES_UPDATE_GETCONFIG); goto out; } ses_cache_free_status(enc, enc_cache); ses_cache->status_page = page; *bufp = NULL; enc_cache->enc_status = page->hdr.page_specific_flags; /* * Read in individual element status. The element order * matches the order reported in the config page (i.e. the * order of an unfiltered iteration of the config objects). */ ses_iter_init(enc, enc_cache, &iter); cur_stat = page->elements; last_stat = (union ses_status_element *) &buf[length - sizeof(*last_stat)]; ENC_DLOG(enc, "%s: total page length %d, xfer_len %d\n", __func__, length, xfer_len); while (cur_stat <= last_stat && (element = ses_iter_next(&iter)) != NULL) { ENC_DLOG(enc, "%s: obj %d(%d,%d) off=0x%tx status=%jx\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index, (uint8_t *)cur_stat - buf, scsi_4btoul(cur_stat->bytes)); memcpy(&element->encstat, cur_stat, sizeof(element->encstat)); element->svalid = 1; cur_stat++; } if (ses_iter_next(&iter) != NULL) { ENC_VLOG(enc, "Status page, length insufficient for " "expected number of objects\n"); } else { if (cur_stat <= last_stat) ENC_VLOG(enc, "Status page, exhausted objects before " "exhausting page\n"); enc_update_request(enc, SES_PUBLISH_CACHE); err = 0; } out: ENC_DLOG(enc, "%s: exiting with error %d\n", __func__, err); return (err); } typedef enum { /** * The enclosure should not provide additional element * status for this element type in page 0x0A. * * \note This status is returned for any types not * listed in SES3r02. Further types added in a * future specification will be incorrectly * classified. */ TYPE_ADDLSTATUS_NONE, /** * The element type provides additional element status * in page 0x0A. */ TYPE_ADDLSTATUS_MANDATORY, /** * The element type may provide additional element status * in page 0x0A, but is not required to do so. */ TYPE_ADDLSTATUS_OPTIONAL } ses_addlstatus_avail_t; /** * \brief Check to see whether a given type (as obtained via type headers) is * supported by the additional status command. * * \param enc SES softc to check. * \param typidx Type index to check for. * * \return An enumeration indicating if additional status is mandatory, * optional, or not required for this type. */ static ses_addlstatus_avail_t ses_typehasaddlstatus(enc_softc_t *enc, uint8_t typidx) { enc_cache_t *enc_cache; ses_cache_t *ses_cache; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; switch(ses_cache->ses_types[typidx].hdr->etype_elm_type) { case ELMTYP_DEVICE: case ELMTYP_ARRAY_DEV: case ELMTYP_SAS_EXP: return (TYPE_ADDLSTATUS_MANDATORY); case ELMTYP_SCSI_INI: case ELMTYP_SCSI_TGT: case ELMTYP_ESCC: return (TYPE_ADDLSTATUS_OPTIONAL); default: /* No additional status information available.
*/ break; } return (TYPE_ADDLSTATUS_NONE); } static int ses_get_elm_addlstatus_fc(enc_softc_t *, enc_cache_t *, uint8_t *, int); static int ses_get_elm_addlstatus_sas(enc_softc_t *, enc_cache_t *, uint8_t *, int, int, int, int); static int ses_get_elm_addlstatus_ata(enc_softc_t *, enc_cache_t *, uint8_t *, int, int, int, int); /** * \brief Parse the additional status element data for each object. * * \param enc The SES softc to update. * \param buf The buffer containing the additional status * element response. * \param xfer_len Size of the buffer. * * \return 0 on success, errno otherwise. */ static int ses_process_elm_addlstatus(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct ses_iterator iter, titer; int eip; int err; int length; int offset; enc_cache_t *enc_cache; ses_cache_t *ses_cache; uint8_t *buf; ses_element_t *elmpriv; const struct ses_page_hdr *hdr; enc_element_t *element, *telement; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; buf = *bufp; err = -1; if (error != 0) { err = error; goto out; } ses_cache_free_elm_addlstatus(enc, enc_cache); ses_cache->elm_addlstatus_page = (struct ses_addl_elem_status_page *)buf; *bufp = NULL; /* * The objects appear in the same order here as in Enclosure Status, * which itself is ordered by the Type Descriptors from the Config * page. However, it is necessary to skip elements that are not * supported by this page when counting them. */ hdr = &ses_cache->elm_addlstatus_page->hdr; length = ses_page_length(hdr); ENC_DLOG(enc, "Additional Element Status Page Length 0x%x\n", length); /* Make sure the length includes at least one header. */ if (length < sizeof(*hdr)+sizeof(struct ses_elm_addlstatus_base_hdr)) { ENC_VLOG(enc, "Runt Additional Element Status Page\n"); goto out; } if (length > xfer_len) { ENC_VLOG(enc, "Additional Element Status Page Too Long\n"); goto out; } if (!ses_config_cache_valid(ses_cache, hdr->gen_code)) { ENC_DLOG(enc, "%s: Generation count change detected\n", __func__); enc_update_request(enc, SES_UPDATE_GETCONFIG); goto out; } offset = sizeof(struct ses_page_hdr); ses_iter_init(enc, enc_cache, &iter); while (offset < length && (element = ses_iter_next(&iter)) != NULL) { struct ses_elm_addlstatus_base_hdr *elm_hdr; int proto_info_len; ses_addlstatus_avail_t status_type; /* * Additional element status is only provided for * individual elements (i.e. overall status elements * are excluded) and those of the types specified * in the SES spec. */ status_type = ses_typehasaddlstatus(enc, iter.type_index); if (iter.individual_element_index == ITERATOR_INDEX_INVALID || status_type == TYPE_ADDLSTATUS_NONE) continue; elm_hdr = (struct ses_elm_addlstatus_base_hdr *)&buf[offset]; eip = ses_elm_addlstatus_eip(elm_hdr); if (eip) { struct ses_elm_addlstatus_eip_hdr *eip_hdr; int expected_index, index; ses_elem_index_type_t index_type; eip_hdr = (struct ses_elm_addlstatus_eip_hdr *)elm_hdr; if (SES_ADDL_EIP_EIIOE_EI_GLOB(eip_hdr->byte2)) { index_type = SES_ELEM_INDEX_GLOBAL; expected_index = iter.global_element_index; } else { index_type = SES_ELEM_INDEX_INDIVIDUAL; expected_index = iter.individual_element_index; } if (eip_hdr->element_index < expected_index) { ENC_VLOG(enc, "%s: provided %selement index " "%d is lower than expected %d\n", __func__, SES_ADDL_EIP_EIIOE_EI_GLOB( eip_hdr->byte2) ?
"global " : "", eip_hdr->element_index, expected_index); goto badindex; } titer = iter; telement = ses_iter_seek_to(&titer, eip_hdr->element_index, index_type); if (telement == NULL) { ENC_VLOG(enc, "%s: provided %selement index " "%d does not exist\n", __func__, SES_ADDL_EIP_EIIOE_EI_GLOB(eip_hdr->byte2) ? "global " : "", eip_hdr->element_index); goto badindex; } if (ses_typehasaddlstatus(enc, titer.type_index) == TYPE_ADDLSTATUS_NONE) { ENC_VLOG(enc, "%s: provided %selement index " "%d can't have additional status\n", __func__, SES_ADDL_EIP_EIIOE_EI_GLOB(eip_hdr->byte2) ? "global " : "", eip_hdr->element_index); badindex: /* * If we expected mandatory element, we may * guess it was just a wrong index and we may * use the status. If element was optional, * then we have no idea where status belongs. */ if (status_type == TYPE_ADDLSTATUS_OPTIONAL) break; } else { iter = titer; element = telement; } if (SES_ADDL_EIP_EIIOE_EI_GLOB(eip_hdr->byte2)) index = iter.global_element_index; else index = iter.individual_element_index; if (index > expected_index && status_type == TYPE_ADDLSTATUS_MANDATORY) { ENC_VLOG(enc, "%s: provided %s element" "index %d skips mandatory status " " element at index %d\n", __func__, SES_ADDL_EIP_EIIOE_EI_GLOB( eip_hdr->byte2) ? "global " : "", index, expected_index); } } elmpriv = element->elm_private; ENC_DLOG(enc, "%s: global element index=%d, type index=%d " "type element index=%d, offset=0x%x, " "byte0=0x%x, length=0x%x\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index, offset, elm_hdr->byte0, elm_hdr->length); /* Skip to after the length field */ offset += sizeof(struct ses_elm_addlstatus_base_hdr); /* Make sure the descriptor is within bounds */ if ((offset + elm_hdr->length) > length) { ENC_VLOG(enc, "Element %d Beyond End " "of Additional Element Status Descriptors\n", iter.global_element_index); break; } /* Skip elements marked as invalid. */ if (ses_elm_addlstatus_invalid(elm_hdr)) { offset += elm_hdr->length; continue; } elmpriv->addl.hdr = elm_hdr; /* Advance to the protocol data, skipping eip bytes if needed */ offset += (eip * SES_EIP_HDR_EXTRA_LEN); proto_info_len = elm_hdr->length - (eip * SES_EIP_HDR_EXTRA_LEN); /* Errors in this block are ignored as they are non-fatal */ switch(ses_elm_addlstatus_proto(elm_hdr)) { case SPSP_PROTO_FC: if (elm_hdr->length == 0) break; ses_get_elm_addlstatus_fc(enc, enc_cache, &buf[offset], proto_info_len); break; case SPSP_PROTO_SAS: if (elm_hdr->length <= 2) break; ses_get_elm_addlstatus_sas(enc, enc_cache, &buf[offset], proto_info_len, eip, iter.type_index, iter.global_element_index); break; case SPSP_PROTO_ATA: ses_get_elm_addlstatus_ata(enc, enc_cache, &buf[offset], proto_info_len, eip, iter.type_index, iter.global_element_index); break; default: ENC_VLOG(enc, "Element %d: Unknown Additional Element " "Protocol 0x%x\n", iter.global_element_index, ses_elm_addlstatus_proto(elm_hdr)); break; } offset += proto_info_len; } err = 0; out: if (err) ses_cache_free_elm_addlstatus(enc, enc_cache); enc_update_request(enc, SES_PUBLISH_PHYSPATHS); enc_update_request(enc, SES_PUBLISH_CACHE); return (err); } static int ses_process_control_request(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { ses_softc_t *ses; ses = enc->enc_private; /* * Possible errors: * o Generation count wrong. * o Some SCSI status error. 
*/ ses_terminate_control_requests(&ses->ses_pending_requests, error); ses_poll_status(enc); return (0); } static int ses_publish_physpaths(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { struct ses_iterator iter; enc_cache_t *enc_cache; enc_element_t *element; enc_cache = &enc->enc_daemon_cache; ses_iter_init(enc, enc_cache, &iter); while ((element = ses_iter_next(&iter)) != NULL) { /* * ses_set_physpath() returns success if we changed * the physpath of any element. This allows us to * only announce devices once regardless of how * many times we process additional element status. */ if (ses_set_physpath(enc, element, &iter) == 0) ses_print_addl_data(enc, element); } return (0); } static int ses_publish_cache(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { sx_xlock(&enc->enc_cache_lock); ses_cache_clone(enc, /*src*/&enc->enc_daemon_cache, /*dst*/&enc->enc_cache); sx_xunlock(&enc->enc_cache_lock); return (0); } /* * \brief Sanitize an element descriptor * * The SES4r3 standard, sections 3.1.2 and 6.1.10, specifies that element * descriptors may only contain ASCII characters in the range 0x20 to 0x7e. * But some vendors violate that rule. Ensure that we only expose compliant * descriptors to userland. * * \param desc SES element descriptor as reported by the hardware * \param len Length of desc in bytes, not necessarily including * trailing NUL. It will be modified if desc is invalid. */ static const char* ses_sanitize_elm_desc(const char *desc, uint16_t *len) { const char *invalid = ""; int i; for (i = 0; i < *len; i++) { if (desc[i] == 0) { break; } else if (desc[i] < 0x20 || desc[i] > 0x7e) { *len = strlen(invalid); return (invalid); } } return (desc); } /** * \brief Parse the descriptors for each object. * * \param enc The SES softc to update. * \param buf The buffer containing the descriptor list response. * \param xfer_len Size of the buffer. * * \return 0 on success, errno otherwise. 
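* * The page parsed here is laid out roughly as follows (sketch only): * * struct ses_page_hdr hdr * repeated once per element: * struct ses_elm_desc_hdr (2-byte big-endian text length) * char text[length]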
*/ static int ses_process_elm_descs(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t **bufp, int error, int xfer_len) { ses_softc_t *ses; struct ses_iterator iter; enc_element_t *element; int err; int offset; u_long length, plength; enc_cache_t *enc_cache; ses_cache_t *ses_cache; uint8_t *buf; ses_element_t *elmpriv; const struct ses_page_hdr *phdr; const struct ses_elm_desc_hdr *hdr; ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; buf = *bufp; err = -1; if (error != 0) { err = error; goto out; } ses_cache_free_elm_descs(enc, enc_cache); ses_cache->elm_descs_page = (struct ses_elem_descr_page *)buf; *bufp = NULL; phdr = &ses_cache->elm_descs_page->hdr; plength = ses_page_length(phdr); if (xfer_len < sizeof(struct ses_page_hdr)) { ENC_VLOG(enc, "Runt Element Descriptor Page\n"); goto out; } if (plength > xfer_len) { ENC_VLOG(enc, "Element Descriptor Page Too Long\n"); goto out; } if (!ses_config_cache_valid(ses_cache, phdr->gen_code)) { ENC_VLOG(enc, "%s: Generation count change detected\n", __func__); enc_update_request(enc, SES_UPDATE_GETCONFIG); goto out; } offset = sizeof(struct ses_page_hdr); ses_iter_init(enc, enc_cache, &iter); while (offset < plength && (element = ses_iter_next(&iter)) != NULL) { if ((offset + sizeof(struct ses_elm_desc_hdr)) > plength) { ENC_VLOG(enc, "Element %d Descriptor Header Past " "End of Buffer\n", iter.global_element_index); goto out; } hdr = (struct ses_elm_desc_hdr *)&buf[offset]; length = scsi_2btoul(hdr->length); ENC_DLOG(enc, "%s: obj %d(%d,%d) length=%d off=%d\n", __func__, iter.global_element_index, iter.type_index, iter.type_element_index, length, offset); if ((offset + sizeof(*hdr) + length) > plength) { ENC_VLOG(enc, "Element %d Descriptor Past " "End of Buffer\n", iter.global_element_index); goto out; } offset += sizeof(*hdr); if (length > 0) { elmpriv = element->elm_private; elmpriv->descr_len = length; elmpriv->descr = ses_sanitize_elm_desc(&buf[offset], &elmpriv->descr_len); } /* skip over the descriptor itself */ offset += length; } err = 0; out: if (err == 0) { if (ses->ses_flags & SES_FLAG_ADDLSTATUS) enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS); } enc_update_request(enc, SES_PUBLISH_CACHE); return (err); } static int ses_fill_rcv_diag_io(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf) { if (enc->enc_type == ENC_SEMB_SES) { semb_receive_diagnostic_results(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1, state->page_code, buf, state->buf_size, state->timeout); } else { scsi_receive_diagnostic_results(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*pcv*/1, state->page_code, buf, state->buf_size, SSD_FULL_SIZE, state->timeout); } return (0); } /** * \brief Encode the object status into the response buffer, which is * expected to contain the current enclosure status. This function * turns off all the 'select' bits for the objects except for the * object specified, then sends it back to the enclosure. * * \param enc SES enclosure the change is being applied to. * \param buf Buffer containing the current enclosure status response. * \param amt Length of the response in the buffer. * \param req The control request to be applied to buf. * * \return 0 on success, errno otherwise.
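* * Callers encode an enclosure-wide request by passing a req with * elm_idx == -1; in that case only the control flags in the page * header are modified and no per-element entry is touched.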
*/ static int ses_encode(enc_softc_t *enc, uint8_t *buf, int amt, ses_control_request_t *req) { struct ses_iterator iter; enc_element_t *element; int offset; struct ses_control_page_hdr *hdr; ses_iter_init(enc, &enc->enc_cache, &iter); hdr = (struct ses_control_page_hdr *)buf; if (req->elm_idx == -1) { /* for enclosure status, at least 2 bytes are needed */ if (amt < 2) return (EIO); hdr->control_flags = req->elm_stat.comstatus & SES_SET_STATUS_MASK; ENC_DLOG(enc, "Set EncStat %x\n", hdr->control_flags); return (0); } element = ses_iter_seek_to(&iter, req->elm_idx, SES_ELEM_INDEX_GLOBAL); if (element == NULL) return (ENXIO); /* * Seek to the entry that corresponds to the requested object. * The global element index already accounts for the overall * status element of each type. */ offset = sizeof(struct ses_control_page_hdr) + (iter.global_element_index * sizeof(struct ses_comstat)); /* Check for buffer overflow. */ if (offset + sizeof(struct ses_comstat) > amt) return (EIO); /* Set the status. */ memcpy(&buf[offset], &req->elm_stat, sizeof(struct ses_comstat)); ENC_DLOG(enc, "Set Type 0x%x Obj 0x%x (offset %d) with %x %x %x %x\n", iter.type_index, iter.global_element_index, offset, req->elm_stat.comstatus, req->elm_stat.comstat[0], req->elm_stat.comstat[1], req->elm_stat.comstat[2]); return (0); } static int ses_fill_control_request(enc_softc_t *enc, struct enc_fsm_state *state, union ccb *ccb, uint8_t *buf) { ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; struct ses_control_page_hdr *hdr; ses_control_request_t *req; size_t plength; size_t offset; ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; hdr = (struct ses_control_page_hdr *)buf; if (ses_cache->status_page == NULL) { ses_terminate_control_requests(&ses->ses_requests, EIO); return (EIO); } plength = ses_page_length(&ses_cache->status_page->hdr); memcpy(buf, ses_cache->status_page, plength); /* Disable the select bits in all status entries. */ for (offset = sizeof(struct ses_control_page_hdr); offset < plength; offset += sizeof(struct ses_comstat)) { buf[offset] &= ~SESCTL_CSEL; } /* And make sure the INVOP bit is clear. */ hdr->control_flags &= ~SES_ENCSTAT_INVOP; /* Apply incoming requests.
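* Requests that fail to encode are completed immediately (their * waiters are woken with req->result already set); the remainder * migrate to ses_pending_requests and are completed when the SEND * DIAGNOSTIC command finishes in ses_process_control_request().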
*/ while ((req = TAILQ_FIRST(&ses->ses_requests)) != NULL) { TAILQ_REMOVE(&ses->ses_requests, req, links); req->result = ses_encode(enc, buf, plength, req); if (req->result != 0) { wakeup(req); continue; } TAILQ_INSERT_TAIL(&ses->ses_pending_requests, req, links); } if (TAILQ_EMPTY(&ses->ses_pending_requests) != 0) return (ENOENT); /* Fill out the ccb */ if (enc->enc_type == ENC_SEMB_SES) { semb_send_diagnostic(&ccb->ataio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, buf, ses_page_length(&ses_cache->status_page->hdr), state->timeout); } else { scsi_send_diagnostic(&ccb->csio, /*retries*/5, NULL, MSG_SIMPLE_Q_TAG, /*unit_offline*/0, /*device_offline*/0, /*self_test*/0, /*page_format*/1, /*self_test_code*/0, buf, ses_page_length(&ses_cache->status_page->hdr), SSD_FULL_SIZE, state->timeout); } return (0); } static int ses_get_elm_addlstatus_fc(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz) { ENC_VLOG(enc, "FC Device Support Stubbed in Additional Status Page\n"); return (ENODEV); } #define SES_PRINT_PORTS(p, type) do { \ if (((p) & SES_SASOBJ_DEV_PHY_PROTOMASK) != 0) { \ sbuf_printf(sbp, " %s (", type); \ if ((p) & SES_SASOBJ_DEV_PHY_SMP) \ sbuf_printf(sbp, " SMP"); \ if ((p) & SES_SASOBJ_DEV_PHY_STP) \ sbuf_printf(sbp, " STP"); \ if ((p) & SES_SASOBJ_DEV_PHY_SSP) \ sbuf_printf(sbp, " SSP"); \ sbuf_printf(sbp, " )"); \ } \ } while(0) /** * \brief Print the additional element status data for this object, for SAS * type 0 objects. See SES2 r20 Section 6.1.13.3.2. * * \param sesname SES device name associated with the object. * \param sbp Sbuf to print to. * \param obj The object to print the data for. */ static void ses_print_addl_data_sas_type0(char *sesname, struct sbuf *sbp, enc_element_t *obj) { int i; ses_element_t *elmpriv; struct ses_addl_status *addl; struct ses_elm_sas_device_phy *phy; elmpriv = obj->elm_private; addl = &(elmpriv->addl); sbuf_printf(sbp, ", SAS Slot: %d%s phys", addl->proto_hdr.sas->base_hdr.num_phys, ses_elm_sas_type0_not_all_phys(addl->proto_hdr.sas) ? "+" : ""); if (ses_elm_addlstatus_eip(addl->hdr)) sbuf_printf(sbp, " at slot %d", addl->proto_hdr.sas->type0_eip.dev_slot_num); sbuf_printf(sbp, "\n"); if (addl->proto_data.sasdev_phys == NULL) return; for (i = 0;i < addl->proto_hdr.sas->base_hdr.num_phys;i++) { phy = &addl->proto_data.sasdev_phys[i]; sbuf_printf(sbp, "%s: phy %d:", sesname, i); if (ses_elm_sas_dev_phy_sata_dev(phy)) /* Spec says all other fields are specific values */ sbuf_printf(sbp, " SATA device\n"); else { sbuf_printf(sbp, " SAS device type %d phy %d", ses_elm_sas_dev_phy_dev_type(phy), phy->phy_id); SES_PRINT_PORTS(phy->initiator_ports, "Initiator"); SES_PRINT_PORTS(phy->target_ports, "Target"); sbuf_printf(sbp, "\n"); } sbuf_printf(sbp, "%s: phy %d: parent %jx addr %jx\n", sesname, i, (uintmax_t)scsi_8btou64(phy->parent_addr), (uintmax_t)scsi_8btou64(phy->phy_addr)); } } #undef SES_PRINT_PORTS /** * \brief Print the additional element status data for this object, for SAS * type 1 objects. See SES2 r20 Sections 6.1.13.3.3 and 6.1.13.3.4. * * \param sesname SES device name associated with the object. * \param sbp Sbuf to print to. * \param obj The object to print the data for. 
*/ static void ses_print_addl_data_sas_type1(char *sesname, struct sbuf *sbp, enc_element_t *obj) { int i, num_phys; ses_element_t *elmpriv; struct ses_addl_status *addl; struct ses_elm_sas_expander_phy *exp_phy; struct ses_elm_sas_port_phy *port_phy; elmpriv = obj->elm_private; addl = &(elmpriv->addl); sbuf_printf(sbp, ", SAS "); if (obj->elm_type == ELMTYP_SAS_EXP) { num_phys = addl->proto_hdr.sas->base_hdr.num_phys; sbuf_printf(sbp, "Expander: %d phys", num_phys); if (addl->proto_data.sasexp_phys == NULL) return; for (i = 0;i < num_phys;i++) { exp_phy = &addl->proto_data.sasexp_phys[i]; sbuf_printf(sbp, "%s: phy %d: connector %d other %d\n", sesname, i, exp_phy->connector_index, exp_phy->other_index); } } else { num_phys = addl->proto_hdr.sas->base_hdr.num_phys; sbuf_printf(sbp, "Port: %d phys", num_phys); if (addl->proto_data.sasport_phys == NULL) return; for (i = 0;i < num_phys;i++) { port_phy = &addl->proto_data.sasport_phys[i]; sbuf_printf(sbp, "%s: phy %d: id %d connector %d other %d\n", sesname, i, port_phy->phy_id, port_phy->connector_index, port_phy->other_index); sbuf_printf(sbp, "%s: phy %d: addr %jx\n", sesname, i, (uintmax_t)scsi_8btou64(port_phy->phy_addr)); } } } /** * \brief Print the additional element status data for this object, for * ATA objects. * * \param sbp Sbuf to print to. * \param obj The object to print the data for. */ static void ses_print_addl_data_ata(struct sbuf *sbp, enc_element_t *obj) { ses_element_t *elmpriv = obj->elm_private; struct ses_addl_status *addl = &elmpriv->addl; struct ses_elm_ata_hdr *ata = addl->proto_hdr.ata; sbuf_printf(sbp, ", SATA Slot: scbus%d target %d\n", scsi_4btoul(ata->bus), scsi_4btoul(ata->target)); } /** * \brief Print the additional element status data for this object. * * \param enc SES softc associated with the object. * \param obj The object to print the data for. 
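* * Example console output (hypothetical values): * * ses0: da5,pass6 in 'Slot 05', SAS Slot: 1 phys at slot 4 * ses0: phy 0: SAS device type 1 phy 0 Target ( SSP ) * ses0: phy 0: parent 5003048001234567 addr 5000c50012345678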
*/ static void ses_print_addl_data(enc_softc_t *enc, enc_element_t *obj) { ses_element_t *elmpriv; struct ses_addl_status *addl; struct sbuf sesname, name, out; elmpriv = obj->elm_private; if (elmpriv == NULL) return; addl = &(elmpriv->addl); if (addl->hdr == NULL) return; sbuf_new(&sesname, NULL, 16, SBUF_AUTOEXTEND); sbuf_new(&name, NULL, 16, SBUF_AUTOEXTEND); sbuf_new(&out, NULL, 512, SBUF_AUTOEXTEND); ses_paths_iter(enc, obj, ses_elmdevname_callback, &name); if (sbuf_len(&name) == 0) sbuf_printf(&name, "(none)"); sbuf_finish(&name); sbuf_printf(&sesname, "%s%d", enc->periph->periph_name, enc->periph->unit_number); sbuf_finish(&sesname); sbuf_printf(&out, "%s: %s in ", sbuf_data(&sesname), sbuf_data(&name)); if (elmpriv->descr != NULL) sbuf_printf(&out, "'%s'", elmpriv->descr); else { if (obj->elm_type <= ELMTYP_LAST) sbuf_cat(&out, elm_type_names[obj->elm_type]); else sbuf_printf(&out, "<Type 0x%02x>", obj->elm_type); sbuf_printf(&out, " %d", obj->type_elm_idx); if (obj->subenclosure != 0) sbuf_printf(&out, " of subenc %d", obj->subenclosure); } switch(ses_elm_addlstatus_proto(addl->hdr)) { case SPSP_PROTO_FC: goto noaddl; /* stubbed for now */ case SPSP_PROTO_SAS: if (addl->proto_hdr.sas == NULL) goto noaddl; switch(ses_elm_sas_descr_type(addl->proto_hdr.sas)) { case SES_SASOBJ_TYPE_SLOT: ses_print_addl_data_sas_type0(sbuf_data(&sesname), &out, obj); break; case SES_SASOBJ_TYPE_OTHER: ses_print_addl_data_sas_type1(sbuf_data(&sesname), &out, obj); break; default: goto noaddl; } break; case SPSP_PROTO_ATA: if (addl->proto_hdr.ata == NULL) goto noaddl; ses_print_addl_data_ata(&out, obj); break; default: noaddl: sbuf_cat(&out, "\n"); break; } sbuf_finish(&out); printf("%s", sbuf_data(&out)); sbuf_delete(&out); sbuf_delete(&name); sbuf_delete(&sesname); } /** * \brief Update the softc with the additional element status data for this * object, for SAS type 0 objects. * * \param enc SES softc to be updated. * \param buf The additional element status response buffer. * \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_sas_type0(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int nobj) { int err, offset, physz; enc_element_t *obj; ses_element_t *elmpriv; struct ses_addl_status *addl; err = offset = 0; /* basic object setup */ obj = &(enc_cache->elm_map[nobj]); elmpriv = obj->elm_private; addl = &(elmpriv->addl); addl->proto_hdr.sas = (union ses_elm_sas_hdr *)&buf[offset]; /* Don't assume this object has any phys */ bzero(&addl->proto_data, sizeof(addl->proto_data)); if (addl->proto_hdr.sas->base_hdr.num_phys == 0) goto out; /* Skip forward to the phy list */ if (eip) offset += sizeof(struct ses_elm_sas_type0_eip_hdr); else offset += sizeof(struct ses_elm_sas_type0_base_hdr); /* Make sure the phy list fits in the buffer */ physz = addl->proto_hdr.sas->base_hdr.num_phys; physz *= sizeof(struct ses_elm_sas_device_phy); if (physz > (bufsiz - offset + 4)) { ENC_VLOG(enc, "Element %d Device Phy List Beyond End Of Buffer\n", nobj); err = EIO; goto out; } /* Point to the phy list */ addl->proto_data.sasdev_phys = (struct ses_elm_sas_device_phy *)&buf[offset]; out: return (err); } /** * \brief Update the softc with the additional element status data for this * object, for SAS type 1 objects. * * \param enc SES softc to be updated. * \param buf The additional element status response buffer.
* \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_sas_type1(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int nobj) { int err, offset, physz; enc_element_t *obj; ses_element_t *elmpriv; struct ses_addl_status *addl; err = offset = 0; /* basic object setup */ obj = &(enc_cache->elm_map[nobj]); elmpriv = obj->elm_private; addl = &(elmpriv->addl); addl->proto_hdr.sas = (union ses_elm_sas_hdr *)&buf[offset]; /* Don't assume this object has any phys */ bzero(&addl->proto_data, sizeof(addl->proto_data)); if (addl->proto_hdr.sas->base_hdr.num_phys == 0) goto out; /* Process expanders differently from other type1 cases */ if (obj->elm_type == ELMTYP_SAS_EXP) { offset += sizeof(struct ses_elm_sas_type1_expander_hdr); physz = addl->proto_hdr.sas->base_hdr.num_phys * sizeof(struct ses_elm_sas_expander_phy); if (physz > (bufsiz - offset)) { ENC_VLOG(enc, "Element %d: Expander Phy List Beyond " "End Of Buffer\n", nobj); err = EIO; goto out; } addl->proto_data.sasexp_phys = (struct ses_elm_sas_expander_phy *)&buf[offset]; } else { offset += sizeof(struct ses_elm_sas_type1_nonexpander_hdr); physz = addl->proto_hdr.sas->base_hdr.num_phys * sizeof(struct ses_elm_sas_port_phy); if (physz > (bufsiz - offset + 4)) { ENC_VLOG(enc, "Element %d: Port Phy List Beyond End " "Of Buffer\n", nobj); err = EIO; goto out; } addl->proto_data.sasport_phys = (struct ses_elm_sas_port_phy *)&buf[offset]; } out: return (err); } /** * \brief Update the softc with the additional element status data for this * object, for SAS objects. * * \param enc SES softc to be updated. * \param buf The additional element status response buffer. * \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param tidx Type index for this object. * \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_sas(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int tidx, int nobj) { int dtype, err; ses_cache_t *ses_cache; union ses_elm_sas_hdr *hdr; /* Need to be able to read the descriptor type! 
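 * Both descriptor layouts start with the common base header, so it is
 * enough here that the union fits; the per-type phy list bounds are
 * checked later in the type 0/type 1 handlers.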
*/ if (bufsiz < sizeof(union ses_elm_sas_hdr)) { err = EIO; goto out; } ses_cache = enc_cache->private; hdr = (union ses_elm_sas_hdr *)buf; dtype = ses_elm_sas_descr_type(hdr); switch(dtype) { case SES_SASOBJ_TYPE_SLOT: switch(ses_cache->ses_types[tidx].hdr->etype_elm_type) { case ELMTYP_DEVICE: case ELMTYP_ARRAY_DEV: break; default: ENC_VLOG(enc, "Element %d has Additional Status type 0, " "invalid for SES element type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type); err = ENODEV; goto out; } err = ses_get_elm_addlstatus_sas_type0(enc, enc_cache, buf, bufsiz, eip, nobj); break; case SES_SASOBJ_TYPE_OTHER: switch(ses_cache->ses_types[tidx].hdr->etype_elm_type) { case ELMTYP_SAS_EXP: case ELMTYP_SCSI_INI: case ELMTYP_SCSI_TGT: case ELMTYP_ESCC: break; default: ENC_VLOG(enc, "Element %d has Additional Status type 1, " "invalid for SES element type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type); err = ENODEV; goto out; } err = ses_get_elm_addlstatus_sas_type1(enc, enc_cache, buf, bufsiz, eip, nobj); break; default: ENC_VLOG(enc, "Element %d of type 0x%x has Additional Status " "of unknown type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type, dtype); err = ENODEV; break; } out: return (err); } /** * \brief Update the softc with the additional element status data for this * object, for ATA objects. * * \param enc SES softc to be updated. * \param buf The additional element status response buffer. * \param bufsiz Size of the response buffer. * \param eip The EIP bit value. * \param tidx Type index for this object. * \param nobj Number of objects attached to the SES softc. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_addlstatus_ata(enc_softc_t *enc, enc_cache_t *enc_cache, uint8_t *buf, int bufsiz, int eip, int tidx, int nobj) { int err; ses_cache_t *ses_cache; if (bufsiz < sizeof(struct ses_elm_ata_hdr)) { err = EIO; goto out; } ses_cache = enc_cache->private; switch(ses_cache->ses_types[tidx].hdr->etype_elm_type) { case ELMTYP_DEVICE: case ELMTYP_ARRAY_DEV: break; default: ENC_VLOG(enc, "Element %d has Additional Status, " "invalid for SES element type 0x%x\n", nobj, ses_cache->ses_types[tidx].hdr->etype_elm_type); err = ENODEV; goto out; } ((ses_element_t *)enc_cache->elm_map[nobj].elm_private) ->addl.proto_hdr.ata = (struct ses_elm_ata_hdr *)buf; err = 0; out: return (err); } static void ses_softc_invalidate(enc_softc_t *enc) { ses_softc_t *ses; ses = enc->enc_private; ses_terminate_control_requests(&ses->ses_requests, ENXIO); } static void ses_softc_cleanup(enc_softc_t *enc) { ses_cache_free(enc, &enc->enc_cache); ses_cache_free(enc, &enc->enc_daemon_cache); ENC_FREE_AND_NULL(enc->enc_private); ENC_FREE_AND_NULL(enc->enc_cache.private); ENC_FREE_AND_NULL(enc->enc_daemon_cache.private); } static int ses_init_enc(enc_softc_t *enc) { return (0); } static int ses_get_enc_status(enc_softc_t *enc, int slpflag) { /* Automatically updated, caller checks enc_cache->encstat itself */ return (0); } static int ses_set_enc_status(enc_softc_t *enc, uint8_t encstat, int slpflag) { ses_control_request_t req; ses_softc_t *ses; ses = enc->enc_private; req.elm_idx = SES_SETSTATUS_ENC_IDX; req.elm_stat.comstatus = encstat & 0xf; TAILQ_INSERT_TAIL(&ses->ses_requests, &req, links); enc_update_request(enc, SES_PROCESS_CONTROL_REQS); cam_periph_sleep(enc->periph, &req, PUSER, "encstat", 0); return (req.result); } static int ses_get_elm_status(enc_softc_t *enc, encioc_elm_status_t *elms, int slpflag) { unsigned int i = elms->elm_idx; 
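	/*
	 * The cached element status is the four standard SES status bytes
	 * (the common status byte plus three element-type-specific bytes),
	 * hence the fixed 4-byte copy below.
	 */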
memcpy(elms->cstat, &enc->enc_cache.elm_map[i].encstat, 4); return (0); } static int ses_set_elm_status(enc_softc_t *enc, encioc_elm_status_t *elms, int slpflag) { ses_control_request_t req; ses_softc_t *ses; /* If this is clear, we don't do diddly. */ if ((elms->cstat[0] & SESCTL_CSEL) == 0) return (0); ses = enc->enc_private; req.elm_idx = elms->elm_idx; memcpy(&req.elm_stat, elms->cstat, sizeof(req.elm_stat)); TAILQ_INSERT_TAIL(&ses->ses_requests, &req, links); enc_update_request(enc, SES_PROCESS_CONTROL_REQS); cam_periph_sleep(enc->periph, &req, PUSER, "encstat", 0); return (req.result); } static int ses_get_elm_desc(enc_softc_t *enc, encioc_elm_desc_t *elmd) { int i = (int)elmd->elm_idx; ses_element_t *elmpriv; /* Assume caller has already checked obj_id validity */ elmpriv = enc->enc_cache.elm_map[i].elm_private; /* object might not have a descriptor */ if (elmpriv == NULL || elmpriv->descr == NULL) { elmd->elm_desc_len = 0; return (0); } if (elmd->elm_desc_len > elmpriv->descr_len) elmd->elm_desc_len = elmpriv->descr_len; copyout(elmpriv->descr, elmd->elm_desc_str, elmd->elm_desc_len); return (0); } /** * \brief Respond to ENCIOC_GETELMDEVNAME, providing a device name for the * given object id if one is available. * * \param enc SES softc to examine. * \param elmdn ioctl structure to read/write device name info. * * \return 0 on success, errno otherwise. */ static int ses_get_elm_devnames(enc_softc_t *enc, encioc_elm_devnames_t *elmdn) { struct sbuf sb; int len; len = elmdn->elm_names_size; if (len < 0) return (EINVAL); cam_periph_unlock(enc->periph); sbuf_new(&sb, NULL, len, SBUF_FIXEDLEN); ses_paths_iter(enc, &enc->enc_cache.elm_map[elmdn->elm_idx], ses_elmdevname_callback, &sb); sbuf_finish(&sb); elmdn->elm_names_len = sbuf_len(&sb); copyout(sbuf_data(&sb), elmdn->elm_devnames, elmdn->elm_names_len + 1); sbuf_delete(&sb); cam_periph_lock(enc->periph); return (elmdn->elm_names_len > 0 ? 0 : ENODEV); } /** * \brief Send a string to the primary subenclosure using the String Out * SES diagnostic page. * * \param enc SES enclosure to run the command on. * \param sstr SES string structure to operate on. * \param ioc Ioctl being performed. * * \return 0 on success, errno otherwise.
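 *
 * For ENCIOC_SETSTRING the payload handed to the device is the user
 * buffer prefixed with the 4-byte String Out diagnostic page header
 * constructed below:
 *
 *	buf[0] = SesStringOut;		page code
 *	buf[1] = 0;			reserved
 *	buf[2] = sstr->bufsiz >> 8;	page length (MSB)
 *	buf[3] = sstr->bufsiz & 0xff;	page length (LSB)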
*/ static int ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc) { - ses_softc_t *ses; enc_cache_t *enc_cache; ses_cache_t *ses_cache; const struct ses_enc_desc *enc_desc; int amt, payload, ret; char cdb[6]; char str[32]; char vendor[9]; char product[17]; char rev[5]; uint8_t *buf; size_t size, rsize; - ses = enc->enc_private; enc_cache = &enc->enc_daemon_cache; ses_cache = enc_cache->private; /* Implement SES2r20 6.1.6 */ if (sstr->bufsiz > 0xffff) return (EINVAL); /* buffer size too large */ switch (ioc) { case ENCIOC_SETSTRING: payload = sstr->bufsiz + 4; /* header for SEND DIAGNOSTIC */ amt = 0 - payload; buf = ENC_MALLOC(payload); if (buf == NULL) return (ENOMEM); ses_page_cdb(cdb, payload, 0, CAM_DIR_OUT); /* Construct the page request */ buf[0] = SesStringOut; buf[1] = 0; buf[2] = sstr->bufsiz >> 8; buf[3] = sstr->bufsiz & 0xff; ret = copyin(sstr->buf, &buf[4], sstr->bufsiz); if (ret != 0) { ENC_FREE(buf); return (ret); } break; case ENCIOC_GETSTRING: payload = sstr->bufsiz; amt = payload; buf = ENC_MALLOC(payload); if (buf == NULL) return (ENOMEM); ses_page_cdb(cdb, payload, SesStringIn, CAM_DIR_IN); break; case ENCIOC_GETENCNAME: if (ses_cache->ses_nsubencs < 1) return (ENODEV); enc_desc = ses_cache->subencs[0]; cam_strvis(vendor, enc_desc->vendor_id, sizeof(enc_desc->vendor_id), sizeof(vendor)); cam_strvis(product, enc_desc->product_id, sizeof(enc_desc->product_id), sizeof(product)); cam_strvis(rev, enc_desc->product_rev, sizeof(enc_desc->product_rev), sizeof(rev)); rsize = snprintf(str, sizeof(str), "%s %s %s", vendor, product, rev) + 1; if (rsize > sizeof(str)) rsize = sizeof(str); size = rsize; if (size > sstr->bufsiz) size = sstr->bufsiz; copyout(str, sstr->buf, size); sstr->bufsiz = rsize; return (size == rsize ? 0 : ENOMEM); case ENCIOC_GETENCID: if (ses_cache->ses_nsubencs < 1) return (ENODEV); enc_desc = ses_cache->subencs[0]; rsize = snprintf(str, sizeof(str), "%16jx", scsi_8btou64(enc_desc->logical_id)) + 1; if (rsize > sizeof(str)) rsize = sizeof(str); size = rsize; if (size > sstr->bufsiz) size = sstr->bufsiz; copyout(str, sstr->buf, size); sstr->bufsiz = rsize; return (size == rsize ? 0 : ENOMEM); default: return (EINVAL); } ret = enc_runcmd(enc, cdb, 6, buf, &amt); if (ret == 0 && ioc == ENCIOC_GETSTRING) ret = copyout(buf, sstr->buf, sstr->bufsiz); if (ioc == ENCIOC_SETSTRING || ioc == ENCIOC_GETSTRING) ENC_FREE(buf); return (ret); } /** * \invariant Called with cam_periph mutex held. */ static void ses_poll_status(enc_softc_t *enc) { ses_softc_t *ses; ses = enc->enc_private; enc_update_request(enc, SES_UPDATE_GETSTATUS); if (ses->ses_flags & SES_FLAG_DESC) enc_update_request(enc, SES_UPDATE_GETELMDESCS); if (ses->ses_flags & SES_FLAG_ADDLSTATUS) enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS); } /** * \brief Notification received when CAM detects a new device in the * SCSI domain in which this SEP resides. * * \param enc SES enclosure instance. 
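 *
 * On arrival we refresh the status pages via ses_poll_status() and
 * queue SES_PUBLISH_PHYSPATHS so the new device's physical path is
 * exported.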
*/ static void ses_device_found(enc_softc_t *enc) { ses_poll_status(enc); enc_update_request(enc, SES_PUBLISH_PHYSPATHS); } static struct enc_vec ses_enc_vec = { .softc_invalidate = ses_softc_invalidate, .softc_cleanup = ses_softc_cleanup, .init_enc = ses_init_enc, .get_enc_status = ses_get_enc_status, .set_enc_status = ses_set_enc_status, .get_elm_status = ses_get_elm_status, .set_elm_status = ses_set_elm_status, .get_elm_desc = ses_get_elm_desc, .get_elm_devnames = ses_get_elm_devnames, .handle_string = ses_handle_string, .device_found = ses_device_found, .poll_status = ses_poll_status }; /** * \brief Initialize a new SES instance. * * \param enc SES softc structure to set up the instance in. * * \return 0 on success, errno otherwise. */ int ses_softc_init(enc_softc_t *enc) { ses_softc_t *ses_softc; CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("entering enc_softc_init(%p)\n", enc)); enc->enc_vec = ses_enc_vec; enc->enc_fsm_states = enc_fsm_states; if (enc->enc_private == NULL) enc->enc_private = ENC_MALLOCZ(sizeof(ses_softc_t)); if (enc->enc_cache.private == NULL) enc->enc_cache.private = ENC_MALLOCZ(sizeof(ses_cache_t)); if (enc->enc_daemon_cache.private == NULL) enc->enc_daemon_cache.private = ENC_MALLOCZ(sizeof(ses_cache_t)); if (enc->enc_private == NULL || enc->enc_cache.private == NULL || enc->enc_daemon_cache.private == NULL) { ENC_FREE_AND_NULL(enc->enc_private); ENC_FREE_AND_NULL(enc->enc_cache.private); ENC_FREE_AND_NULL(enc->enc_daemon_cache.private); return (ENOMEM); } ses_softc = enc->enc_private; TAILQ_INIT(&ses_softc->ses_requests); TAILQ_INIT(&ses_softc->ses_pending_requests); enc_update_request(enc, SES_UPDATE_PAGES); // XXX: Move this to the FSM so it doesn't hang init if (0) (void) ses_set_timed_completion(enc, 1); return (0); } diff --git a/sys/cam/scsi/scsi_pass.c b/sys/cam/scsi/scsi_pass.c index e7d9d755c8cc..5d6febe74539 100644 --- a/sys/cam/scsi/scsi_pass.c +++ b/sys/cam/scsi/scsi_pass.c @@ -1,2249 +1,2241 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { PASS_FLAG_OPEN = 0x01, PASS_FLAG_LOCKED = 0x02, PASS_FLAG_INVALID = 0x04, PASS_FLAG_INITIAL_PHYSPATH = 0x08, PASS_FLAG_ZONE_INPROG = 0x10, PASS_FLAG_ZONE_VALID = 0x20, PASS_FLAG_UNMAPPED_CAPABLE = 0x40, PASS_FLAG_ABANDONED_REF_SET = 0x80 } pass_flags; typedef enum { PASS_STATE_NORMAL } pass_state; typedef enum { PASS_CCB_BUFFER_IO, PASS_CCB_QUEUED_IO } pass_ccb_types; #define ccb_type ppriv_field0 #define ccb_ioreq ppriv_ptr1 /* * The maximum number of memory segments we preallocate. */ #define PASS_MAX_SEGS 16 typedef enum { PASS_IO_NONE = 0x00, PASS_IO_USER_SEG_MALLOC = 0x01, PASS_IO_KERN_SEG_MALLOC = 0x02, PASS_IO_ABANDONED = 0x04 } pass_io_flags; struct pass_io_req { union ccb ccb; union ccb *alloced_ccb; union ccb *user_ccb_ptr; camq_entry user_periph_links; ccb_ppriv_area user_periph_priv; struct cam_periph_map_info mapinfo; pass_io_flags flags; ccb_flags data_flags; int num_user_segs; bus_dma_segment_t user_segs[PASS_MAX_SEGS]; int num_kern_segs; bus_dma_segment_t kern_segs[PASS_MAX_SEGS]; bus_dma_segment_t *user_segptr; bus_dma_segment_t *kern_segptr; int num_bufs; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint8_t *user_bufs[CAM_PERIPH_MAXMAPS]; uint8_t *kern_bufs[CAM_PERIPH_MAXMAPS]; struct bintime start_time; TAILQ_ENTRY(pass_io_req) links; }; struct pass_softc { pass_state state; pass_flags flags; u_int8_t pd_type; int open_count; u_int maxio; struct devstat *device_stats; struct cdev *dev; struct cdev *alias_dev; struct task add_physpath_task; struct task shutdown_kqueue_task; struct selinfo read_select; TAILQ_HEAD(, pass_io_req) incoming_queue; TAILQ_HEAD(, pass_io_req) active_queue; TAILQ_HEAD(, pass_io_req) abandoned_queue; TAILQ_HEAD(, pass_io_req) done_queue; struct cam_periph *periph; char zone_name[12]; char io_zone_name[12]; uma_zone_t pass_zone; uma_zone_t pass_io_zone; size_t io_zone_size; }; static d_open_t passopen; static d_close_t passclose; static d_ioctl_t passioctl; static d_ioctl_t passdoioctl; static d_poll_t passpoll; static d_kqfilter_t passkqfilter; static void passreadfiltdetach(struct knote *kn); static int passreadfilt(struct knote *kn, long hint); static periph_init_t passinit; static periph_ctor_t passregister; static periph_oninv_t passoninvalidate; static periph_dtor_t passcleanup; static periph_start_t passstart; static void pass_shutdown_kqueue(void *context, int pending); static void pass_add_physpath(void *context, int pending); static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void passdone(struct cam_periph *periph, union ccb *done_ccb); static int passcreatezone(struct cam_periph *periph); static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req); static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction); static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req); static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req); static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb); static struct periph_driver passdriver = { 
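	/* init routine, driver name, unit list head, generation count */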
passinit, "pass", TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pass, passdriver); static struct cdevsw pass_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = passopen, .d_close = passclose, .d_ioctl = passioctl, .d_poll = passpoll, .d_kqfilter = passkqfilter, .d_name = "pass", }; static struct filterops passread_filtops = { .f_isfd = 1, .f_detach = passreadfiltdetach, .f_event = passreadfilt }; static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers"); static void passinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pass: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void passrejectios(struct cam_periph *periph) { struct pass_io_req *io_req, *io_req2; struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * The user can no longer get status for I/O on the done queue, so * clean up all outstanding I/O on the done queue. */ TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * The underlying device is gone, so we can't issue these I/Os. * The devfs node has been shut down, so we can't return status to * the user. Free any I/O left on the incoming queue. */ TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * Normally we would put I/Os on the abandoned queue and acquire a * reference when we saw the final close. But, the device went * away and devfs may have moved everything off to deadfs by the * time the I/O done callback is called; as a result, we won't see * any more closes. So, if we have any active I/Os, we need to put * them on the abandoned queue. When the abandoned queue is empty, * we'll release the remaining reference (see below) to the peripheral. */ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } /* * If we put any I/O on the abandoned queue, acquire a reference. */ if ((!TAILQ_EMPTY(&softc->abandoned_queue)) && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } } static void passdevgonecb(void *arg) { struct cam_periph *periph; struct mtx *mtx; struct pass_softc *softc; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct pass_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. * Accordingly, inform all queued I/Os of their fate. */ cam_periph_release_locked(periph); passrejectios(periph); /* * We reference the SIM lock directly here, instead of using * cam_periph_unlock(). 
The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); /* * We have to remove our kqueue context from a thread because it * may sleep. It would be nice if we could get a callback from * kqueue when it is done cleaning up resources. */ taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task); } static void passoninvalidate(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, passasync, periph, periph->path); softc->flags |= PASS_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, passdevgonecb, periph); } static void passcleanup(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(TAILQ_EMPTY(&softc->active_queue), ("%s called when there are commands on the active queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->abandoned_queue), ("%s called when there are commands on the abandoned queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->incoming_queue), ("%s called when there are commands on the incoming queue!\n", __func__)); KASSERT(TAILQ_EMPTY(&softc->done_queue), ("%s called when there are commands on the done queue!\n", __func__)); devstat_remove_entry(softc->device_stats); cam_periph_unlock(periph); /* * We call taskqueue_drain() for the physpath task to make sure it * is complete. We drop the lock because this can potentially * sleep. XXX KDM that is bad. Need a way to get a callback when * a taskqueue is drained. * * Note that we don't drain the kqueue shutdown task queue. This * is because we hold a reference on the periph for kqueue, and * release that reference from the kqueue shutdown task queue. So * we cannot come into this routine unless we've released that * reference. Also, because that could be the last reference, we * could be called from the cam_periph_release() call in * pass_shutdown_kqueue(). In that case, the taskqueue_drain() * would deadlock. It would be preferable if we had a way to * get a callback when a taskqueue is done. */ taskqueue_drain(taskqueue_thread, &softc->add_physpath_task); cam_periph_lock(periph); free(softc, M_DEVBUF); } static void pass_shutdown_kqueue(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; periph = context; softc = periph->softc; knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0); knlist_destroy(&softc->read_select.si_note); /* * Release the reference we held for kqueue. */ cam_periph_release(periph); } static void pass_add_physpath(void *context, int pending) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; char *physpath; /* * If we have one, create a devfs alias for our * physical path. 
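	 *
	 * Condensed, the successful path below is (locking elided):
	 *
	 *	xpt_getattr(physpath, MAXPATHLEN, "GEOM::physpath",
	 *	    periph->path);
	 *	make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev,
	 *	    softc->dev, softc->alias_dev, physpath);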
*/ periph = context; softc = periph->softc; physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK); mtx = cam_periph_mtx(periph); mtx_lock(mtx); if (periph->flags & CAM_PERIPH_INVALID) goto out; if (xpt_getattr(physpath, MAXPATHLEN, "GEOM::physpath", periph->path) == 0 && strlen(physpath) != 0) { mtx_unlock(mtx); make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev, softc->dev, softc->alias_dev, physpath); mtx_lock(mtx); } out: /* * Now that we've made our alias, we no longer have to have a * reference to the device. */ if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0) softc->flags |= PASS_FLAG_INITIAL_PHYSPATH; /* * We always acquire a reference to the periph before queueing this * task queue function, so it won't go away before we run. */ while (pending-- > 0) cam_periph_release_locked(periph); mtx_unlock(mtx); free(physpath, M_DEVBUF); } static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(passregister, passoninvalidate, passcleanup, passstart, "pass", CAM_PERIPH_BIO, path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("passasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; /* * Acquire a reference to the periph before we * start the taskqueue, so that we don't run into * a situation where the periph goes away before * the task queue has a chance to run. */ if (cam_periph_acquire(periph) != 0) break; taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status passregister(struct cam_periph *periph, void *arg) { struct pass_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; struct make_dev_args args; int error, no_tags; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("%s: no getdev CCB, can't register device\n", __func__); return(CAM_REQ_CMP_ERR); } softc = (struct pass_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("%s: Unable to probe new device. 
" "Unable to allocate softc\n", __func__); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = PASS_STATE_NORMAL; if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI) softc->pd_type = SID_TYPE(&cgd->inq_data); else if (cgd->protocol == PROTO_SATAPM) softc->pd_type = T_ENCLOSURE; else softc->pd_type = T_DIRECT; periph->softc = softc; softc->periph = periph; TAILQ_INIT(&softc->incoming_queue); TAILQ_INIT(&softc->active_queue); TAILQ_INIT(&softc->abandoned_queue); TAILQ_INIT(&softc->done_queue); snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d", periph->periph_name, periph->unit_number); snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO", periph->periph_name, periph->unit_number); softc->io_zone_size = maxphys; knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph)); xpt_path_inq(&cpi, periph->path); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > maxphys) softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ if (cpi.hba_misc & PIM_UNMAPPED) softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE; /* * We pass in 0 for a blocksize, since we don't * know what the blocksize of this device is, if * it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("pass", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Initialize the taskqueue handler for shutting down kqueue. */ TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0, pass_shutdown_kqueue, periph); /* * Acquire a reference to the periph that we can release once we've * cleaned up the kqueue. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ make_dev_args_init(&args); args.mda_devsw = &pass_cdevsw; args.mda_unit = periph->unit_number; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = periph; args.mda_flags = MAKEDEV_NOWAIT; error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, periph->unit_number); if (error != 0) { cam_periph_lock(periph); cam_periph_release_locked(periph); return (CAM_REQ_CMP_ERR); } /* * Hold a reference to the periph before we create the physical * path alias so it can't go away. */ if (cam_periph_acquire(periph) != 0) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } cam_periph_lock(periph); TASK_INIT(&softc->add_physpath_task, /*priority*/0, pass_add_physpath, periph); /* * See if physical path information is already available. */ taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); /* * Add an async callback so that we get notified if * this device goes away or its physical path * (stored in the advanced info data of the EDT) has * changed. 
*/ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, passasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static int passopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != 0) return (ENXIO); cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; if (softc->flags & PASS_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EPERM); } /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { xpt_print(periph->path, "can't do nonblocking access\n"); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EINVAL); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int passclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; if (softc->open_count == 0) { struct pass_io_req *io_req, *io_req2; TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * If there are any active I/Os, we need to forcibly acquire a * reference to the peripheral so that we don't go away * before they complete. We'll release the reference when * the abandoned queue is empty. */ io_req = TAILQ_FIRST(&softc->active_queue); if ((io_req != NULL) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } /* * Since the I/O in the active queue is not under our * control, just set a flag so that we can clean it up when * it completes and put it on the abandoned queue. This * will prevent our sending spurious completions in the * event that the device is opened again before these I/Os * complete. */ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } } cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
*/ mtx_unlock(mtx); return (0); } static void passstart(struct cam_periph *periph, union ccb *start_ccb) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; switch (softc->state) { case PASS_STATE_NORMAL: { struct pass_io_req *io_req; /* * Check for any queued I/O requests that require an * allocated slot. */ io_req = TAILQ_FIRST(&softc->incoming_queue); if (io_req == NULL) { xpt_release_ccb(start_ccb); break; } TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); /* * Merge the user's CCB into the allocated CCB. */ xpt_merge_ccb(start_ccb, &io_req->ccb); start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO; start_ccb->ccb_h.ccb_ioreq = io_req; start_ccb->ccb_h.cbfcnp = passdone; io_req->alloced_ccb = start_ccb; binuptime(&io_req->start_time); devstat_start_transaction(softc->device_stats, &io_req->start_time); xpt_action(start_ccb); /* * If we have any more I/O waiting, schedule ourselves again. */ if (!TAILQ_EMPTY(&softc->incoming_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } default: break; } } static void passdone(struct cam_periph *periph, union ccb *done_ccb) { struct pass_softc *softc; struct ccb_scsiio *csio; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case PASS_CCB_QUEUED_IO: { struct pass_io_req *io_req; io_req = done_ccb->ccb_h.ccb_ioreq; #if 0 xpt_print(periph->path, "%s: called for user CCB %p\n", __func__, io_req->user_ccb_ptr); #endif if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) && ((io_req->flags & PASS_IO_ABANDONED) == 0)) { int error; error = passerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } } /* * Copy the allocated CCB contents back to the malloced CCB * so we can give status back to the user when he requests it. */ bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb)); /* * Log data/transaction completion with devstat(9). */ switch (done_ccb->ccb_h.func_code) { case XPT_SCSI_IO: devstat_end_transaction(softc->device_stats, done_ccb->csio.dxfer_len - done_ccb->csio.resid, done_ccb->csio.tag_action & 0x3, ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_ATA_IO: devstat_end_transaction(softc->device_stats, done_ccb->ataio.dxfer_len - done_ccb->ataio.resid, 0, /* Not used in ATA */ ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_SMP_IO: /* * XXX KDM this isn't quite right, but there isn't * currently an easy way to represent a bidirectional * transfer in devstat. The only way to do it * and have the byte counts come out right would * mean that we would have to record two * transactions, one for the request and one for the * response. For now, so that we report something, * just treat the entire thing as a read. 
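		 * In other words, smp_request_len + smp_response_len bytes
		 * are charged as a single DEVSTAT_READ transaction below.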
*/ devstat_end_transaction(softc->device_stats, done_ccb->smpio.smp_request_len + done_ccb->smpio.smp_response_len, DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL, &io_req->start_time); break; default: devstat_end_transaction(softc->device_stats, 0, DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL, &io_req->start_time); break; } /* * In the normal case, take the completed I/O off of the * active queue and put it on the done queue. Notify the * user that we have a completed I/O. */ if ((io_req->flags & PASS_IO_ABANDONED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); selwakeuppri(&softc->read_select, PRIBIO); KNOTE_LOCKED(&softc->read_select.si_note, 0); } else { /* * In the case of an abandoned I/O (final close * without fetching the I/O), take it off of the * abandoned queue and free it. */ TAILQ_REMOVE(&softc->abandoned_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); /* * Release the done_ccb here, since we may wind up * freeing the peripheral when we decrement the * reference count below. */ xpt_release_ccb(done_ccb); /* * If the abandoned queue is empty, we can release * our reference to the periph since we won't have * any more completions coming. */ if ((TAILQ_EMPTY(&softc->abandoned_queue)) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) { softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET; cam_periph_release_locked(periph); } /* * We have already released the CCB, so we can * return. */ return; } break; } } xpt_release_ccb(done_ccb); } static int passcreatezone(struct cam_periph *periph) { struct pass_softc *softc; int error; error = 0; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0), ("%s called when the pass(4) zone is valid!\n", __func__)); KASSERT((softc->pass_zone == NULL), ("%s called when the pass(4) zone is allocated!\n", __func__)); if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) { /* * We're the first context through, so we need to create * the pass(4) UMA zone for I/O requests. */ softc->flags |= PASS_FLAG_ZONE_INPROG; /* * uma_zcreate() does a blocking (M_WAITOK) allocation, * so we cannot hold a mutex while we call it. */ cam_periph_unlock(periph); softc->pass_zone = uma_zcreate(softc->zone_name, sizeof(struct pass_io_req), NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); softc->pass_io_zone = uma_zcreate(softc->io_zone_name, softc->io_zone_size, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); cam_periph_lock(periph); if ((softc->pass_zone == NULL) || (softc->pass_io_zone == NULL)) { if (softc->pass_zone == NULL) xpt_print(periph->path, "unable to allocate " "IO Req UMA zone\n"); else xpt_print(periph->path, "unable to allocate " "IO UMA zone\n"); softc->flags &= ~PASS_FLAG_ZONE_INPROG; goto bailout; } /* * Set the flags appropriately and notify any other waiters. */ softc->flags &= ~PASS_FLAG_ZONE_INPROG; softc->flags |= PASS_FLAG_ZONE_VALID; wakeup(&softc->pass_zone); } else { /* * In this case, the UMA zone has not yet been created, but * another context is in the process of creating it. We * need to sleep until the creation is either done or has * failed. */ while ((softc->flags & PASS_FLAG_ZONE_INPROG) && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) { error = msleep(&softc->pass_zone, cam_periph_mtx(periph), PRIBIO, "paszon", 0); if (error != 0) goto bailout; } /* * If the zone creation failed, no luck for the user.
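		 * (The creating context cleared PASS_FLAG_ZONE_INPROG
		 * without ever setting PASS_FLAG_ZONE_VALID, which is what
		 * the check below detects.)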
*/ if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0){ error = ENOMEM; goto bailout; } } bailout: return (error); } static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req) { union ccb *ccb; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; int i, numbufs; ccb = &io_req->ccb; switch (ccb->ccb_h.func_code) { case XPT_DEV_MATCH: numbufs = min(io_req->num_bufs, 2); if (numbufs == 1) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; } break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: data_ptrs[0] = &ccb->csio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_ATA_IO: data_ptrs[0] = &ccb->ataio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_SMP_IO: numbufs = min(io_req->num_bufs, 2); data_ptrs[0] = &ccb->smpio.smp_request; data_ptrs[1] = &ccb->smpio.smp_response; break; case XPT_DEV_ADVINFO: numbufs = min(io_req->num_bufs, 1); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; break; case XPT_NVME_IO: case XPT_NVME_ADMIN: data_ptrs[0] = &ccb->nvmeio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; default: /* allow ourselves to be swapped once again */ return; break; /* NOTREACHED */ } if (io_req->flags & PASS_IO_USER_SEG_MALLOC) { free(io_req->user_segptr, M_SCSIPASS); io_req->user_segptr = NULL; } /* * We only want to free memory we malloced. */ if (io_req->data_flags == CAM_DATA_VADDR) { for (i = 0; i < io_req->num_bufs; i++) { if (io_req->kern_bufs[i] == NULL) continue; free(io_req->kern_bufs[i], M_SCSIPASS); io_req->kern_bufs[i] = NULL; } } else if (io_req->data_flags == CAM_DATA_SG) { for (i = 0; i < io_req->num_kern_segs; i++) { if ((uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr == NULL) continue; uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr); io_req->kern_segptr[i].ds_addr = 0; } } if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) { free(io_req->kern_segptr, M_SCSIPASS); io_req->kern_segptr = NULL; } if (io_req->data_flags != CAM_DATA_PADDR) { for (i = 0; i < numbufs; i++) { /* * Restore the user's buffer pointers to their * previous values. 
*/ if (io_req->user_bufs[i] != NULL) *data_ptrs[i] = io_req->user_bufs[i]; } } } static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction) { - bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy; + bus_size_t kern_watermark, user_watermark, len_to_copy; bus_dma_segment_t *user_sglist, *kern_sglist; int i, j, error; error = 0; kern_watermark = 0; user_watermark = 0; len_to_copy = 0; - len_copied = 0; user_sglist = io_req->user_segptr; kern_sglist = io_req->kern_segptr; for (i = 0, j = 0; i < io_req->num_user_segs && j < io_req->num_kern_segs;) { uint8_t *user_ptr, *kern_ptr; len_to_copy = min(user_sglist[i].ds_len -user_watermark, kern_sglist[j].ds_len - kern_watermark); user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr; user_ptr = user_ptr + user_watermark; kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr; kern_ptr = kern_ptr + kern_watermark; user_watermark += len_to_copy; kern_watermark += len_to_copy; if (direction == CAM_DIR_IN) { error = copyout(kern_ptr, user_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyout of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, kern_ptr, user_ptr, error); goto bailout; } } else { error = copyin(user_ptr, kern_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyin of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, user_ptr, kern_ptr, error); goto bailout; } } - len_copied += len_to_copy; - if (user_sglist[i].ds_len == user_watermark) { i++; user_watermark = 0; } if (kern_sglist[j].ds_len == kern_watermark) { j++; kern_watermark = 0; } } bailout: return (error); } static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req) { union ccb *ccb; struct pass_softc *softc; int numbufs, i; uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t num_segs; uint16_t *seg_cnt_ptr; size_t maxmap; int error; cam_periph_assert(periph, MA_NOTOWNED); softc = periph->softc; error = 0; ccb = &io_req->ccb; maxmap = 0; num_segs = 0; seg_cnt_ptr = NULL; switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { printf("%s: invalid match buffer length 0\n", __func__); return(EINVAL); } if (ccb->cdm.pattern_buf_len > 0) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; lengths[0] = ccb->cdm.pattern_buf_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; lengths[1] = ccb->cdm.match_buf_len; dirs[1] = CAM_DIR_IN; numbufs = 2; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; lengths[0] = ccb->cdm.match_buf_len; dirs[0] = CAM_DIR_IN; numbufs = 1; } io_req->data_flags = CAM_DATA_VADDR; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * The user shouldn't be able to supply a bio. */ if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) return (EINVAL); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->csio.data_ptr; lengths[0] = ccb->csio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->csio.sglist_cnt; seg_cnt_ptr = &ccb->csio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; case XPT_ATA_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * We only support a single virtual address for ATA I/O. 
*/ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->ataio.data_ptr; lengths[0] = ccb->ataio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; maxmap = softc->maxio; break; case XPT_SMP_IO: io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->smpio.smp_request; lengths[0] = ccb->smpio.smp_request_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = &ccb->smpio.smp_response; lengths[1] = ccb->smpio.smp_response_len; dirs[1] = CAM_DIR_IN; numbufs = 2; maxmap = softc->maxio; break; case XPT_DEV_ADVINFO: if (ccb->cdai.bufsiz == 0) return (0); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; lengths[0] = ccb->cdai.bufsiz; dirs[0] = CAM_DIR_IN; numbufs = 1; break; case XPT_NVME_ADMIN: case XPT_NVME_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return (0); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->nvmeio.data_ptr; lengths[0] = ccb->nvmeio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->nvmeio.sglist_cnt; seg_cnt_ptr = &ccb->nvmeio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; default: return(EINVAL); break; /* NOTREACHED */ } io_req->num_bufs = numbufs; /* * If there is a maximum, check to make sure that the user's * request fits within the limit. In general, we should only have * a maximum length for requests that go to hardware. Otherwise it * is whatever we're able to malloc. */ for (i = 0; i < numbufs; i++) { io_req->user_bufs[i] = *data_ptrs[i]; io_req->dirs[i] = dirs[i]; io_req->lengths[i] = lengths[i]; if (maxmap == 0) continue; if (lengths[i] <= maxmap) continue; xpt_print(periph->path, "%s: data length %u > max allowed %u " "bytes\n", __func__, lengths[i], maxmap); error = EINVAL; goto bailout; } switch (io_req->data_flags) { case CAM_DATA_VADDR: /* Map or copy the buffer into kernel address space */ for (i = 0; i < numbufs; i++) { uint8_t *tmp_buf; /* * If for some reason no length is specified, we * don't need to allocate anything. */ if (io_req->lengths[i] == 0) continue; tmp_buf = malloc(lengths[i], M_SCSIPASS, M_WAITOK | M_ZERO); io_req->kern_bufs[i] = tmp_buf; *data_ptrs[i] = tmp_buf; #if 0 xpt_print(periph->path, "%s: malloced %p len %u, user " "buffer %p, operation: %s\n", __func__, tmp_buf, lengths[i], io_req->user_bufs[i], (dirs[i] == CAM_DIR_IN) ? "read" : "write"); #endif /* * We only need to copy in if the user is writing. */ if (dirs[i] != CAM_DIR_OUT) continue; error = copyin(io_req->user_bufs[i], io_req->kern_bufs[i], lengths[i]); if (error != 0) { xpt_print(periph->path, "%s: copy of user " "buffer from %p to %p failed with " "error %d\n", __func__, io_req->user_bufs[i], io_req->kern_bufs[i], error); goto bailout; } } break; case CAM_DATA_PADDR: /* Pass down the pointer as-is */ break; case CAM_DATA_SG: { size_t sg_length, size_to_go, alloc_size; uint32_t num_segs_needed; /* * Copy the user S/G list in, and then copy in the * individual segments. */ /* * We shouldn't see this, but check just in case. */ if (numbufs != 1) { xpt_print(periph->path, "%s: cannot currently handle " "more than one S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG flag set, " "but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. 
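		 *
		 * Once validated, the kernel-side buffer count computed
		 * below is effectively howmany(lengths[0],
		 * softc->io_zone_size):
		 *
		 *	num_segs_needed = lengths[0] / softc->io_zone_size;
		 *	if ((lengths[0] % softc->io_zone_size) != 0)
		 *		num_segs_needed++;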
*/ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* * We allocate buffers in io_zone_size increments for an * S/G list. This will generally be maxphys. */ if (lengths[0] <= softc->io_zone_size) num_segs_needed = 1; else { num_segs_needed = lengths[0] / softc->io_zone_size; if ((lengths[0] % softc->io_zone_size) != 0) num_segs_needed++; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = num_segs_needed; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; /* * If we have enough segments allocated by default to handle * the length of the user's S/G list, */ if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } if (num_segs_needed > PASS_MAX_SEGS) { io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_KERN_SEG_MALLOC; } else { io_req->kern_segptr = io_req->kern_segs; } /* * Allocate the kernel S/G list. */ for (size_to_go = lengths[0], i = 0; size_to_go > 0 && i < num_segs_needed; i++, size_to_go -= alloc_size) { uint8_t *kern_ptr; alloc_size = min(size_to_go, softc->io_zone_size); kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK); io_req->kern_segptr[i].ds_addr = (bus_addr_t)(uintptr_t)kern_ptr; io_req->kern_segptr[i].ds_len = alloc_size; } if (size_to_go > 0) { printf("%s: size_to_go = %zu, software error!\n", __func__, size_to_go); error = EINVAL; goto bailout; } *data_ptrs[0] = (uint8_t *)io_req->kern_segptr; *seg_cnt_ptr = io_req->num_kern_segs; /* * We only need to copy data here if the user is writing. */ if (dirs[0] == CAM_DIR_OUT) error = passcopysglist(periph, io_req, dirs[0]); break; } case CAM_DATA_SG_PADDR: { size_t sg_length; /* * We shouldn't see this, but check just in case. */ if (numbufs != 1) { printf("%s: cannot currently handle more than one " "S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag " "set, but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. 
*/ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = io_req->num_user_segs; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; io_req->kern_segptr = io_req->user_segptr; error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } break; } default: case CAM_DATA_BIO: /* * A user shouldn't be attaching a bio to the CCB. It * isn't a user-accessible structure. */ error = EINVAL; break; } bailout: if (error != 0) passiocleanup(softc, io_req); return (error); } static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req) { struct pass_softc *softc; int error; int i; error = 0; softc = (struct pass_softc *)periph->softc; switch (io_req->data_flags) { case CAM_DATA_VADDR: /* * Copy back to the user buffer if this was a read. */ for (i = 0; i < io_req->num_bufs; i++) { if (io_req->dirs[i] != CAM_DIR_IN) continue; error = copyout(io_req->kern_bufs[i], io_req->user_bufs[i], io_req->lengths[i]); if (error != 0) { xpt_print(periph->path, "Unable to copy %u " "bytes from %p to user address %p\n", io_req->lengths[i], io_req->kern_bufs[i], io_req->user_bufs[i]); goto bailout; } } break; case CAM_DATA_PADDR: /* Do nothing. The pointer is a physical address already */ break; case CAM_DATA_SG: /* * Copy back to the user buffer if this was a read. * Restore the user's S/G list buffer pointer. */ if (io_req->dirs[0] == CAM_DIR_IN) error = passcopysglist(periph, io_req, io_req->dirs[0]); break; case CAM_DATA_SG_PADDR: /* * Restore the user's S/G list buffer pointer. No need to * copy. */ break; default: case CAM_DATA_BIO: error = EINVAL; break; } bailout: /* * Reset the user's pointers to their original values and free * allocated memory. */ passiocleanup(softc, io_req); return (error); } static int passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl); } return (error); } static int passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; uint32_t priority; periph = (struct cam_periph *)dev->si_drv1; cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; error = 0; switch (cmd) { case CAMIOCOMMAND: { union ccb *inccb; union ccb *ccb; int ccb_malloced; inccb = (union ccb *)addr; #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (inccb->ccb_h.func_code == XPT_SCSI_IO) inccb->csio.bio = NULL; #endif if (inccb->ccb_h.flags & CAM_UNLOCKED) { error = EINVAL; break; } /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. 
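		 * Such function codes carry the XPT_FC_XPT_ONLY flag, which
		 * is what the test below rejects.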
*/ if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", inccb->ccb_h.func_code); error = ENODEV; break; } /* Compatibility for RL/priority-unaware code. */ priority = inccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Non-immediate CCBs need a CCB from the per-device pool * of CCBs, which is scheduled by the transport layer. * Immediate CCBs and user-supplied CCBs should just be * malloced. */ if ((inccb->ccb_h.func_code & XPT_FC_QUEUED) && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) { ccb = cam_periph_getccb(periph, priority); ccb_malloced = 0; } else { ccb = xpt_alloc_ccb_nowait(); if (ccb != NULL) xpt_setup_ccb(&ccb->ccb_h, periph->path, priority); ccb_malloced = 1; } if (ccb == NULL) { xpt_print(periph->path, "unable to allocate CCB\n"); error = ENOMEM; break; } error = passsendccb(periph, ccb, inccb); if (ccb_malloced) xpt_free_ccb(ccb); else xpt_release_ccb(ccb); break; } case CAMIOQUEUE: { struct pass_io_req *io_req; union ccb **user_ccb, *ccb; xpt_opcode fc; #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { error = ENOTTY; goto bailout; } #endif if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) { error = passcreatezone(periph); if (error != 0) goto bailout; } /* * We're going to do a blocking allocation for this I/O * request, so we have to drop the lock. */ cam_periph_unlock(periph); io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO); ccb = &io_req->ccb; user_ccb = (union ccb **)addr; /* * Unlike the CAMIOCOMMAND ioctl above, we only have a * pointer to the user's CCB, so we have to copy the whole * thing in to a buffer we have allocated (above) instead * of allowing the ioctl code to malloc a buffer and copy * it in. * * This is an advantage for this asynchronous interface, * since we don't want the memory to get freed while the * CCB is outstanding. */ #if 0 xpt_print(periph->path, "Copying user CCB %p to " "kernel address %p\n", *user_ccb, ccb); #endif error = copyin(*user_ccb, ccb, sizeof(*ccb)); if (error != 0) { xpt_print(periph->path, "Copy of user CCB %p to " "kernel address %p failed with error %d\n", *user_ccb, ccb, error); goto camioqueue_error; } #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.bio = NULL; #endif if (ccb->ccb_h.flags & CAM_UNLOCKED) { error = EINVAL; goto camioqueue_error; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) { if (ccb->csio.cdb_len > IOCDBLEN) { error = EINVAL; goto camioqueue_error; } error = copyin(ccb->csio.cdb_io.cdb_ptr, ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len); if (error != 0) goto camioqueue_error; ccb->ccb_h.flags &= ~CAM_CDB_POINTER; } /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. */ if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", ccb->ccb_h.func_code); error = ENODEV; goto camioqueue_error; } /* * Save the user's CCB pointer as well as his linked list * pointers and peripheral private area so that we can * restore these later. */ io_req->user_ccb_ptr = *user_ccb; io_req->user_periph_links = ccb->ccb_h.periph_links; io_req->user_periph_priv = ccb->ccb_h.periph_priv; /* * Now that we've saved the user's values, we can set our * own peripheral private entry. */ ccb->ccb_h.ccb_ioreq = io_req; /* Compatibility for RL/priority-unaware code. 
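		 * Priorities at or below CAM_PRIORITY_OOB are reserved for
		 * out-of-band operations, so a user-supplied value in that
		 * range is shifted just past them:
		 *
		 *	if (priority <= CAM_PRIORITY_OOB)
		 *		priority += CAM_PRIORITY_OOB + 1;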
	case CAMIOQUEUE:
	{
		struct pass_io_req *io_req;
		union ccb **user_ccb, *ccb;
		xpt_opcode fc;

#ifdef COMPAT_FREEBSD32
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			error = ENOTTY;
			goto bailout;
		}
#endif
		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
			error = passcreatezone(periph);
			if (error != 0)
				goto bailout;
		}

		/*
		 * We're going to do a blocking allocation for this I/O
		 * request, so we have to drop the lock.
		 */
		cam_periph_unlock(periph);

		io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
		ccb = &io_req->ccb;
		user_ccb = (union ccb **)addr;

		/*
		 * Unlike the CAMIOCOMMAND ioctl above, we only have a
		 * pointer to the user's CCB, so we have to copy the whole
		 * thing into a buffer we have allocated (above) instead
		 * of allowing the ioctl code to malloc a buffer and copy
		 * it in.
		 *
		 * This is an advantage for this asynchronous interface,
		 * since we don't want the memory to get freed while the
		 * CCB is outstanding.
		 */
#if 0
		xpt_print(periph->path, "Copying user CCB %p to "
		    "kernel address %p\n", *user_ccb, ccb);
#endif
		error = copyin(*user_ccb, ccb, sizeof(*ccb));
		if (error != 0) {
			xpt_print(periph->path, "Copy of user CCB %p to "
			    "kernel address %p failed with error %d\n",
			    *user_ccb, ccb, error);
			goto camioqueue_error;
		}
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.bio = NULL;
#endif

		if (ccb->ccb_h.flags & CAM_UNLOCKED) {
			error = EINVAL;
			goto camioqueue_error;
		}

		if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
			if (ccb->csio.cdb_len > IOCDBLEN) {
				error = EINVAL;
				goto camioqueue_error;
			}
			error = copyin(ccb->csio.cdb_io.cdb_ptr,
			    ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len);
			if (error != 0)
				goto camioqueue_error;
			ccb->ccb_h.flags &= ~CAM_CDB_POINTER;
		}

		/*
		 * Some CCB types, like scan bus and scan lun, can only go
		 * through the transport layer device.
		 */
		if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
			xpt_print(periph->path, "CCB function code %#x is "
			    "restricted to the XPT device\n",
			    ccb->ccb_h.func_code);
			error = ENODEV;
			goto camioqueue_error;
		}

		/*
		 * Save the user's CCB pointer as well as its linked list
		 * pointers and peripheral private area so that we can
		 * restore these later.
		 */
		io_req->user_ccb_ptr = *user_ccb;
		io_req->user_periph_links = ccb->ccb_h.periph_links;
		io_req->user_periph_priv = ccb->ccb_h.periph_priv;

		/*
		 * Now that we've saved the user's values, we can set our
		 * own peripheral private entry.
		 */
		ccb->ccb_h.ccb_ioreq = io_req;

		/* Compatibility for RL/priority-unaware code. */
		priority = ccb->ccb_h.pinfo.priority;
		if (priority <= CAM_PRIORITY_OOB)
			priority += CAM_PRIORITY_OOB + 1;

		/*
		 * Set up fields in the CCB like the path and the priority.
		 * The path in particular cannot be done in userland, since
		 * it is a pointer to a kernel data structure.
		 */
		xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority,
		    ccb->ccb_h.flags);

		/*
		 * Set up our done routine.  There is no way for the user to
		 * have a valid pointer here.
		 */
		ccb->ccb_h.cbfcnp = passdone;

		fc = ccb->ccb_h.func_code;
		/*
		 * If this function code has memory that can be mapped in
		 * or out, we need to call passmemsetup().
		 */
		if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
		 || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
		 || (fc == XPT_DEV_ADVINFO)
		 || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) {
			error = passmemsetup(periph, io_req);
			if (error != 0)
				goto camioqueue_error;
		} else
			io_req->mapinfo.num_bufs_used = 0;

		cam_periph_lock(periph);

		/*
		 * Everything goes on the incoming queue initially.
		 */
		TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);

		/*
		 * If the CCB is queued, and is not a user CCB, then
		 * we need to allocate a slot for it.  Call xpt_schedule()
		 * so that our start routine will get called when a CCB is
		 * available.
		 */
		if ((fc & XPT_FC_QUEUED)
		 && ((fc & XPT_FC_USER_CCB) == 0)) {
			xpt_schedule(periph, priority);
			break;
		}

		/*
		 * At this point, the CCB in question is either an
		 * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB
		 * and therefore should be malloced, not allocated via a slot.
		 * Remove the CCB from the incoming queue and add it to the
		 * active queue.
		 */
		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);

		xpt_action(ccb);

		/*
		 * If this is not a queued CCB (i.e. it is an immediate CCB),
		 * then it is already done.  We need to put it on the done
		 * queue for the user to fetch.
		 */
		if ((fc & XPT_FC_QUEUED) == 0) {
			TAILQ_REMOVE(&softc->active_queue, io_req, links);
			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
		}
		break;

camioqueue_error:
		uma_zfree(softc->pass_zone, io_req);
		cam_periph_lock(periph);
		break;
	}
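	/*
	 * Note that CAMIOGET does not block: if nothing has completed yet
	 * it returns ENOENT, so callers are expected to wait for the
	 * descriptor to become readable (see passpoll() and passkqfilter()
	 * below) before issuing it.
	 */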
	case CAMIOGET:
	{
		union ccb **user_ccb;
		struct pass_io_req *io_req;
		int old_error;

#ifdef COMPAT_FREEBSD32
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			error = ENOTTY;
			goto bailout;
		}
#endif
		user_ccb = (union ccb **)addr;
		old_error = 0;

		io_req = TAILQ_FIRST(&softc->done_queue);
		if (io_req == NULL) {
			error = ENOENT;
			break;
		}

		/*
		 * Remove the I/O from the done queue.
		 */
		TAILQ_REMOVE(&softc->done_queue, io_req, links);

		/*
		 * We have to drop the lock during the copyout because the
		 * copyout can result in VM faults that require sleeping.
		 */
		cam_periph_unlock(periph);

		/*
		 * Do any needed copies (e.g. for reads) and revert the
		 * pointers in the CCB back to the user's pointers.
		 */
		error = passmemdone(periph, io_req);

		old_error = error;

		io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
		io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;

#if 0
		xpt_print(periph->path, "Copying to user CCB %p from "
		    "kernel address %p\n", *user_ccb, &io_req->ccb);
#endif

		error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
		if (error != 0) {
			xpt_print(periph->path, "Copy to user CCB %p from "
			    "kernel address %p failed with error %d\n",
			    *user_ccb, &io_req->ccb, error);
		}

		/*
		 * Prefer the first error we got back, and make sure we
		 * don't overwrite bad status with good.
		 */
		if (old_error != 0)
			error = old_error;

		cam_periph_lock(periph);

		/*
		 * At this point, if there was an error, we could potentially
		 * re-queue the I/O and try again.  But why?  The error
		 * would almost certainly happen again.  We might as well
		 * not leak memory.
		 */
		uma_zfree(softc->pass_zone, io_req);
		break;
	}
	default:
		error = cam_periph_ioctl(periph, cmd, addr, passerror);
		break;
	}

bailout:
	cam_periph_unlock(periph);

	return(error);
}

static int
passpoll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	int revents;

	periph = (struct cam_periph *)dev->si_drv1;
	softc = (struct pass_softc *)periph->softc;

	revents = poll_events & (POLLOUT | POLLWRNORM);
	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
		cam_periph_lock(periph);

		if (!TAILQ_EMPTY(&softc->done_queue)) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
		cam_periph_unlock(periph);
		if (revents == 0)
			selrecord(td, &softc->read_select);
	}

	return (revents);
}

static int
passkqfilter(struct cdev *dev, struct knote *kn)
{
	struct cam_periph *periph;
	struct pass_softc *softc;

	periph = (struct cam_periph *)dev->si_drv1;
	softc = (struct pass_softc *)periph->softc;

	kn->kn_hook = (caddr_t)periph;
	kn->kn_fop = &passread_filtops;
	knlist_add(&softc->read_select.si_note, kn, 0);

	return (0);
}

static void
passreadfiltdetach(struct knote *kn)
{
	struct cam_periph *periph;
	struct pass_softc *softc;

	periph = (struct cam_periph *)kn->kn_hook;
	softc = (struct pass_softc *)periph->softc;

	knlist_remove(&softc->read_select.si_note, kn, 0);
}

static int
passreadfilt(struct knote *kn, long hint)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	int retval;

	periph = (struct cam_periph *)kn->kn_hook;
	softc = (struct pass_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	if (TAILQ_EMPTY(&softc->done_queue))
		retval = 0;
	else
		retval = 1;

	return (retval);
}
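/*
 * Example of waiting for completed CCBs using the read filter above,
 * from userland (a sketch with hypothetical names; pass_fd is an open
 * pass(4) descriptor, error handling omitted):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, pass_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// register
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	// wait for a completion
 */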
/*
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 */
static int
passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct pass_softc *softc;
	struct cam_periph_map_info mapinfo;
	uint8_t *cmd;
	xpt_opcode fc;
	int error;

	softc = (struct pass_softc *)periph->softc;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
		cmd = __builtin_alloca(ccb->csio.cdb_len);
		error = copyin(ccb->csio.cdb_io.cdb_ptr, cmd,
		    ccb->csio.cdb_len);
		if (error)
			return (error);
		ccb->csio.cdb_io.cdb_ptr = cmd;
	}

	/*
	 * Let cam_periph_mapmem do a sanity check on the data pointer format.
	 * Even if no data transfer is needed, it's a cheap check and it
	 * simplifies the code.
	 */
	fc = ccb->ccb_h.func_code;
	if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO)
	 || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO)
	 || (fc == XPT_MMC_IO) || (fc == XPT_NVME_ADMIN)
	 || (fc == XPT_NVME_IO)) {
		bzero(&mapinfo, sizeof(mapinfo));

		/*
		 * cam_periph_mapmem calls into proc and vm functions that can
		 * sleep as well as trigger I/O, so we can't hold the lock.
		 * Dropping it here is reasonably safe.
		 */
		cam_periph_unlock(periph);
		error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
		cam_periph_lock(periph);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
		 */
		if (error)
			return(error);
	} else
		/* Ensure that the unmap call later on is a no-op. */
		mapinfo.num_bufs_used = 0;

	/*
	 * If the user wants us to perform any error recovery, then honor
	 * that request.  Otherwise, it's up to the user to perform any
	 * error recovery.
	 */
	cam_periph_runccb(ccb, (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
	    passerror : NULL, /* cam_flags */ CAM_RETRY_SELTO,
	    /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT,
	    softc->device_stats);

	cam_periph_unlock(periph);
	cam_periph_unmapmem(ccb, &mapinfo);
	cam_periph_lock(periph);

	ccb->ccb_h.cbfcnp = NULL;
	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
	bcopy(ccb, inccb, sizeof(union ccb));

	return(0);
}

static int
passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
-	struct cam_periph *periph;
-	struct pass_softc *softc;
-
-	periph = xpt_path_periph(ccb->ccb_h.path);
-	softc = (struct pass_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags));
}