Index: sys/cam/cam_periph.h =================================================================== --- sys/cam/cam_periph.h +++ sys/cam/cam_periph.h @@ -103,10 +103,14 @@ void *arg); typedef void periph_oninv_t (struct cam_periph *periph); typedef void periph_dtor_t (struct cam_periph *periph); +typedef void periph_alloc_rel_t (struct cam_periph *periph); +typedef void periph_status_t (struct cam_periph *periph); struct cam_periph { periph_start_t *periph_start; periph_oninv_t *periph_oninval; periph_dtor_t *periph_dtor; + periph_alloc_rel_t *periph_alloc_rel; + periph_status_t *periph_status; char *periph_name; struct cam_path *path; /* Compiled path to device */ void *softc; @@ -123,6 +127,7 @@ #define CAM_PERIPH_RUN_TASK 0x40 #define CAM_PERIPH_FREE 0x80 #define CAM_PERIPH_ANNOUNCED 0x100 +#define CAM_PERIPH_ALLOC_REF 0x200 uint32_t scheduled_priority; uint32_t immediate_priority; int periph_allocating; Index: sys/cam/cam_periph.c =================================================================== --- sys/cam/cam_periph.c +++ sys/cam/cam_periph.c @@ -179,19 +179,31 @@ */ if ((periph = cam_periph_find(path, name)) != NULL) { + if (periph->periph_status != NULL) + periph->periph_status(periph); + if ((periph->flags & CAM_PERIPH_INVALID) != 0 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; periph->deferred_callback = ac_callback; periph->deferred_ac = code; return (CAM_REQ_INPROG); + } else if ((periph->flags & CAM_PERIPH_INVALID) + && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND)) { + printf("%s: attempt to re-allocate " + "invalid device %s%d with new device already " + "found rejected flags %#x refcount %d\n", + __func__, periph->periph_name, + periph->unit_number, periph->flags, + periph->refcount); } else { - printf("cam_periph_alloc: attempt to re-allocate " + printf("%s: attempt to re-allocate " "valid device %s%d rejected flags %#x " - "refcount %d\n", periph->periph_name, + "refcount %d\n", __func__, periph->periph_name, periph->unit_number, periph->flags, periph->refcount); } + return (CAM_REQ_INVALID); } Index: sys/cam/cam_xpt.c =================================================================== --- sys/cam/cam_xpt.c +++ sys/cam/cam_xpt.c @@ -3193,9 +3193,26 @@ uint32_t prio; cam_periph_assert(periph, MA_OWNED); + /* + * If the peripheral has been invalidated, no need to continue, + * the device is gone. + */ + if (periph->flags & CAM_PERIPH_INVALID) + return; + if (periph->periph_allocating) return; periph->periph_allocating = 1; + + /* + * This flag is set by the peripheral on the initial probe, before + * any allocation happens. If it isn't set, that means that the + * peripheral doesn't acquire a reference for allocation, and we + * need to do it here. + */ + if ((periph->flags & CAM_PERIPH_ALLOC_REF) == 0) + cam_periph_doacquire(periph); + CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); device = periph->path->device; ccb = NULL; @@ -3238,6 +3255,24 @@ if (ccb != NULL) xpt_release_ccb(ccb); periph->periph_allocating = 0; + + /* + * If the peripheral doesn't have an allocation reference, we need + * to release here every time. If it does, and the peripheral was + * invalidated while we were allocating, we are responsible for + * releasing the allocation reference here. + */ + if (((periph->flags & CAM_PERIPH_ALLOC_REF) == 0) + || (periph->flags & CAM_PERIPH_INVALID)) { + /* + * If the peripheral has a routine for releasing its + * allocation reference, call it here. 
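+	 * Drivers that track their reference sources set this hook
+	 * (da(4) points it at daallocrel()); otherwise we drop the
+	 * reference directly with cam_periph_release_locked().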
+ */ + if (periph->periph_alloc_rel != NULL) + periph->periph_alloc_rel(periph); + else + cam_periph_release_locked(periph); + } } static void Index: sys/cam/scsi/scsi_da.c =================================================================== --- sys/cam/scsi/scsi_da.c +++ sys/cam/scsi/scsi_da.c @@ -94,6 +94,18 @@ } da_state; typedef enum { + DA_REF_NONE = 0x000, + DA_REF_OPEN = 0x001, + DA_REF_ALLOC = 0x002, + DA_REF_OTAG = 0x004, + DA_REF_GEOM = 0x008, + DA_REF_MEDIA = 0x010, + DA_REF_TUR = 0x020, + DA_REF_SYSCTL = 0x040, + DA_REF_REPROBE = 0x080, +} da_ref_src; + +typedef enum { DA_FLAG_PACK_INVALID = 0x000001, DA_FLAG_NEW_PACK = 0x000002, DA_FLAG_PACK_LOCKED = 0x000004, @@ -293,6 +305,7 @@ da_state state; da_flags flags; da_quirks quirks; + da_ref_src ref_src; int minimum_cmd_size; int error_inject; int trim_max_ranges; @@ -1305,6 +1318,11 @@ }, }; +static cam_status daacquire(struct cam_periph *periph, da_ref_src src); +static void darelease(struct cam_periph *periph, da_ref_src src, + int locked); +static void daallocrel(struct cam_periph *periph); +static void dastatus(struct cam_periph *periph); static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; @@ -1412,6 +1430,69 @@ static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); +static cam_status +daacquire(struct cam_periph *periph, da_ref_src src) +{ + struct da_softc *softc; + cam_status status; + + softc = (struct da_softc *)periph->softc; + + KASSERT((softc->ref_src & src) == 0, + ("daacquire: src %#x is already held refcnt %d, mask %#x", + src, periph->refcount, softc->ref_src)); + status = cam_periph_acquire(periph); + if (status == CAM_REQ_CMP) + softc->ref_src |= src; + return (status); +} + +static void +darelease(struct cam_periph *periph, da_ref_src src, int locked) +{ + struct da_softc *softc; + struct mtx *mtx; + + softc = (struct da_softc *)periph->softc; + + mtx = cam_periph_mtx(periph); + + if (locked == 0) + mtx_lock(mtx); + + KASSERT((softc->ref_src & src) != 0, + ("darelease: src %#x is NOT held", src)); + + softc->ref_src &= ~src; + + cam_periph_release_locked(periph); + + if (locked == 0) + mtx_unlock(mtx); +} + +static void +daallocrel(struct cam_periph *periph) +{ + darelease(periph, DA_REF_ALLOC, /*locked*/ 1); +} + +static void +dastatus(struct cam_periph *periph) +{ + struct da_softc *softc; + + + cam_periph_assert(periph, MA_OWNED); + + softc = (struct da_softc *)periph->softc; + + printf("%s%u: ref src %#x, refcount %d, softc refcount %d\n", + periph->periph_name, periph->unit_number, softc->ref_src, + periph->refcount, softc->refcount); + +} + static int daopen(struct disk *dp) { @@ -1420,14 +1501,14 @@ int error; periph = (struct cam_periph *)dp->d_drv1; - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (daacquire(periph, DA_REF_OPEN) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); - cam_periph_release(periph); + darelease(periph, DA_REF_OPEN, /*locked*/ 0); return (error); } @@ -1459,7 +1540,7 @@ cam_periph_unlock(periph); if (error != 0) - cam_periph_release(periph); + darelease(periph, DA_REF_OPEN, /*locked*/ 0); return (error); } @@ -1517,7 +1598,7 @@ while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); - cam_periph_release(periph); + darelease(periph, DA_REF_OPEN, /*locked*/ 0); return (0); } @@ -1709,7 +1790,16 @@ struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; - 
cam_periph_release(periph); + + printf("(%s%d:%s%d:%d:%d:%jx): got GEOM disk gone callback\n", + periph->periph_name, periph->unit_number, + xpt_path_sim(periph->path)->sim_name, + xpt_path_sim(periph->path)->unit_number, + xpt_path_sim(periph->path)->bus_id, + xpt_path_target_id(periph->path), + (uintmax_t)xpt_path_lun_id(periph->path)); + + darelease(periph, DA_REF_GEOM, /*locked*/ 0); } static void @@ -1719,6 +1809,16 @@ softc = (struct da_softc *)periph->softc; + printf("(%s%d:%s%d:%d:%d:%jx): lost device\n", + periph->periph_name, periph->unit_number, + xpt_path_sim(periph->path)->sim_name, + xpt_path_sim(periph->path)->unit_number, + xpt_path_sim(periph->path)->bus_id, + xpt_path_target_id(periph->path), + (uintmax_t)xpt_path_lun_id(periph->path)); + + cam_periph_assert(periph, MA_OWNED); + /* * De-register any async callbacks. */ @@ -1737,6 +1837,45 @@ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* + * Stop the ordered tag callout. If it isn't pending or executing + * we can go ahead and release the reference. If it couldn't be + * cancelled, the callout will + */ + if (callout_stop(&softc->sendordered_c) != 0) + darelease(periph, DA_REF_OTAG, /*locked*/ 1); + + /* + * Drain the media poll callout. If it isn't pending or executing + * we can go ahead and release the reference. If it couldn't be + * cancelled, we'll get a callback when it is done. + */ + if (callout_stop(&softc->mediapoll_c) != 0) + darelease(periph, DA_REF_MEDIA, /*locked*/ 1); + + /* + * If we are not allocating, we have the responsibility to release + * the reference held for the allocation code (xpt_run_allocq()). + * If the allocation code is active, it will release the reference + * once it completes. + */ + if (periph->periph_allocating == 0) + darelease(periph, DA_REF_ALLOC, /*locked*/ 1); + + /* + * If this device went away during the initial probe, it still has + * an active hold. Release the hold now that it has been + * invalidated. + * + * XXX KDM make sure this doesn't open up any holes in the probe. + */ +#if 0 + if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { + softc->flags |= DA_FLAG_ANNOUNCED; + cam_periph_unhold(periph); + } +#endif + + /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. 
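 * The DA_REF_GEOM reference taken in daregister() is dropped in
 * dadiskgonecb() once that callback arrives.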
*/ @@ -1750,6 +1889,14 @@ softc = (struct da_softc *)periph->softc; + printf("(%s%d:%s%d:%d:%d:%jx): removing device entry\n", + periph->periph_name, periph->unit_number, + xpt_path_sim(periph->path)->sim_name, + xpt_path_sim(periph->path)->unit_number, + xpt_path_sim(periph->path)->bus_id, + xpt_path_target_id(periph->path), + (uintmax_t)xpt_path_lun_id(periph->path)); + cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); @@ -1768,9 +1915,7 @@ "can't remove sysctl context\n"); } - callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); - callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } @@ -1869,11 +2014,11 @@ } case AC_SCSI_AEN: softc = (struct da_softc *)periph->softc; - if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { - if (cam_periph_acquire(periph) == CAM_REQ_CMP) { - cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); - daschedule(periph); - } + if (!cam_iosched_has_work_flags(softc->cam_iosched, + DA_WORK_TUR)) { + cam_iosched_set_work_flags(softc->cam_iosched, + DA_WORK_TUR); + daschedule(periph); } /* FALLTHROUGH */ case AC_SENT_BDR: @@ -1915,7 +2060,7 @@ * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { - cam_periph_release(periph); + darelease(periph, DA_REF_SYSCTL, /*locked*/ 0); return; } @@ -1930,7 +2075,7 @@ CTLFLAG_RD, 0, tmpstr); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); - cam_periph_release(periph); + darelease(periph, DA_REF_SYSCTL, /*locked*/ 0); return; } @@ -2012,7 +2157,7 @@ xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { - cam_periph_release(periph); + darelease(periph, DA_REF_SYSCTL, /*locked*/ 0); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { @@ -2063,7 +2208,7 @@ cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); - cam_periph_release(periph); + darelease(periph, DA_REF_SYSCTL, /*locked*/ 0); } static int @@ -2231,7 +2376,7 @@ softc->flags |= DA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else - cam_periph_release_locked(periph); + darelease(periph, DA_REF_REPROBE, /*locked*/ 1); } static void @@ -2401,6 +2546,8 @@ softc->rotating = 1; periph->softc = softc; + periph->periph_alloc_rel = daallocrel; + periph->periph_status = dastatus; /* * See if this device has any quirks. @@ -2450,13 +2597,44 @@ (void)cam_periph_hold(periph, PRIBIO); /* + * Acquire a reference for allocating. This will be released by + * the allocation code (xpt_run_allocq()) if the device goes away + * while we're allocating. Otherwise, it'll be released by + * daoninvalidate(). + */ + if (daacquire(periph, DA_REF_ALLOC) != CAM_REQ_CMP) { + cam_periph_unhold(periph); + xpt_print(periph->path, "%s: lost periph during " + "registration!\n", __func__); + return (CAM_REQ_CMP_ERR); + } + /* + * Tell the allocation code that we have acquired a reference for + * it that we will release if it is not active. + */ + periph->flags |= CAM_PERIPH_ALLOC_REF; + + /* + * Acquire a reference to the periph before we start the ordered + * tag callout. We'll release this reference once we have shut down + * the callout. + */ + if (daacquire(periph, DA_REF_OTAG) != CAM_REQ_CMP) { + cam_periph_unhold(periph); + xpt_print(periph->path, "%s: lost periph during " + "registration!\n", __func__); + return (CAM_REQ_CMP_ERR); + } + + /* * Schedule a periodic event to occasionally send an * ordered tag to a device. 
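 * The callout argument is now the periph (not the softc) and the callout
 * holds the DA_REF_OTAG reference; CALLOUT_RETURNUNLOCKED lets
 * dasendorderedtag() drop the periph lock itself before returning.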
*/ - callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); + callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), + CALLOUT_RETURNUNLOCKED); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, - dasendorderedtag, softc); + dasendorderedtag, periph); cam_periph_unlock(periph); /* @@ -2545,10 +2723,11 @@ * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ - if (cam_periph_acquire(periph) != CAM_REQ_CMP) { + if (daacquire(periph, DA_REF_GEOM) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); + cam_periph_unhold(periph); return (CAM_REQ_CMP_ERR); } @@ -2575,14 +2754,28 @@ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* - * Schedule a periodic media polling events. + * Acquire a reference to the periph before we start the media poll + * callout. We'll release this reference once we have shut down + * the callout. */ - callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); + if (daacquire(periph, DA_REF_MEDIA) != CAM_REQ_CMP) { + xpt_print(periph->path, "%s: lost periph during " + "registration!\n", __func__); + cam_periph_unhold(periph); + return (CAM_REQ_CMP_ERR); + } + + /* + * Schedule a periodic media polling event. + */ + callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), + CALLOUT_RETURNUNLOCKED); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && - da_poll_period != 0) + da_poll_period != 0) { callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); + } xpt_schedule(periph, CAM_PRIORITY_DEV); @@ -2902,8 +3095,23 @@ more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { - if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { - cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); + /* + * Note that we're acquiring a reference to the + * peripheral if we need to send a media poll TUR. + * This keeps the device from going away while + * we have I/O outstanding. + * + * In the general I/O case, we're protected from + * the peripheral going away while I/O is + * outstanding by the acquire in daopen() and + * release in daclose(). GEOM shouldn't send a close + * while I/O is outstanding. + */ + if ((cam_iosched_has_work_flags(softc->cam_iosched, + DA_WORK_TUR)) + && (daacquire(periph, DA_REF_TUR) == CAM_REQ_CMP)) { + cam_iosched_clr_work_flags(softc->cam_iosched, + DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone, @@ -2929,10 +3137,10 @@ } } - if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { - cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); - cam_periph_release_locked(periph); /* XXX is this still valid? I think so but unverified */ - } + if (cam_iosched_has_work_flags(softc->cam_iosched, + DA_WORK_TUR)) + cam_iosched_clr_work_flags(softc->cam_iosched, + DA_WORK_TUR); if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { @@ -4346,7 +4554,7 @@ * we have successfully attached. 
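 * The reference taken just below is tracked as DA_REF_SYSCTL and is
 * released at the end of dasysctlinit(), including its early-error returns.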
*/ /* increase the refcount */ - if (cam_periph_acquire(periph) == CAM_REQ_CMP) { + if (daacquire(periph, DA_REF_SYSCTL) == CAM_REQ_CMP) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); xpt_announce_periph(periph, announce_buf); @@ -5180,7 +5388,11 @@ /*getcount_only*/0); } xpt_release_ccb(done_ccb); - cam_periph_release_locked(periph); + /* + * Release the reference we acquired when we queued this + * CCB. + */ + darelease(periph, DA_REF_TUR, /*locked*/ 1); return; } default: @@ -5201,7 +5413,7 @@ if (softc->state != DA_STATE_NORMAL) return; - status = cam_periph_acquire(periph); + status = daacquire(periph, DA_REF_REPROBE); KASSERT(status == CAM_REQ_CMP, ("dareprobe: cam_periph_acquire failed")); @@ -5299,16 +5511,25 @@ struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; + if (periph->flags & CAM_PERIPH_INVALID) { + callout_deactivate(&softc->mediapoll_c); + cam_periph_unlock(periph); + darelease(periph, DA_REF_MEDIA, /*locked*/ 0); + return; + } + if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && LIST_EMPTY(&softc->pending_ccbs)) { - if (cam_periph_acquire(periph) == CAM_REQ_CMP) { - cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); - daschedule(periph); - } + cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); + daschedule(periph); } + /* Queue us up again */ if (da_poll_period != 0) - callout_schedule(&softc->mediapoll_c, da_poll_period * hz); + callout_schedule(&softc->mediapoll_c, + da_poll_period * hz); + + cam_periph_unlock(periph); } static void @@ -5469,8 +5690,19 @@ static void dasendorderedtag(void *arg) { - struct da_softc *softc = arg; + struct cam_periph *periph = arg; + struct da_softc *softc = periph->softc; + /* + * If the callout is pending, that means that it was rescheduled + * just before this instance was due to run. Normally the pending + * bit should be clear when we enter. + */ + if (callout_pending(&softc->sendordered_c)) { + cam_periph_unlock(periph); + return; + } + if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) @@ -5478,10 +5710,32 @@ softc->flags &= ~DA_FLAG_WAS_OTAG; } } - /* Queue us up again */ - callout_reset(&softc->sendordered_c, - (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, - dasendorderedtag, softc); + + /* + * Check to see whether this peripheral has been invalidated. If + * it has, and the callout has fired, it means that this callout + * was pending and could not be stopped. So, in that case, we need + * to release the reference held on the peripheral for this callout. + * + * This callout was initialized with the CALLOUT_RETURNUNLOCKED + * flag, because in the case where we're the last reference held + * on the peripheral, our call to cam_periph_release() may trigger + * a free of the peripheral driver. If we didn't return unlocked, + * the callout code would have to unlock the mutex after this call + * finishes. If this is the last reference, though, the mutex will + * have already been freed. 
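+ * damediapoll() handles its DA_REF_MEDIA reference the same way; its
+ * callout is likewise created with CALLOUT_RETURNUNLOCKED.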
+ */ + if (periph->flags & CAM_PERIPH_INVALID) { + callout_deactivate(&softc->sendordered_c); + cam_periph_unlock(periph); + darelease(periph, DA_REF_OTAG, /*locked*/ 0); + } else { + /* Queue us up again */ + callout_reset(&softc->sendordered_c, + (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, + dasendorderedtag, periph); + cam_periph_unlock(periph); + } } /* Index: sys/geom/geom_dev.c =================================================================== --- sys/geom/geom_dev.c +++ sys/geom/geom_dev.c @@ -723,6 +723,8 @@ sc = cp->private; g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name); + printf("%s: got DEVFS callback\n", cp->geom->name); + mtx_lock(&sc->sc_mtx); sc->sc_dev = NULL; sc->sc_alias = NULL; @@ -757,6 +759,8 @@ if (dev->si_flags & SI_DUMPDEV) (void)set_dumper(NULL, NULL, curthread); + printf("%s: calling destroy_dev_sched_cb\n", cp->geom->name); + /* Destroy the struct cdev *so we get no more requests */ destroy_dev_sched_cb(dev, g_dev_callback, cp); } Index: tests/sys/Makefile =================================================================== --- tests/sys/Makefile +++ tests/sys/Makefile @@ -19,6 +19,7 @@ TESTS_SUBDIRS+= vm # Items not integrated into kyua runs by default +SUBDIR+= devad2 SUBDIR+= pjdfstest .include Index: tests/sys/devad2/Makefile =================================================================== --- tests/sys/devad2/Makefile +++ tests/sys/devad2/Makefile @@ -0,0 +1,17 @@ +# $FreeBSD$ + +.include + +PROG_CXX= devad2 +SRCS= devad2_cam.cc devad2_main.cc devad2_devctl.cc +# DPADD= ${LIBCAM} ${LIBSBUF} ${LIBUTIL} ${LIBDEVCTL} ${LIBPTHREAD} +# LDADD= -lcam -lsbuf -lutil -lprivatedevdctl -lpthread +LIBADD=cam sbuf util devctl devdctl pthread +MAN= +# CFLAGS += -O0 -g + +TESTSDIR= ${TESTSBASE}/sys/devad2 +BINDIR= ${TESTSDIR} +ATF_TESTS_SH= devad2_test + +.include Index: tests/sys/devad2/devad2.h =================================================================== --- tests/sys/devad2/devad2.h +++ tests/sys/devad2/devad2.h @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2013, 2014 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ +#ifndef __DEVAD2_H_ +#define __DEVAD2_H_ + +struct devad_phy_args { + char enc_device[32]; + int phy; + int cycle_interval; + int flags; +#define DEVAD_PHY_FLAG_STOP 0x01 + pthread_t thread_data; + STAILQ_ENTRY(devad_phy_args) links; +}; + +struct devad_openclose_args { + char cam_device[32]; + int cycle_interval; + pthread_t thread_data; + int flags; +#define DEVAD_OC_FLAG_STOP 0x01 + STAILQ_ENTRY(devad_openclose_args) links; +}; + +#define DEVAD_READ_BUF_SIZE 131072 + +struct devad_disk { + char periph_name[16]; + unsigned int unit_number; + struct cam_device *dev; + struct devad_softc *softc; + uint32_t generation; + int gone; + + uint8_t read_buf[DEVAD_READ_BUF_SIZE]; + uint64_t cur_offset; + uint32_t secsize; + uint64_t max_sector; + union ccb *active_ccb; + + TAILQ_ENTRY(devad_disk) links; +}; + +struct devad_softc { + int num_disks; + int kq; + uint32_t xpt_generation; + uint64_t res_key; + pthread_mutex_t mutex; + TAILQ_HEAD(, devad_disk) disk_list; + STAILQ_HEAD(, devad_phy_args) phy_list; + STAILQ_HEAD(, devad_openclose_args) openclose_list; +}; + +__BEGIN_DECLS + +void devad_disk_remove(struct devad_softc *softc, struct devad_disk *disk); +int devad_disk_remove_device(struct devad_softc *softc, char *name, + unsigned int unit_number); +int devad_disk_probe_device(struct devad_softc *softc, char *name, + unsigned int unit_number); +int devad_disk_get_generation(uint32_t *generation); +int devad_disk_scan(struct devad_softc *softc, int print_devs); +void devad_disk_read_done(struct devad_disk *disk); + +__END_DECLS + +#endif /* __DEVAD2_H_ */ Index: tests/sys/devad2/devad2_cam.cc =================================================================== --- tests/sys/devad2/devad2_cam.cc +++ tests/sys/devad2/devad2_cam.cc @@ -0,0 +1,770 @@ +/* + * Copyright (c) 1997-2007 Kenneth D. Merry + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/*- + * Copyright (c) 2013, 2014 Spectra Logic Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "devad2.h" + +#define ERROR_BUF_LEN 512 +#define XPT_GEN_NAME "kern.cam.xpt_generation" + +/* Generically usefull offsets into the peripheral private area */ +#define ppriv_ptr0 periph_priv.entries[0].ptr +#define ppriv_ptr1 periph_priv.entries[1].ptr +#define ppriv_field0 periph_priv.entries[0].field +#define ppriv_field1 periph_priv.entries[1].field + +#define ccb_ptr ppriv_ptr0 + +void devad_disk_free(struct devad_disk *disk); + +int +devad_disk_read_start(struct devad_disk *disk, union ccb *ccb) +{ + int retval = 0; + + if (((disk->cur_offset / disk->secsize) + + (sizeof(disk->read_buf) / disk->secsize)) > disk->max_sector + 1) { + /* + * We're at the end of the disk. 
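+		 * Leave retval at 0; we simply stop issuing reads for
+		 * this device.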
+ */ + goto bailout; + } + if (ccb == NULL) { + ccb = cam_getccb(disk->dev); + if (ccb == NULL) + err(1, "Unable to allocate CCB"); + } + + bzero(&(&ccb->ccb_h)[1], + sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + + scsi_read_write(&ccb->csio, + /*retries*/ 2, + /*cbfcnp*/ NULL, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*read*/ SCSI_RW_READ, + /*byte2*/ 0, + /*minimum_cmd_size*/ 0, + /*lba*/ disk->cur_offset / disk->secsize, + /*block_count*/ sizeof(disk->read_buf) / disk->secsize, + /*data_ptr*/ disk->read_buf, + /*dxfer_len*/ sizeof(disk->read_buf), + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ 60000); + + ccb->ccb_h.flags |= CAM_DEV_QFRZDIS | CAM_PASS_ERR_RECOVER; + /* Save a pointer to the original CCB so that we can free it */ + ccb->ccb_h.ccb_ptr = ccb; + disk->active_ccb = ccb; + + if (ioctl(disk->dev->fd, CAMIOQUEUE, ccb) == -1) { + warn("error sending READ to %s%u", disk->periph_name, + disk->unit_number); + cam_freeccb(ccb); + } +bailout: + return (retval); +} + +void +devad_disk_read_done(struct devad_disk *disk) +{ + union ccb *ccb; + int retval; + + ccb = (union ccb *)calloc(sizeof(*ccb), 1); + if (ccb == NULL) + return; + + /* + * Currently there should only be one outstanding I/O at a time, + * but we put this in a while loop just in case. + */ + while ((retval = ioctl(disk->dev->fd, CAMIOGET, ccb)) != -1) { + union ccb *orig_ccb; + + orig_ccb = (union ccb *)ccb->ccb_h.ccb_ptr; + + bcopy(ccb, orig_ccb, sizeof(*ccb)); + + if ((orig_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + char error_buf[ERROR_BUF_LEN]; + + cam_error_string(disk->dev, orig_ccb, error_buf, + sizeof(error_buf), CAM_ESF_ALL, CAM_EPF_ALL); + warnx("%s", error_buf); + cam_freeccb(orig_ccb); + disk->active_ccb = NULL; + } else { + disk->cur_offset += orig_ccb->csio.dxfer_len - + orig_ccb->csio.resid; + if (disk->gone == 0) + retval = devad_disk_read_start(disk, orig_ccb); + } + } + if (disk->gone != 0) + devad_disk_free(disk); + + free(ccb); +} + +int +devad_disk_read_cap(struct devad_disk *disk) +{ + struct scsi_read_capacity_data rcap; + struct scsi_read_capacity_data_long rcaplong; + union ccb *ccb = NULL; + uint64_t maxsector; + uint32_t block_len; + int retval = 0; + + ccb = cam_getccb(disk->dev); + if (ccb == NULL) { + warnx("%s: error allocating ccb", __func__); + retval = 1; + goto bailout; + } + + bzero(&(&ccb->ccb_h)[1], + sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); + + scsi_read_capacity(&ccb->csio, + /*retries*/ 2, + /*cbfcnp*/ NULL, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + &rcap, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ 5000); + + ccb->ccb_h.flags |= CAM_DEV_QFRZDIS | CAM_PASS_ERR_RECOVER; + + if (cam_send_ccb(disk->dev, ccb) != 0) { + warn("error sending READ CAPACITY to %s%u", disk->periph_name, + disk->unit_number); + retval = 1; + goto bailout; + } + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(disk->dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, + stderr); + retval = 1; + goto bailout; + } + + maxsector = scsi_4btoul(rcap.addr); + block_len = scsi_4btoul(rcap.length); + + /* + * A last block of 2^32-1 means that the true capacity is over 2TB, + * and we need to issue the long READ CAPACITY to get the real + * capacity. Otherwise, we're done. 
+ */ + if (maxsector != 0xffffffff) + goto bailout; + + scsi_read_capacity_16(&ccb->csio, + /*retries*/ 2, + /*cbfcnp*/ NULL, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*lba*/ 0, + /*reladdr*/ 0, + /*pmi*/ 0, + (uint8_t*)&rcaplong, + sizeof(rcaplong), + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ 5000); + + /* Disable freezing the device queue */ + ccb->ccb_h.flags |= CAM_DEV_QFRZDIS | CAM_PASS_ERR_RECOVER; + + if (cam_send_ccb(disk->dev, ccb) < 0) { + warn("error sending READ CAPACITY (16) command"); + retval = 1; + goto bailout; + } + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(disk->dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, + stderr); + retval = 1; + goto bailout; + } + + maxsector = scsi_8btou64(rcaplong.addr); + block_len = scsi_4btoul(rcaplong.length); + +bailout: + if (retval == 0) { + disk->secsize = block_len; + disk->max_sector = maxsector; + } + if (ccb != NULL) + cam_freeccb(ccb); + + return (retval); +} + +/* + * Free storage allocated to a disk and close any file descriptors. The + * disk should be removed from the list first. + */ +void +devad_disk_free(struct devad_disk *disk) +{ + cam_close_device(disk->dev); + disk->dev = NULL; + cam_freeccb(disk->active_ccb); + disk->active_ccb = NULL; + free(disk); +} + +/* + * Remove a disk from the list and free it. + */ +void +devad_disk_remove(struct devad_softc *softc, struct devad_disk *disk) +{ + pthread_mutex_lock(&softc->mutex); + TAILQ_REMOVE(&softc->disk_list, disk, links); + softc->num_disks--; + disk->gone = 1; + pthread_mutex_unlock(&softc->mutex); +} + +/* + * Given a peripheral name and unit number, find a disk if it is in the + * list. + * + * Returns a disk structure on success, NULL on failure. + */ +struct devad_disk * +devad_disk_find_dev(struct devad_softc *softc, char *name, + unsigned int unit_number) +{ + struct devad_disk *disk; + + pthread_mutex_lock(&softc->mutex); + TAILQ_FOREACH(disk, &softc->disk_list, links) { + if ((disk->unit_number == unit_number) + && (strcmp(name, disk->periph_name) == 0)) { + pthread_mutex_unlock(&softc->mutex); + return (disk); + } + } + pthread_mutex_unlock(&softc->mutex); + + return (NULL); +} + + +/* + * Given a peripheral name and unit number, find a disk in the list if it + * exists, or allocate a new disk if it does not exist. + * + * Returns a disk structure on success, NULL on failure. + */ +struct devad_disk * +devad_disk_find_or_alloc(struct devad_softc *softc, char *name, + unsigned int unit_number) +{ + struct devad_disk *disk = NULL, *cur_disk = NULL; + struct cam_device *dev = NULL; + int retval = 0; + + dev = cam_open_spec_device(name, unit_number, O_RDWR, NULL); + if (dev == NULL) { + printf("%s\n", cam_errbuf); + retval = 1; + goto bailout; + } + + cur_disk = devad_disk_find_dev(softc, name, unit_number); + if (cur_disk != NULL) { + /* + * We already have another disk by the same name. Is this + * the same device? + */ + if ((dev->serial_num_len == 0) + && (cur_disk->dev->serial_num_len == 0)) { + /* + * No serial numbers, so no way to easily + * distinguish between these two. So we'll keep + * the current disk. + */ + disk = cur_disk; + cam_close_device(dev); + dev = disk->dev; + } else if (dev->serial_num_len != + cur_disk->dev->serial_num_len) { + /* + * These disks are different. The new one wins. 
+ */ + devad_disk_remove(softc, cur_disk); + + } else if ((dev->serial_num_len != 0) + && (cur_disk->dev->serial_num_len != 0) + && (cur_disk->dev->serial_num_len == dev->serial_num_len) + && (bcmp(dev->serial_num, cur_disk->dev->serial_num, + dev->serial_num_len) == 0)) { + /* + * These disks are the same. The current one wins. + */ + disk = cur_disk; + cam_close_device(dev); + dev = disk->dev; + } else if ((dev->serial_num_len == 0) + || (cur_disk->dev->serial_num_len == 0)) { + /* + * The disk without a serial number loses. + */ + if (dev->serial_num_len == 0) { + disk = cur_disk; + cam_close_device(dev); + dev = disk->dev; + } else { + devad_disk_remove(softc, cur_disk); + } + } else { + /* + * These disks are different. This means that + * another device has appeared at the same name. + * We need to remove the existing device. + */ + devad_disk_remove(softc, cur_disk); + } + } + + if (disk == NULL) { + struct kevent ke; + disk = (struct devad_disk *)malloc(sizeof(*disk)); + if (disk == NULL) { + warn("unable to allocate %zu bytes", + sizeof(*disk)); + retval = 1; + goto bailout; + } + bzero(disk, sizeof(*disk)); + strlcpy(disk->periph_name, name, sizeof(disk->periph_name)); + disk->unit_number = unit_number; + disk->dev = dev; + disk->softc = softc; + /* + * This is just a placeholder to avoid divide by zero + * problems. We'll issue a READ CAPACITY shortly. + */ + disk->secsize = 512; + EV_SET(&ke, dev->fd, EVFILT_READ, EV_ADD| EV_ENABLE, 0, 0, + disk); + /* + * Add a read event for this device. It will be deleted + * automatically when the file descriptor is closed. + */ + if (kevent(softc->kq, &ke, 1, NULL, 0, NULL) == -1) { + /* + * XXX KDM should we just bail out here? We'll be + * able to queue commands but won't receive + * notification of + */ + warn("unable to register kevent!"); + } + pthread_mutex_lock(&softc->mutex); + TAILQ_INSERT_TAIL(&softc->disk_list, disk, links); + softc->num_disks++; + pthread_mutex_unlock(&softc->mutex); + } +bailout: + return (disk); +} + +int +devad_disk_remove_device(struct devad_softc *softc, char *name, + unsigned int unit_number) +{ + struct devad_disk *disk = NULL; + + disk = devad_disk_find_dev(softc, name, unit_number); + if (disk == NULL) + return (0); + + devad_disk_remove(softc, disk); + + return (0); +} + +/* + * Probe the given device, and add it to the list of disks. + * + * Returns 0 for success, non-zero for failure. + */ +int +devad_disk_probe_device(struct devad_softc *softc, char *name, + unsigned int unit_number) +{ + int retval = 0; + struct devad_disk *disk = NULL; + + /* + * See if we already have a device with the same name, and if so, + * whether it is unchanged. In any case, pass back a pointer to + * the new or existing disk structure. + */ + disk = devad_disk_find_or_alloc(softc, name, unit_number); + if (disk == NULL) { + retval = 1; + goto bailout; + } + + retval = devad_disk_read_cap(disk); + + /* + * Mark the current XPT generation, so we know that this disk is + * currently active. + */ + disk->generation = softc->xpt_generation; + + /* + * Starting offset for reads. + */ + disk->cur_offset = 0; + + retval = devad_disk_read_start(disk, NULL); + +bailout: + return (retval); +} + + +/* + * Given a completed XPT_DEV_MATCH CCB, probe the returned devices and + * optionally print them. + * + * Returns 0 for success, non-zero for failure. 
+ */ +static int +devad_disk_process_matches(struct devad_softc *softc, union ccb *ccb, + int *need_close, int print_devs) +{ + unsigned int i; + int retval = 0; + + for (i = 0; i < ccb->cdm.num_matches; i++) { + switch (ccb->cdm.matches[i].type) { + case DEV_MATCH_DEVICE: { + struct device_match_result *dev_result; + uint8_t vendor[16], product[48], revision[16]; + char tmpstr[256]; + + dev_result = &ccb->cdm.matches[i].result.device_result; + + if (dev_result->protocol != PROTO_SCSI) + break; + + cam_strvis(vendor, + (const uint8_t *)dev_result->inq_data.vendor, + sizeof(dev_result->inq_data.vendor),sizeof(vendor)); + cam_strvis(product, + (const uint8_t *) dev_result->inq_data.product, + sizeof(dev_result->inq_data.product), + sizeof(product)); + cam_strvis(revision, + (const uint8_t *)dev_result->inq_data.revision, + sizeof(dev_result->inq_data.revision), + sizeof(revision)); + + if (print_devs == 0) + break; + + if (*need_close != 0) { + fprintf(stdout, ")\n"); + *need_close = 0; + } + + sprintf(tmpstr, "<%s %s %s>", vendor, product,revision); + fprintf(stdout, "%-33s at scbus%d target %d lun %jd (", + tmpstr, dev_result->path_id, + dev_result->target_id, + (intmax_t)dev_result->target_lun); + + *need_close = 1; + + break; + } + case DEV_MATCH_PERIPH: { + struct periph_match_result *periph_result; + + periph_result = + &ccb->cdm.matches[i].result.periph_result; + + retval = devad_disk_probe_device(softc, + periph_result->periph_name, + periph_result->unit_number); + if (retval != 0) + goto bailout; + + if (print_devs == 0) + break; + + if (*need_close > 1) + fprintf(stdout, ","); + + fprintf(stdout, "%s%d", + periph_result->periph_name, + periph_result->unit_number); + + (*need_close)++; + break; + } + default: + break; + } + } + +bailout: + + return (retval); +} + +/* + * Get the current CAM transport layer generation. The generation number + * is incremented every time a device or peripheral is added or removed. + * So if it is unchanged since the last check, the topology is unchanged. + * + * Returns 0 for success, non-zero for failure. + */ +int +devad_disk_get_generation(uint32_t *generation) +{ + int retval = 0; + uint32_t tmp_generation; + size_t gen_size; + + gen_size = sizeof(tmp_generation); + + retval = sysctlbyname(XPT_GEN_NAME, &tmp_generation, &gen_size,NULL, 0); + if (retval != 0) { + warn("sysctlbyname for %s failed", XPT_GEN_NAME); + return (retval); + } + + /* + * We assume that the generation is a uint32_t. If it isn't, we've + * got a problem. + */ + if (gen_size != sizeof(*generation)) { + warnx("XPT generation size %zu bytes != assumed size %zu bytes", + gen_size, sizeof(*generation)); + retval = 1; + } else { + *generation = tmp_generation; + } + + return (retval); +} + + +/* + * Scan all disks in the system and add them to our list. Remove any disks + * that are no longer present. + * + * Returns 0 for success, non-zero for failure. + */ +int +devad_disk_scan(struct devad_softc *softc, int print_devs) +{ + int fd = -1, retval = 0; + int need_close = 0; + ssize_t bufsize; + union ccb ccb; + struct dev_match_pattern patterns[2]; + + bzero(&ccb, sizeof(ccb)); + + retval = devad_disk_get_generation(&softc->xpt_generation); + if (retval != 0) + goto bailout; + + /* + * Since we don't know yet which devices are in the system, open + * the XPT device to send the device match CCB. 
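+	 * XPT_DEVICE is the CAM transport layer control device (/dev/xpt0);
+	 * the CAMIOCOMMAND ioctl below hands it the XPT_DEV_MATCH CCB.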
+ */ + fd = open(XPT_DEVICE, O_RDWR); + if (fd == -1) { + warn("cannot open %s", XPT_DEVICE); + retval = 1; + goto bailout; + } + + ccb.ccb_h.path_id = CAM_XPT_PATH_ID; + ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; + ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; + + ccb.ccb_h.func_code = XPT_DEV_MATCH; + /* + * We size the buffer for 100 matches at a time. + */ + bufsize = sizeof(struct dev_match_result) * 100; + ccb.cdm.match_buf_len = bufsize; + ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); + if (ccb.cdm.matches == NULL) { + warn("unable to malloc %zd bytes", bufsize); + retval = 1; + goto bailout; + } + + ccb.cdm.num_matches = 0; + + bzero(patterns, sizeof(patterns)); + + ccb.cdm.num_patterns = 2; + ccb.cdm.pattern_buf_len = sizeof(patterns); + ccb.cdm.patterns = patterns; + /* + * Match all da(4) peripheral drivers. + */ + patterns[0].type = DEV_MATCH_PERIPH; + snprintf(patterns[0].pattern.periph_pattern.periph_name, + sizeof(patterns[0].pattern.periph_pattern.periph_name), "da"); + patterns[0].pattern.periph_pattern.flags = PERIPH_MATCH_NAME; + /* + * And match all direct access devices. + */ + patterns[1].type = DEV_MATCH_DEVICE; + patterns[1].pattern.device_pattern.flags = DEV_MATCH_INQUIRY; + patterns[1].pattern.device_pattern.data.inq_pat.type = T_DIRECT; + patterns[1].pattern.device_pattern.data.inq_pat.media_type = + SIP_MEDIA_FIXED; + snprintf(patterns[1].pattern.device_pattern.data.inq_pat.vendor, + sizeof(patterns[1].pattern.device_pattern.data.inq_pat.vendor), + "*"); + snprintf(patterns[1].pattern.device_pattern.data.inq_pat.product, + sizeof(patterns[1].pattern.device_pattern.data.inq_pat.product), + "*"); + snprintf(patterns[1].pattern.device_pattern.data.inq_pat.revision, + sizeof(patterns[1].pattern.device_pattern.data.inq_pat.revision), + "*"); + + do { + if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { + warn("error sending CAMIOCOMMAND ioctl to %s", + XPT_DEVICE); + retval = 1; + goto bailout; + } + + if ((ccb.ccb_h.status != CAM_REQ_CMP) + || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) + && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { + warnx("got CAM error %#x, CDM error %d", + ccb.ccb_h.status, ccb.cdm.status); + retval = 1; + goto bailout; + } + + retval = devad_disk_process_matches(softc, &ccb, &need_close, + print_devs); + + } while ((ccb.ccb_h.status == CAM_REQ_CMP) + && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); + +bailout: + + if ((print_devs != 0) + && (need_close != 0)) + fprintf(stdout, ")\n"); + + if (fd != -1) + close(fd); + if (ccb.cdm.matches != NULL) + free(ccb.cdm.matches); + + return (retval); +} + Index: tests/sys/devad2/devad2_devctl.h =================================================================== --- tests/sys/devad2/devad2_devctl.h +++ tests/sys/devad2/devad2_devctl.h @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2014 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ +#ifndef __DEVAD2_DEVCTL_H__ +#define __DEVAD2_DEVCTL_H__ + +/*============================= Class Devinitions ============================*/ +/*------------------------------- EventConsumer ------------------------------*/ +class EventConsumer : public DevdCtl::Consumer +{ +public: + /** Return the HpdEventConsumer singleton. */ + static EventConsumer &Get(); + + static int Init(struct devad_softc *softc); + static struct devad_softc *GetSoftc(); +private: + EventConsumer(); + + virtual ~EventConsumer(); + + static EventConsumer *s_theEventConsumer; + static DevdCtl::EventFactory::Record s_registryEntries[]; + static struct devad_softc *s_softc; +}; + +//- HpdEventConsumer Static Inline Public Methods ------------------------------ +inline +EventConsumer & +EventConsumer::Get() +{ + return (*s_theEventConsumer); +} + +inline +struct devad_softc * +EventConsumer::GetSoftc() +{ + return (s_softc); +} + +/*--------------------------------- HpdEvent ---------------------------------*/ +class Event : public DevdCtl::Event +{ +public: + /** Specialized Event object factor for HPD events. */ + static BuildMethod Builder; + + virtual bool Process() const; + +protected: + /** Constructor */ + Event(DevdCtl::Event::Type, DevdCtl::NVPairMap &, const std::string &); +}; +/*--------------------------------- DevfsEvent -------------------------------*/ +class DevfsEvent : public DevdCtl::DevfsEvent +{ +public: + static BuildMethod Builder; + + virtual bool Process() const; + +protected: + DevfsEvent(DevdCtl::Event::Type, DevdCtl::NVPairMap &, + const std::string &, struct devad_softc *softc); + + static struct devad_softc *s_softc; +}; +#endif Index: tests/sys/devad2/devad2_devctl.cc =================================================================== --- tests/sys/devad2/devad2_devctl.cc +++ tests/sys/devad2/devad2_devctl.cc @@ -0,0 +1,189 @@ +/*- + * Copyright (c) 2014 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "devad2.h" +#include "devad2_devctl.h" + +/*============================ Namespace Control =============================*/ +using DevdCtl::EventFactory; +using std::string; + +/*------------------------------- EventConsumer ------------------------------*/ +//- EventConsumer Static Private Data ------------------------------------------ +EventConsumer* EventConsumer::s_theEventConsumer; +EventFactory::Record EventConsumer::s_registryEntries[] = +{ + { DevdCtl::Event::NOTIFY, "IFNET", &Event::Builder }, + { DevdCtl::Event::NOTIFY, "CAM", &Event::Builder }, + { DevdCtl::Event::NOTIFY, "DEVFS", &DevfsEvent::Builder } +}; +struct devad_softc *EventConsumer::s_softc; + +//- EventConsumer Static Public Methods ---------------------------------------- +int +EventConsumer::Init(struct devad_softc *softc) +{ + s_theEventConsumer = new EventConsumer(); + s_softc = softc; + + return (0); +} + +//- EventConsumer Private Methods ---------------------------------------------- +EventConsumer::EventConsumer() + : Consumer(/*default*/&Event::Builder, + s_registryEntries, + nitems(s_registryEntries)) +{ + if (s_theEventConsumer != NULL) + errx(1, "Multiple HpdEventConsumer instances created. 
Exiting"); + + s_theEventConsumer = this; +} + +EventConsumer::~EventConsumer() +{ +} + +/*---------------------------------- Event -----------------------------------*/ +//- Event Static Public Methods ------------------------------------------------ +DevdCtl::Event * +Event::Builder(DevdCtl::Event::Type type, DevdCtl::NVPairMap &nvpairs, + const string &eventString) +{ + return (new Event(type, nvpairs, eventString)); +} + +//- Event Virtual Public Methods ----------------------------------------------- +bool +Event::Process() const +{ + return (DevdCtl::Event::Process()); +} + +//- Event Protected Methods ---------------------------------------------------- +Event::Event(Event::Type type, DevdCtl::NVPairMap &nvpairs, + const string &eventString) + : DevdCtl::Event(type, nvpairs, eventString) +{ +} + +/*---------------------------------- DevfsEvent ---------------------------*/ +//- DevfsEvent Static Private Data ------------------------------------------ +struct devad_softc *DevfsEvent::s_softc; + +DevdCtl::Event * +DevfsEvent::Builder(DevdCtl::Event::Type type, DevdCtl::NVPairMap &nvpairs, + const string &eventString) +{ + return (new DevfsEvent(type, nvpairs, eventString, + EventConsumer::GetSoftc())); +} + +//- DevfsEvent Protected Methods -------------------------------------------- +DevfsEvent::DevfsEvent(Event::Type type, DevdCtl::NVPairMap &nvpairs, + const string &eventString, struct devad_softc *softc) + : DevdCtl::DevfsEvent(type, nvpairs, eventString) +{ + s_softc = softc; +} + +//- DevfsEvent Virtual Public Methods --------------------------------------- +bool +DevfsEvent::Process() const +{ + std::string device_name; + char tmpstr[32]; + int unit_number, is_add = 0; + int retval; + + if (!IsDiskDev()) + return (false); + + if (!IsWholeDev()) + return (false); + + device_name = Value("cdev"); + if (device_name.empty()) + return (false); + + retval = cam_get_device(device_name.c_str(), tmpstr, + sizeof(tmpstr), &unit_number); + if (retval != 0) + return (false); + + if (strcmp(tmpstr, "da") != 0) + return (false); + + if (Value("type") == "CREATE") { + is_add = 1; + } else if (Value("type") == "DESTROY") { + is_add = 0; + } else { + return (false); + } + + fprintf(stdout, "%s %s%u\n", (is_add) ? "Adding" : "Removing", + tmpstr, unit_number); + + if (is_add) + retval = devad_disk_probe_device(s_softc, tmpstr, unit_number); + else + retval = devad_disk_remove_device(s_softc, tmpstr, unit_number); + + if (retval == 0) + return (true); + else + return (false); +} Index: tests/sys/devad2/devad2_main.cc =================================================================== --- tests/sys/devad2/devad2_main.cc +++ tests/sys/devad2/devad2_main.cc @@ -0,0 +1,571 @@ +/*- + * Copyright (c) 2013, 2014 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +/* + * Spectra-specific inclusion of timespec*() + * Please see the mailing list thread regarding exposure: + * https://lists.freebsd.org/pipermail/svn-src-head/2013-February/045210.html + */ +#define SPECTRA_TIMESPEC 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "devad2.h" +#include "devad2_devctl.h" + +#ifndef timespeccmp +#define timespeccmp(tvp, uvp, cmp) \ + (((tvp)->tv_sec == (uvp)->tv_sec) ? \ + ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \ + ((tvp)->tv_sec cmp (uvp)->tv_sec)) +#endif + +#ifndef timespecadd +#define timespecadd(vvp, uvp) \ + do { \ + (vvp)->tv_sec += (uvp)->tv_sec; \ + (vvp)->tv_nsec += (uvp)->tv_nsec; \ + if ((vvp)->tv_nsec >= 1000000000) { \ + (vvp)->tv_sec++; \ + (vvp)->tv_nsec -= 1000000000; \ + } \ + } while (0) +#endif + +#ifndef timespecsub +#define timespecsub(vvp, uvp) \ + do { \ + (vvp)->tv_sec -= (uvp)->tv_sec; \ + (vvp)->tv_nsec -= (uvp)->tv_nsec; \ + if ((vvp)->tv_nsec < 0) { \ + (vvp)->tv_sec--; \ + (vvp)->tv_nsec += 1000000000; \ + } \ + } while (0) +#endif + + +void * +devad_openclose_thread(void *arg) +{ + struct devad_openclose_args *args; + int retval; + + args = (struct devad_openclose_args *)arg; + + while ((args->flags & DEVAD_OC_FLAG_STOP) == 0) { + struct cam_device *dev; + +#if 0 + fprintf(stdout, "opening %s\n", args->cam_device); +#endif + dev = cam_open_device(args->cam_device, O_RDWR); + if (dev == NULL) { + warnx("Can't open %s: %s", args->cam_device, + cam_errbuf); + goto bailout; + } + + if (args->cycle_interval != 0) { + retval = sleep(args->cycle_interval); + if (retval != 0) { + warn("sleep returned an error"); + goto bailout; + } + } + +#if 0 + fprintf(stdout, "closing %s\n", args->cam_device); +#endif + cam_close_device(dev); + } +bailout: + return (NULL); +} + +int +devad_phy_cycle(struct devad_phy_args *args, int on) +{ + struct smp_phy_control_request *request = NULL; + struct smp_phy_control_response *response = NULL; + struct cam_device *dev = NULL; + int retval = 0; + union ccb *ccb = NULL; + + dev = cam_open_device(args->enc_device, O_RDWR); + if (dev == NULL) { + warnx("Unable to open device %s: %s", args->enc_device, + cam_errbuf); + retval = 1; + goto bailout; + } + + fprintf(stdout, "Turning %s %d %s\n", args->enc_device, args->phy, + on ? 
"on" : "off"); + + ccb = cam_getccb(dev); + if (ccb == NULL) { + warnx("%s: error allocating CCB to send to %s", + __func__, args->enc_device); + retval = 1; + goto bailout; + } + bzero(&(&ccb->ccb_h)[1], + sizeof(union ccb) - sizeof(struct ccb_hdr)); + + request = (struct smp_phy_control_request *)malloc(sizeof(*request)); + if (request == NULL) { + warn("Unable to allocate %zu bytes", sizeof(*request)); + retval = 1; + goto bailout; + } + + response = (struct smp_phy_control_response *)malloc(sizeof(*response)); + if (response == NULL) { + warn("Unable to allocate %zu bytes", sizeof(*response)); + retval = 1; + goto bailout; + } + bzero(request, sizeof(*request)); + bzero(response, sizeof(*response)); + + smp_phy_control(&ccb->smpio, + /*retries*/ 0, + /*cbfcnp*/ NULL, + /*request*/ request, + /*request_len*/ sizeof(*request), + /*response*/ (uint8_t *)response, + /*response_len*/ sizeof(*response), + /*long_response*/ 0, + /*expected_exp_change_count*/ 0, + /*phy*/ args->phy, + /*phy_operation*/ (on != 0) ? SMP_PC_PHY_OP_LINK_RESET : + SMP_PC_PHY_OP_DISABLE, + /*update_pp_timeout_val*/ 0, + /*attached_device_name*/ 0, + /*prog_min_prl*/ 0, + /*prog_max_prl*/ 0, + /*slumber_partial*/ 0, + /*pp_timeout_value*/ 0, + /*timeout*/ 5000); + + retval = cam_send_ccb(dev, ccb); + if (retval != 0) { + warn("error sending SMP phy control command to %s", + args->enc_device); + } + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_NORMAL, stderr); + retval = 1; + } +bailout: + free(request); + free(response); + if (ccb != NULL) + cam_freeccb(ccb); + if (dev != NULL) + cam_close_device(dev); + return (retval); +} + +void * +devad_phy_thread(void *arg) +{ + struct devad_phy_args *args; + uint32_t startup_sleep; + int retval; + + args = (struct devad_phy_args *)arg; + + startup_sleep = arc4random() % 10; + + retval = sleep(startup_sleep); + + while ((args->flags & DEVAD_PHY_FLAG_STOP) == 0) { + retval = devad_phy_cycle(args, /*on*/ 0); + + if (retval != 0) + goto bailout; + + retval = sleep(args->cycle_interval); + if (retval != 0) { + warnx("%s: got signal while waiting", args->enc_device); + goto bailout; + } + + retval = devad_phy_cycle(args, /*on*/ 1); + if (retval != 0) + goto bailout; + + retval = sleep(args->cycle_interval); + if (retval != 0) { + warnx("%s: got signal while waiting", args->enc_device); + goto bailout; + } + } +bailout: + return (NULL); +} + +void +usage(void) +{ + printf("devad <-p enc,phy> [-t secs] [-i sleep_interval] [-o dev] " + "[-I openclose_interval]\n"); +} + +int +main(int argc, char **argv) +{ + struct devad_softc *softc; + struct devad_phy_args *phy_arg; + struct devad_openclose_args *openclose_arg; + struct devad_disk *disk; + struct kevent ke[10], ke_set; + struct timespec end_time; + uint64_t res_key = 0; + int num_phys = 0, num_openclose = 0, num_events; + int sleep_interval = 5; + int openclose_interval = 0; + int timed_run = 0; + int retval; + int i, c; + + softc = (struct devad_softc *)malloc(sizeof(*softc)); + if (softc == NULL) + err(1, "Unable to allocate %zu bytes", sizeof(*softc)); + + bzero(softc, sizeof(*softc)); + TAILQ_INIT(&softc->disk_list); + STAILQ_INIT(&softc->phy_list); + STAILQ_INIT(&softc->openclose_list); + pthread_mutex_init(&softc->mutex, NULL); + + while ((c = getopt(argc, argv, "i:I:o:p:t:")) != -1) { + switch (c) { + case 'i': + case 'I': { + char *endptr; + int tmp_interval; + + tmp_interval = strtol(optarg, &endptr, 0); + if (*endptr != '\0') + errx(1, "Invalid sleep interval 
%s", optarg); + else if (tmp_interval < 0) + errx(1, "Negative sleep interval %d not valid", + tmp_interval); + + if (c == 'i') + sleep_interval = tmp_interval; + else + openclose_interval = tmp_interval; + break; + } + case 'o': { + char *tmpstr; + + openclose_arg = (struct devad_openclose_args *) + malloc(sizeof(*openclose_arg)); + if (openclose_arg == NULL) + err(1, "Unable to malloc %zu bytes", + sizeof(*openclose_arg)); + bzero(openclose_arg, sizeof(*openclose_arg)); + strlcpy(openclose_arg->cam_device, optarg, + sizeof(openclose_arg->cam_device)); + STAILQ_INSERT_TAIL(&softc->openclose_list, + openclose_arg, links); + num_openclose++; + break; + } + case 'p': { + char *tmpstr, *tmpstr2, *endptr; + + phy_arg = (struct devad_phy_args *)malloc( + sizeof(*phy_arg)); + if (phy_arg == NULL) + err(1, "Unable to malloc %zu bytes", + sizeof(*phy_arg)); + + bzero(phy_arg, sizeof(*phy_arg)); + tmpstr = strdup(optarg); + if (tmpstr == NULL) + err(1, "Unable to allocate PHY argument " + "storage"); + tmpstr2 = strsep(&tmpstr, ","); + if ((tmpstr2 == NULL) || (*tmpstr2 == '\0')) + errx(1, "Invalid PHY argument %s", optarg); + strlcpy(phy_arg->enc_device, tmpstr2, + sizeof(phy_arg->enc_device)); + tmpstr2 = strsep(&tmpstr, ","); + if ((tmpstr2 == NULL) || (*tmpstr2 == '\0')) + errx(1, "Invalid PHY argument %s", optarg); + phy_arg->phy = strtol(tmpstr2, &endptr, 0); + if (*endptr != '\0') + errx(1, "Invalid PHY number %s", tmpstr2); + else if (phy_arg->phy < 0) + errx(1, "Invalid PHY number %d", phy_arg->phy); + + STAILQ_INSERT_TAIL(&softc->phy_list, phy_arg, links); + num_phys++; + break; + } + case 't': { + unsigned long execute_secs; + struct timespec add_time; + char *endptr; + + execute_secs = strtoul(optarg, &endptr, 0); + if (*endptr != '\0') + errx(1, "Invalid time limit %s", optarg); + retval = clock_gettime(CLOCK_MONOTONIC_FAST, &end_time); + if (retval != 0) + err(1, "Unable to get current time"); + add_time.tv_sec = execute_secs; + add_time.tv_nsec = 0; + timespecadd(&end_time, &add_time); + timed_run = 1; + break; + } + default: + break; + } + } + + if (num_phys == 0) { + warnx("you must specify at least one PHY"); + usage(); + exit (1); + } + + softc->res_key = res_key; + softc->kq = kqueue(); + if (softc->kq == -1) + err(1, "Unable to create kqueue"); + + /* + * Initial disk scan. Figure out what disks are here before we + * start cycling PHYs. + */ + retval = devad_disk_scan(softc, /*print_devs*/ 0); + if (retval != 0) + errx(1, "Initial disk scan failed"); + + /* + * Register for devd events. + */ + EventConsumer::Init(softc); + + if (!EventConsumer::Get().Connected()) + EventConsumer::Get().ConnectToDevd(); + + if (!EventConsumer::Get().Connected()) + errx(1, "Not connected to devd, cannot run test"); + + + bzero(&ke_set, sizeof(&ke_set)); + EV_SET(&ke_set, EventConsumer::Get().GetPollFd(), + EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0); + if (kevent(softc->kq, &ke_set, 1, NULL, 0, NULL) == -1) + err(1, "error registering kevent"); + + /* + * Start the PHY cycling threads. + */ + STAILQ_FOREACH(phy_arg, &softc->phy_list, links) { + + phy_arg->cycle_interval = sleep_interval; + + retval = pthread_create(&phy_arg->thread_data, NULL, + devad_phy_thread, phy_arg); + if (retval != 0) + err(1, "Unable to create PHY cycle thread"); + } + + /* + * Start the open/close threads. 
+ */ + STAILQ_FOREACH(openclose_arg, &softc->openclose_list, links) { + openclose_arg->cycle_interval = openclose_interval; + + retval = pthread_create(&openclose_arg->thread_data, NULL, + devad_openclose_thread, + openclose_arg); + + if (retval != 0) + err(1, "Unable to create open/close thread"); + } + + /* + * Look for events and process them. + */ + for (;;) { + if (timed_run != 0) { + struct timespec cur_time; + + retval = clock_gettime(CLOCK_MONOTONIC_FAST, &cur_time); + if (retval != 0) + err(1, "clock_gettime(2) failed"); + + if (timespeccmp(&cur_time, &end_time, >)) + break; + } + + num_events = kevent(softc->kq, NULL, 0, ke, + sizeof(ke) / sizeof(ke[0]), NULL); + if (num_events == -1) + err(1, "Error returned from kevent"); + + for (i = 0; i < num_events; i++) { + if (ke[i].ident == EventConsumer::Get().GetPollFd()) + EventConsumer::Get().ProcessEvents(); + else { + struct devad_disk *disk, *disk2; + + disk = (struct devad_disk *)ke[i].udata; + + devad_disk_read_done(disk); + } + } + + } + + /* + * Cancel the PHY cycling threads. + */ + STAILQ_FOREACH(phy_arg, &softc->phy_list, links) { + +#if 0 + retval = pthread_cancel(phy_arg->thread_data); + if (retval != 0) + warn("Unable to cancel thread for PHY %d on %s", + phy_arg->phy, phy_arg->enc_device); +#endif + phy_arg->flags |= DEVAD_PHY_FLAG_STOP; + } + + /* + * Cancel the open/close threads. + */ + STAILQ_FOREACH(openclose_arg, &softc->openclose_list, links) { +#if 0 + retval = pthread_cancel(openclose_arg->thread_data); + if (retval != 0) + warn("Unable to cancel open/close thread for %s", + openclose_arg->cam_device); +#endif + openclose_arg->flags |= DEVAD_OC_FLAG_STOP; + } + + /* + * Wait for the PHY cycling threads to exit. + */ + STAILQ_FOREACH(phy_arg, &softc->phy_list, links) { + + retval = pthread_join(phy_arg->thread_data, NULL); + if (retval != 0) + warn("Unable to join thread for PHY %d on %s", + phy_arg->phy, phy_arg->enc_device); + } + + /* + * Wait for the open/close threads to exit. + */ + STAILQ_FOREACH(openclose_arg, &softc->openclose_list, links) { + retval = pthread_join(openclose_arg->thread_data, NULL); + if (retval != 0) + warn("Unable to join open/close thread for %s", + openclose_arg->cam_device); + } + + for (disk = TAILQ_FIRST(&softc->disk_list); disk != NULL; + disk = TAILQ_FIRST(&softc->disk_list)) { + devad_disk_remove(softc, disk); + } + + for (phy_arg = STAILQ_FIRST(&softc->phy_list); phy_arg != NULL; + phy_arg = STAILQ_FIRST(&softc->phy_list)) { + STAILQ_REMOVE_HEAD(&softc->phy_list, links); + free(phy_arg); + } + + for (openclose_arg = STAILQ_FIRST(&softc->openclose_list); + openclose_arg != NULL; + openclose_arg = STAILQ_FIRST(&softc->openclose_list)) { + STAILQ_REMOVE_HEAD(&softc->openclose_list, links); + free(openclose_arg); + } + + exit (0); +} Index: tests/sys/devad2/devad2_test.sh =================================================================== --- tests/sys/devad2/devad2_test.sh +++ tests/sys/devad2/devad2_test.sh @@ -0,0 +1,136 @@ +# +# Copyright (c) 2014 Spectra Logic Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions, and the following disclaimer, +# without modification. +# 2. 
Redistributions in binary form must reproduce at minimum a disclaimer +# substantially similar to the "NO WARRANTY" disclaimer below +# ("Disclaimer") and any redistribution must be conditioned upon +# including a substantially similar Disclaimer requirement for further +# binary redistribution. +# +# NO WARRANTY +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGES. +# +# Authors: Ken Merry (Spectra Logic Corporation) +# +# $FreeBSD$ + +# Default list of enclosure processors on expanders. This is a space +# separated list of just the peripheral name and unit number, e.g.: +# "enc0 enc1 enc2" +export EXPANDER_LIST=${EXPANDER_LIST:-enc0} + +# Default runtime. 5 minutes should be long enough to cause a panic if +# enough disks are available. +export PHY_CYCLE_RUNTIME=300 + +# The test will take 10 seconds or so to exit after the runtime above +# expires. This is to ensure that the ATF timeout is long enough to allow +# it to clean up. +export PHY_CYCLE_TIMEOUT=360 + +# Look up the geom mode of a disk. Will return something like r1w1e1 +# $1: geom name of disk, e.g. "da1" +get_disk_mode() +{ + geom disk list "$1" | awk '/Mode:/ {print $2}' +} + +run_test() +{ + TEST_SECS=$1 + + FREE_PHYDEVS="" + + # Go through the list of expanders and find all of the disks that + # aren't opened exclusively. Record their expander and PHY number. + for EXP in ${EXPANDER_LIST} + do + PHYDEVS=`camcontrol smpphylist ${EXP} -q | awk ' + $NF ~ /[(,]da[0-9]+[),]/ { + match($NF, "da[0-9]+"); + print $1 "," substr($NF, RSTART, RLENGTH) + }'` + + for pair in $PHYDEVS; do + # Exclude disks that are opened in exclusive mode + # from testing + # XXX Due to ZFS not opening disks in exclusive mode + # (BUG25571) this test won't detect disks being used + # by ZFS, only UFS or other filesystems. + phy=`echo $pair | cut -d , -f 1` + dev=`echo $pair | cut -d , -f 2` + if get_disk_mode $dev | grep -q 'e0'; then + ENC_PHY="${EXP},${phy}" + FREE_PHYDEVS="$FREE_PHYDEVS $ENC_PHY" + fi + done + done + + num_freephys=`echo $FREE_PHYDEVS | wc -w` + if [ $num_freephys -le 0 ]; then + atf_skip "No free PHYs detected on expanders ${EXPANDER_LIST}, cannot run test" + fi + + DEVAD_PHYS="" + + for i in ${FREE_PHYDEVS} + do + DEVAD_PHYS="$DEVAD_PHYS -p $i" + done + +  SRC_DIR=$(atf_get_srcdir) + + $SRC_DIR/devad2 -t $TEST_SECS $DEVAD_PHYS + if [ $? 
-eq 0 ]; then + atf_pass + else + atf_fail "devad2 test had non-zero exit status" + fi + +} + +atf_test_case phy_cycle_and_read +phy_cycle_and_read_head() +{ + EXP_REQ="" + for i in ${EXPANDER_LIST} + do + EXP_REQ="$EXP_REQ /dev/${i}" + done + + atf_set "descr" "Test that cycles PHYs and reads for 5 minutes" + atf_set "require.files" "${EXP_REQ}" + atf_set "require.user" "root" + atf_set "require.config" "allow_devfs_side_effects" + atf_set "timeout" $PHY_CYCLE_TIMEOUT +} + +phy_cycle_and_read_body() +{ + run_test $PHY_CYCLE_RUNTIME +} + +# +# ATF Test Program Initialization +# +atf_init_test_cases() +{ + atf_add_test_case phy_cycle_and_read +}
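
A note on the event loop in devad2_main.cc above: main() calls kevent(2) with a NULL timeout, so on a timed run (-t) the deadline is only re-checked after the next devd or disk I/O event arrives. The stand-alone sketch below is not part of the patch; it shows one way to bound such a loop by handing kevent(2) the time remaining until the deadline. STDIN_FILENO is used purely as a stand-in for the devd descriptor, the portable CLOCK_MONOTONIC replaces CLOCK_MONOTONIC_FAST, and the 10 second deadline is arbitrary.

/*
 * Sketch only: deadline-bounded kevent(2) loop.  The descriptor watched
 * here (stdin) stands in for whatever descriptor the real program polls.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int
main(void)
{
        struct kevent ke, ke_set;
        struct timespec end_time, cur_time, wait_time;
        int kq, num_events;

        kq = kqueue();
        if (kq == -1)
                err(1, "Unable to create kqueue");

        /* Watch one descriptor for readability. */
        EV_SET(&ke_set, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
        if (kevent(kq, &ke_set, 1, NULL, 0, NULL) == -1)
                err(1, "error registering kevent");

        /* Deadline: 10 seconds from now. */
        if (clock_gettime(CLOCK_MONOTONIC, &end_time) != 0)
                err(1, "Unable to get current time");
        end_time.tv_sec += 10;

        for (;;) {
                if (clock_gettime(CLOCK_MONOTONIC, &cur_time) != 0)
                        err(1, "clock_gettime(2) failed");

                /* Stop once the deadline has passed. */
                if (cur_time.tv_sec > end_time.tv_sec ||
                    (cur_time.tv_sec == end_time.tv_sec &&
                    cur_time.tv_nsec >= end_time.tv_nsec))
                        break;

                /* Sleep at most until the deadline. */
                wait_time.tv_sec = end_time.tv_sec - cur_time.tv_sec;
                wait_time.tv_nsec = end_time.tv_nsec - cur_time.tv_nsec;
                if (wait_time.tv_nsec < 0) {
                        wait_time.tv_sec--;
                        wait_time.tv_nsec += 1000000000;
                }

                num_events = kevent(kq, NULL, 0, &ke, 1, &wait_time);
                if (num_events == -1)
                        err(1, "Error returned from kevent");
                if (num_events > 0)
                        printf("fd %d is readable\n", (int)ke.ident);
        }

        return (0);
}

For reference, the ATF script above ends up invoking the test program with a command line along the lines of "devad2 -t 300 -p enc0,3 -p enc0,5" (the exact enclosure/PHY pairs depend on what camcontrol smpphylist reports), one -p enclosure,PHY pair per free disk it finds.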