D53222.id164635.diff

diff --git a/usr.sbin/bhyve/pci_virtio_scsi.c b/usr.sbin/bhyve/pci_virtio_scsi.c
--- a/usr.sbin/bhyve/pci_virtio_scsi.c
+++ b/usr.sbin/bhyve/pci_virtio_scsi.c
@@ -247,6 +247,15 @@
uint8_t sense[];
} __attribute__((packed));
+enum pci_vtscsi_walk {
+ PCI_VTSCSI_WALK_CONTINUE = 0,
+ PCI_VTSCSI_WALK_STOP,
+};
+
+typedef enum pci_vtscsi_walk pci_vtscsi_walk_t;
+typedef pci_vtscsi_walk_t pci_vtscsi_walk_request_queue_cb_t(
+ struct pci_vtscsi_queue *, struct pci_vtscsi_request *, void *);
+
static void *pci_vtscsi_proc(void *);
static void pci_vtscsi_reset(void *);
static void pci_vtscsi_neg_features(void *, uint64_t);
@@ -259,6 +268,18 @@
static inline int pci_vtscsi_get_lun(struct pci_vtscsi_softc *,
const uint8_t *);
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_abort_task;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_abort_task_set;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_clear_aca;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_clear_task_set;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_i_t_nexus_reset;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_lun_reset;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_query_task;
+static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_query_task_set;
+
+static pci_vtscsi_walk_t pci_vtscsi_walk_request_queue(
+ struct pci_vtscsi_queue *, pci_vtscsi_walk_request_queue_cb_t *, void *);
+
static void pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *, void *);
static void pci_vtscsi_an_handle(struct pci_vtscsi_softc *, void *);
static void pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *,
@@ -316,8 +337,10 @@
while (STAILQ_EMPTY(&q->vsq_requests) && !worker->vsw_exiting)
pthread_cond_wait(&q->vsq_cv, &q->vsq_rmtx);
- if (worker->vsw_exiting)
- break;
+ if (worker->vsw_exiting) {
+ pthread_mutex_unlock(&q->vsq_rmtx);
+ return (NULL);
+ }
req = pci_vtscsi_get_request(&q->vsq_requests);
pthread_mutex_unlock(&q->vsq_rmtx);
@@ -334,9 +357,6 @@
pci_vtscsi_return_request(q, req, iolen);
}
-
- pthread_mutex_unlock(&q->vsq_rmtx);
- return (NULL);
}
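
For readers skimming the diff: the two hunks above move the worker's unlock and return into the vsw_exiting check, so the request mutex is dropped on the exit path itself rather than after the loop. A minimal standalone sketch of the same wait/exit shape, using invented names (struct workq, pending) instead of the driver's types, might look like this:

#include <pthread.h>
#include <stdbool.h>

struct workq {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	bool		exiting;
	int		pending;	/* number of queued requests */
};

void *
worker(void *arg)
{
	struct workq *q = arg;

	for (;;) {
		pthread_mutex_lock(&q->mtx);
		while (q->pending == 0 && !q->exiting)
			pthread_cond_wait(&q->cv, &q->mtx);
		if (q->exiting) {
			/* Drop the lock on the exit path, as the patch does. */
			pthread_mutex_unlock(&q->mtx);
			return (NULL);
		}
		q->pending--;
		pthread_mutex_unlock(&q->mtx);
		/* ... handle the dequeued request without the lock held ... */
	}
}
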
static void
@@ -445,6 +465,252 @@
return (((lun[2] << 8) | lun[3]) & 0x3fff);
}
+/*
+ * ABORT TASK: Abort the specified task queued for this LUN.
+ *
+ * We can stop once we have found the specified task queued for this LUN.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_abort_task(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_ABORT_TASK);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (pci_vtscsi_get_lun(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_lun(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (tmf->id != req->vsr_cmd_rd->id)
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ req->vsr_cmd_wr->response = VIRTIO_SCSI_S_ABORTED;
+ STAILQ_REMOVE(&q->vsq_requests, req, pci_vtscsi_request, vsr_link);
+ pci_vtscsi_return_request(q, req, 0);
+
+ return (PCI_VTSCSI_WALK_STOP);
+}
+
+/*
+ * ABORT TASK SET: Abort all tasks queued for this LUN.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_abort_task_set(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_ABORT_TASK_SET);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (pci_vtscsi_get_lun(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_lun(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ req->vsr_cmd_wr->response = VIRTIO_SCSI_S_ABORTED;
+ STAILQ_REMOVE(&q->vsq_requests, req, pci_vtscsi_request, vsr_link);
+ pci_vtscsi_return_request(q, req, 0);
+
+ return (PCI_VTSCSI_WALK_CONTINUE);
+}
+
+/*
+ * CLEAR ACA: Clear ACA (auto contingent allegiance) state.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_clear_aca(struct pci_vtscsi_queue *q __unused,
+ struct pci_vtscsi_request *req __unused, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_CLEAR_ACA);
+
+ /*
+ * We don't implement handling of NACA=1 in the CONTROL byte at all.
+ *
+ * Thus, we probably should start filtering NORMACA in INQUIRY and
+ * reject any command that sets NACA=1.
+ *
+ * In any case, there isn't anything we need to do with our queued
+ * requests, so stop right here.
+ */
+
+ return (PCI_VTSCSI_WALK_STOP);
+}
+
+/*
+ * CLEAR TASK SET: Clear all tasks queued for this LUN.
+ *
+ * All tasks in our queue were placed there by us, so there can be no other
+ * I_T nexus involved. Hence, this is handled the same as ABORT TASK SET.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_clear_task_set(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (pci_vtscsi_get_lun(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_lun(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ req->vsr_cmd_wr->response = VIRTIO_SCSI_S_ABORTED;
+ STAILQ_REMOVE(&q->vsq_requests, req, pci_vtscsi_request, vsr_link);
+ pci_vtscsi_return_request(q, req, 0);
+
+ return (PCI_VTSCSI_WALK_CONTINUE);
+}
+
+/*
+ * I_T NEXUS RESET: Abort all tasks queued for any LUN of this target.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_i_t_nexus_reset(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ /*
+ * T10 "06-026r4 SAM-4 TASK ABORTED status clarifications" indicates
+ * that we should actually return ABORTED here, but other documents
+ * such as the VirtIO spec suggest RESET.
+ */
+ req->vsr_cmd_wr->response = VIRTIO_SCSI_S_RESET;
+ STAILQ_REMOVE(&q->vsq_requests, req, pci_vtscsi_request, vsr_link);
+ pci_vtscsi_return_request(q, req, 0);
+
+ return (PCI_VTSCSI_WALK_CONTINUE);
+}
+
+/*
+ * LOGICAL UNIT RESET: Abort all tasks queued for this LUN.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_lun_reset(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (pci_vtscsi_get_lun(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_lun(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ /*
+ * T10 "06-026r4 SAM-4 TASK ABORTED status clarifications" indicates
+ * that we should actually return ABORTED here, but other documents
+ * such as the VirtIO spec suggest RESET.
+ */
+ req->vsr_cmd_wr->response = VIRTIO_SCSI_S_RESET;
+ STAILQ_REMOVE(&q->vsq_requests, req, pci_vtscsi_request, vsr_link);
+ pci_vtscsi_return_request(q, req, 0);
+
+ return (PCI_VTSCSI_WALK_CONTINUE);
+}
+
+/*
+ * QUERY TASK: Is the specified task present in this LUN?
+ *
+ * We can stop once we have found the specified task queued for this LUN.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_query_task(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (pci_vtscsi_get_lun(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_lun(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (tmf->id != req->vsr_cmd_rd->id)
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ return (PCI_VTSCSI_WALK_STOP);
+}
+
+/*
+ * QUERY TASK SET: Are there any tasks present in this LUN?
+ *
+ * We can stop as soon as we've found at least one task queued for this LUN.
+ */
+static pci_vtscsi_walk_t
+pci_vtscsi_tmf_handle_query_task_set(struct pci_vtscsi_queue *q,
+ struct pci_vtscsi_request *req, void *arg)
+{
+ struct pci_vtscsi_ctrl_tmf *tmf = arg;
+
+ assert(tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET);
+
+ if (pci_vtscsi_get_target(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_target(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ if (pci_vtscsi_get_lun(q->vsq_sc, tmf->lun) !=
+ pci_vtscsi_get_lun(q->vsq_sc, req->vsr_cmd_rd->lun))
+ return (PCI_VTSCSI_WALK_CONTINUE);
+
+ tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ return (PCI_VTSCSI_WALK_STOP);
+}
+
+static pci_vtscsi_walk_t
+pci_vtscsi_walk_request_queue(struct pci_vtscsi_queue *q,
+ pci_vtscsi_walk_request_queue_cb_t cb, void *arg)
+{
+ struct pci_vtscsi_request *req, *tmp;
+
+ STAILQ_FOREACH_SAFE(req, &q->vsq_requests, vsr_link, tmp) {
+ if (cb(q, req, arg) == PCI_VTSCSI_WALK_STOP)
+ return (PCI_VTSCSI_WALK_STOP);
+ }
+
+ return (PCI_VTSCSI_WALK_CONTINUE);
+}
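
The walk helper above iterates with STAILQ_FOREACH_SAFE, which caches the next element before invoking the callback, so a callback may unlink the request it is currently visiting, which is exactly what the abort and reset handlers above do with STAILQ_REMOVE. A self-contained toy version of this walk-with-callback pattern, with made-up node/walk names that are not part of the patch:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int id;
	STAILQ_ENTRY(node) link;
};

STAILQ_HEAD(node_list, node);

enum walk { WALK_CONTINUE, WALK_STOP };

typedef enum walk walk_cb_t(struct node_list *, struct node *, void *);

/* Visit every node; the callback may safely unlink the current node. */
static enum walk
walk_list(struct node_list *head, walk_cb_t *cb, void *arg)
{
	struct node *n, *tmp;

	STAILQ_FOREACH_SAFE(n, head, link, tmp) {
		if (cb(head, n, arg) == WALK_STOP)
			return (WALK_STOP);
	}
	return (WALK_CONTINUE);
}

/* Example callback: drop the node whose id matches *arg, then stop. */
static enum walk
drop_matching(struct node_list *head, struct node *n, void *arg)
{
	if (n->id != *(int *)arg)
		return (WALK_CONTINUE);
	STAILQ_REMOVE(head, n, node, link);
	free(n);
	return (WALK_STOP);
}

int
main(void)
{
	struct node_list head = STAILQ_HEAD_INITIALIZER(head);
	int target = 2;

	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->id = i;
		STAILQ_INSERT_TAIL(&head, n, link);
	}
	walk_list(&head, drop_matching, &target);

	struct node *n;
	STAILQ_FOREACH(n, &head, link)
		printf("%d\n", n->id);	/* prints 0 1 3 */
	return (0);
}
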
+
+static pci_vtscsi_walk_request_queue_cb_t *pci_vtscsi_tmf_handler_cb[] = {
+ pci_vtscsi_tmf_handle_abort_task,
+ pci_vtscsi_tmf_handle_abort_task_set,
+ pci_vtscsi_tmf_handle_clear_aca,
+ pci_vtscsi_tmf_handle_clear_task_set,
+ pci_vtscsi_tmf_handle_i_t_nexus_reset,
+ pci_vtscsi_tmf_handle_lun_reset,
+ pci_vtscsi_tmf_handle_query_task,
+ pci_vtscsi_tmf_handle_query_task_set
+};
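
This table is indexed directly with tmf->subtype further down (pci_vtscsi_tmf_handler_cb[tmf->subtype]), so the entries must stay in the order of the VIRTIO_SCSI_T_TMF_* constants, which the virtio-scsi spec defines as consecutive values from ABORT_TASK through QUERY_TASK_SET. A hypothetical compile-time guard that could sit next to the table, not part of this patch, assuming nitems() from sys/param.h is available:

/*
 * Hypothetical guard (not in the patch): keep the handler table in sync
 * with the number of VIRTIO_SCSI_T_TMF_* subtypes, since tmf->subtype is
 * used as an array index below.
 */
_Static_assert(nitems(pci_vtscsi_tmf_handler_cb) ==
    VIRTIO_SCSI_T_TMF_QUERY_TASK_SET + 1,
    "TMF handler table must cover every subtype");
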
+
static void
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc, void *buf)
{
@@ -478,13 +744,41 @@
DPRINTF("TMF request tgt %d, lun %d, subtype %d, id %lu",
target, pci_vtscsi_get_lun(sc, tmf->lun), tmf->subtype, tmf->id);
+ /*
+ * Lock out all the worker threads from processing any waiting requests
+ * while we're processing the TMF request. This also effectively blocks
+ * pci_vtscsi_requestq_notify() from adding any new requests to the
+ * request queue. This does not prevent requests currently being
+ * processed by the backend from completing and being returned, which
+ * we must allow in order to adhere to the ordering requirements of any
+ * TMF function that aborts tasks.
+ */
+ for (int i = 0; i < VTSCSI_REQUESTQ; i++) {
+ struct pci_vtscsi_queue *q = &sc->vss_queues[i];
+
+ pthread_mutex_lock(&q->vsq_rmtx);
+ }
+
+ /*
+ * CTL may set response to FAILURE for the TMF request.
+ *
+ * The default response of all TMF functions is FUNCTION COMPLETE if
+ * there was no error, regardless of whether it actually succeeded or
+ * not. The two notable exceptions are QUERY TASK and QUERY TASK SET,
+ * which will explicitly return FUNCTION SUCCEEDED if the specified
+ * task or any task was active in the target/LUN, respectively.
+ *
+ * Thus, we call CTL first. Only if the response we get is
+ * FUNCTION COMPLETE do we continue processing the TMF function
+ * on our queues.
+ */
io = ctl_scsi_alloc_io(sc->vss_iid);
if (io == NULL) {
WPRINTF("failed to allocate ctl_io: err=%d (%s)",
errno, strerror(errno));
tmf->response = VIRTIO_SCSI_S_FAILURE;
- return;
+ goto out;
}
ctl_scsi_zero_io(io);
@@ -539,11 +833,54 @@
}
err = ioctl(fd, CTL_IO, io);
- if (err != 0)
+ if (err != 0) {
WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
+ tmf->response = VIRTIO_SCSI_S_FAILURE;
+ } else {
+ tmf->response = io->taskio.task_status;
+ }
- tmf->response = io->taskio.task_status;
ctl_scsi_free_io(io);
+
+ if (tmf->response != VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
+ /*
+ * If this is either a FAILURE or FUNCTION REJECTED, we must
+ * not continue to process the TMF function on our queued
+ * requests.
+ *
+ * If it is FUNCTION SUCCEEDED, we do not need to process the
+ * TMF function on our queued requests.
+ *
+ * If it is anything else, log a warning, but handle it the
+ * same as above.
+ */
+ if (tmf->response != VIRTIO_SCSI_S_FAILURE &&
+ tmf->response != VIRTIO_SCSI_S_FUNCTION_REJECTED &&
+ tmf->response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) {
+ WPRINTF("pci_vtscsi_tmf_hdl: unexpected response from "
+ "CTL: %d", tmf->response);
+ }
+ } else {
+ pci_vtscsi_walk_t ret = PCI_VTSCSI_WALK_CONTINUE;
+ int i;
+
+ for (i = 0;
+ i < VTSCSI_REQUESTQ && ret != PCI_VTSCSI_WALK_STOP;
+ i++) {
+ struct pci_vtscsi_queue *q = &sc->vss_queues[i];
+
+ ret = pci_vtscsi_walk_request_queue(q,
+ pci_vtscsi_tmf_handler_cb[tmf->subtype], tmf);
+ }
+ }
+
+out:
+ /* Unlock the request queues before we return. */
+ for (int i = 0; i < VTSCSI_REQUESTQ; i++) {
+ struct pci_vtscsi_queue *q = &sc->vss_queues[i];
+
+ pthread_mutex_unlock(&q->vsq_rmtx);
+ }
}
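
The handler above takes every request-queue mutex before touching CTL and releases them all at the out: label, so the early ctl_scsi_alloc_io() failure and the normal path share one unlock loop. Locking the queues in a fixed index order each time is what keeps this deadlock-free should any other code path ever need more than one of these mutexes. A stripped-down sketch of that lock-all/work/unlock-all shape, with invented names (NQUEUES, queue_mtx) and none of the driver's state:

#include <pthread.h>

#define NQUEUES	4

static pthread_mutex_t queue_mtx[NQUEUES] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

int
quiesce_and_run(void (*work)(void))
{
	/* Quiesce all worker queues: always lock in the same (index) order. */
	for (int i = 0; i < NQUEUES; i++)
		pthread_mutex_lock(&queue_mtx[i]);

	work();		/* inspect/modify the queues with everything held */

	/* Single exit path: unlock everything in one place. */
	for (int i = 0; i < NQUEUES; i++)
		pthread_mutex_unlock(&queue_mtx[i]);
	return (0);
}
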
static void
