Page MenuHomeFreeBSD

D53223.id172278.diff
No OneTemporary

D53223.id172278.diff

diff --git a/usr.sbin/bhyve/Makefile b/usr.sbin/bhyve/Makefile
--- a/usr.sbin/bhyve/Makefile
+++ b/usr.sbin/bhyve/Makefile
@@ -53,6 +53,7 @@
pci_virtio_net.c \
pci_virtio_rnd.c \
pci_virtio_scsi.c \
+ pci_virtio_scsi_ctl.c \
pci_xhci.c \
qemu_fwcfg.c \
qemu_loader.c \
diff --git a/usr.sbin/bhyve/bhyve.8 b/usr.sbin/bhyve/bhyve.8
--- a/usr.sbin/bhyve/bhyve.8
+++ b/usr.sbin/bhyve/bhyve.8
@@ -666,8 +666,9 @@
.Bl -bullet
.Sm off
.It
-.Oo Cm target Ns = Ns Oo ID : Oc Ar /dev/cam/ctl Oo Ar pp Cm \&. Ar vp Oc Oc
+.Oo Cm target Ns = Ns Oo ID : Oc Ar path Ns Oc
.Oo Cm \&, Ar scsi-device-options Oc
+.Oo Cm \&, Ar backend-specific-options Oc
.Sm on
.El
.Pp
@@ -693,25 +694,28 @@
.Pa target
.Ar ID Ns s
must be unique per instance.
-The
+The meaning of the
.Ar path
-must point to a valid CAM target layer
-.Po CTL
-.Pc
-device node.
-If no
-.Pa target
-is configured, a single default target backed by
-.Sy /dev/cam/ctl
-will be created.
+argument is specific to each backend:
+.Bl -column "Backend" "/dev/cam/ctl[pp.vp]"
+.It Sy Backend Ta Sy Path Ta Sy Description
+.It ctl Ta Pa /dev/cam/ctl Ns Oo Ar pp . Ns Ar vp Oc Ta
+The path of a CAM target layer (CTL) device node.
+If no target is configured, a single target backed by
+.Qq /dev/cam/ctl
+will be configured by default.
+.El
.Pp
The
.Ar scsi-device-options
are:
.Bl -tag -width 10n
-.It Cm iid= Ns Ar IID
-Initiator ID to use when sending requests to specified CTL port.
-The default value is 0.
+.It Cm backend= Ns Ar backend
+The virtio-scsi backend to use.
+The backend name is case-insensitive.
+There is currently only one backend
+.Qq ctl ,
+which is also the default backend.
.It Li bootindex= Ns Ar index
Add the device to the boot order at
.Ar index .
@@ -719,6 +723,18 @@
The guest firmware may ignore or not support this fw_cfg file.
In that case, this feature doesn't work as expected.
.El
+.Pp
+The
+.Ar backend-specific-options
+for the
+.Sy CTL
+backend are:
+.Bl -tag -width 10n
+.It Cm iid= Ns Ar IID
+Initiator ID to use when sending requests to
+.Sy CTL .
+The default value is 0.
+.El
.Ss 9P device backends
.Bl -bullet
.Sm off
diff --git a/usr.sbin/bhyve/bhyve_config.5 b/usr.sbin/bhyve/bhyve_config.5
--- a/usr.sbin/bhyve/bhyve_config.5
+++ b/usr.sbin/bhyve/bhyve_config.5
@@ -762,13 +762,14 @@
The largest supported MTU advertised to the guest.
.El
.Ss VirtIO SCSI Settings
-.Bl -column "target" "integer" "/dev/cam/ctl"
+.Bl -column "backend" "string" "/dev/cam/ctl"
.It Sy Name Ta Sy Format Ta Sy Default Ta Sy Description
-.It Va iid Ta integer Ta 0 Ta
-Initiator ID to use when sending requests to the CTL port.
-.It Va target Ta Oo Va ID : Oc Ns path Ta Sy /dev/cam/ctl Ta
-The path of a CAM target layer (CTL) device to use:
-.Pa /dev/cam/ctl Ns Oo Ar pp . Ns Ar vp Oc
+.It Va backend Ta string Ta ctl Ta
+The virtio-scsi backend to use (case-insensitive).
+.It Va target Ta Oo Va ID : Oc Ns path Ta /dev/cam/ctl Ta
+The backend
+.Ar path
+of a target to configure.
Optionally, a numeric target
.Ar ID
in the range from 0 to 255 may be specified before the
@@ -789,6 +790,22 @@
The target
.Ar ID Ns s
must be unique within each virtio-scsi instance.
+.Pp
+The meaning of the
+.Ar path
+argument is specific to each backend:
+.Bl -column "Backend" "/dev/cam/ctl[pp.vp]"
+.It Sy Backend Ta Sy Path Ta Sy Description
+.It ctl Ta Pa /dev/cam/ctl Ns Oo Ar pp . Ns Ar vp Oc Ta
+The path of a CAM target layer (CTL) device to configure as a target.
+.El
+.Pp
+The following backend-specific variables are supported for VirtIO SCSI:
+.Bl -column "Backend" "Name" "integer" "Default"
+.It Sy Backend Ta Sy Name Ta Sy Format Ta Sy Default Ta Sy Description
+.It ctl Ta Va iid Ta integer Ta 0 Ta
+Initiator ID to use when sending requests to CTL.
+.El
.Sh SEE ALSO
.Xr expand_number 3 ,
.Xr getaddrinfo 3 ,
diff --git a/usr.sbin/bhyve/pci_virtio_scsi.h b/usr.sbin/bhyve/pci_virtio_scsi.h
new file mode 100644
--- /dev/null
+++ b/usr.sbin/bhyve/pci_virtio_scsi.h
@@ -0,0 +1,394 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
+ * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
+ * Copyright (c) 2026 Hans Rosenfeld
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _PCI_VIRTIO_SCSI_H_
+#define _PCI_VIRTIO_SCSI_H_
+
+extern int pci_vtscsi_debug;
+
+#define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
+#define DPRINTF(msg, params...) if (pci_vtscsi_debug) WPRINTF(msg, ##params)
+
+/* Absolute limits given by the VirtIO SCSI spec */
+#define VIRTIO_SCSI_MAX_CHANNEL 0
+#define VIRTIO_SCSI_MAX_TARGET 255
+#define VIRTIO_SCSI_MAX_LUN 16383
+
+/* Features specific to VirtIO SCSI, none of which we currently support */
+#define VIRTIO_SCSI_F_INOUT (1 << 0)
+#define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
+#define VIRTIO_SCSI_F_CHANGE (1 << 2)
+
+/* Limits which we set. These should really be configurable. */
+#define VTSCSI_RINGSZ 64
+#define VTSCSI_REQUESTQ 1
+#define VTSCSI_THR_PER_Q 16
+#define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
+#define VTSCSI_MAXSEG 64
+
+/*
+ * Device-specific config space registers
+ *
+ * The guest driver may try to modify cdb_size and sense_size by writing the
+ * respective config space registers. Since we currently ignore all writes to
+ * config space, these macros are essentially constant.
+ */
+#define VTSCSI_IN_HEADER_LEN(_sc) \
+ (sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)
+
+#define VTSCSI_OUT_HEADER_LEN(_sc) \
+ (sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)
+
+struct pci_vtscsi_config {
+ uint32_t num_queues;
+ uint32_t seg_max;
+ uint32_t max_sectors;
+ uint32_t cmd_per_lun;
+ uint32_t event_info_size;
+ uint32_t sense_size;
+ uint32_t cdb_size;
+ uint16_t max_channel;
+ uint16_t max_target;
+ uint32_t max_lun;
+} __attribute__((packed));
+
+
+/*
+ * I/O request state and I/O request queues
+ *
+ * In addition to the control queue and notification queues, each virtio-scsi
+ * device instance has at least one I/O request queue, the state of which is
+ * is kept in an array of struct pci_vtscsi_queue in the device softc.
+ *
+ * Currently there is only one I/O request queue, but it's trivial to support
+ * more than one.
+ *
+ * Each pci_vtscsi_queue has VTSCSI_RINGSZ pci_vtscsi_request structures pre-
+ * allocated on vsq_free_requests. For each I/O request coming in on the I/O
+ * virtqueue, the request queue handler takes a pci_vtscsi_request off
+ * vsq_free_requests, fills in the data from the I/O virtqueue, puts it on
+ * vsq_requests, and signals vsq_cv.
+ *
+ * There are VTSCSI_THR_PER_Q worker threads for each pci_vtscsi_queue which
+ * wait on vsq_cv. When signalled, they repeatedly take one pci_vtscsi_request
+ * off vsq_requests, construct a ctl_io for it, and hand it off to the CTL ioctl
+ * interface, which processes it synchronously. After completion of the request,
+ * the pci_vtscsi_request is re-initialized and put back onto vsq_free_requests.
+ *
+ * The worker threads exit when vsq_cv is signalled after vsw_exiting was set.
+ *
+ * There are three mutexes to coordinate the accesses to an I/O request queue:
+ * - vsq_rmtx protects vsq_requests and must be held when waiting on vsq_cv
+ * - vsq_fmtx protects vsq_free_requests
+ * - vsq_qmtx must be held when operating on the underlying virtqueue, vsq_vq
+ */
+STAILQ_HEAD(pci_vtscsi_req_queue, pci_vtscsi_request);
+
+struct pci_vtscsi_queue {
+ struct pci_vtscsi_softc *vsq_sc;
+ struct vqueue_info *vsq_vq;
+ pthread_mutex_t vsq_rmtx;
+ pthread_mutex_t vsq_fmtx;
+ pthread_mutex_t vsq_qmtx;
+ pthread_cond_t vsq_cv;
+ struct pci_vtscsi_req_queue vsq_requests;
+ struct pci_vtscsi_req_queue vsq_free_requests;
+ LIST_HEAD(, pci_vtscsi_worker) vsq_workers;
+};
+
+struct pci_vtscsi_worker {
+ struct pci_vtscsi_queue *vsw_queue;
+ pthread_t vsw_thread;
+ bool vsw_exiting;
+ LIST_ENTRY(pci_vtscsi_worker) vsw_link;
+};
+
+struct pci_vtscsi_request {
+ struct pci_vtscsi_queue *vsr_queue;
+ struct iovec vsr_iov[VTSCSI_MAXSEG +
+ SPLIT_IOV_ADDL_IOV];
+ struct iovec *vsr_iov_in;
+ struct iovec *vsr_iov_out;
+ struct iovec *vsr_data_iov_in;
+ struct iovec *vsr_data_iov_out;
+ struct pci_vtscsi_req_cmd_rd *vsr_cmd_rd;
+ struct pci_vtscsi_req_cmd_wr *vsr_cmd_wr;
+ void *vsr_backend;
+ size_t vsr_niov_in;
+ size_t vsr_niov_out;
+ size_t vsr_data_niov_in;
+ size_t vsr_data_niov_out;
+ uint32_t vsr_idx;
+ STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
+};
+
+/*
+ * Per-target state.
+ */
+struct pci_vtscsi_target {
+ uint8_t vst_target;
+ int vst_fd;
+ int vst_max_sectors;
+};
+
+/*
+ * Per-device softc
+ */
+struct pci_vtscsi_softc {
+ struct virtio_softc vss_vs;
+ struct vqueue_info vss_vq[VTSCSI_MAXQ];
+ struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
+ pthread_mutex_t vss_mtx;
+ uint32_t vss_features;
+ size_t vss_num_target;
+ struct pci_vtscsi_config vss_config;
+ struct pci_vtscsi_target *vss_targets;
+ struct pci_vtscsi_backend *vss_backend;
+};
+
+/*
+ * VirtIO-SCSI Task Management Function control requests
+ */
+#define VIRTIO_SCSI_T_TMF 0
+#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
+#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
+#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
+#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
+#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
+#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
+#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
+#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
+
+#define VIRTIO_SCSI_T_TMF_MAX_FUNC VIRTIO_SCSI_T_TMF_QUERY_TASK_SET
+
+/* command-specific response values */
+#define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0
+#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
+#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
+
+struct pci_vtscsi_ctrl_tmf {
+ const uint32_t type;
+ const uint32_t subtype;
+ const uint8_t lun[8];
+ const uint64_t id;
+ uint8_t response;
+} __attribute__((packed));
+
+
+/*
+ * VirtIO-SCSI Asynchronous Notification control requests
+ */
+#define VIRTIO_SCSI_T_AN_QUERY 1
+#define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
+#define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4
+#define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8
+#define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16
+#define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32
+#define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64
+
+struct pci_vtscsi_ctrl_an {
+ const uint32_t type;
+ const uint8_t lun[8];
+ const uint32_t event_requested;
+ uint32_t event_actual;
+ uint8_t response;
+} __attribute__((packed));
+
+/* command-specific response values */
+#define VIRTIO_SCSI_S_OK 0
+#define VIRTIO_SCSI_S_OVERRUN 1
+#define VIRTIO_SCSI_S_ABORTED 2
+#define VIRTIO_SCSI_S_BAD_TARGET 3
+#define VIRTIO_SCSI_S_RESET 4
+#define VIRTIO_SCSI_S_BUSY 5
+#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
+#define VIRTIO_SCSI_S_TARGET_FAILURE 7
+#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
+#define VIRTIO_SCSI_S_FAILURE 9
+#define VIRTIO_SCSI_S_INCORRECT_LUN 12
+
+struct pci_vtscsi_event {
+ uint32_t event;
+ uint8_t lun[8];
+ uint32_t reason;
+} __attribute__((packed));
+
+/*
+ * VirtIO-SCSI I/O requests
+ */
+struct pci_vtscsi_req_cmd_rd {
+ const uint8_t lun[8];
+ const uint64_t id;
+ const uint8_t task_attr;
+ const uint8_t prio;
+ const uint8_t crn;
+ const uint8_t cdb[];
+} __attribute__((packed));
+
+/* task_attr */
+#define VIRTIO_SCSI_S_SIMPLE 0
+#define VIRTIO_SCSI_S_ORDERED 1
+#define VIRTIO_SCSI_S_HEAD 2
+#define VIRTIO_SCSI_S_ACA 3
+
+struct pci_vtscsi_req_cmd_wr {
+ uint32_t sense_len;
+ uint32_t residual;
+ uint16_t status_qualifier;
+ uint8_t status;
+ uint8_t response;
+ uint8_t sense[];
+} __attribute__((packed));
+
+/*
+ * Backend interface
+ */
+struct pci_vtscsi_backend {
+ const char *vsb_name;
+ int (*vsb_init)(struct pci_vtscsi_softc *,
+ struct pci_vtscsi_backend *, nvlist_t *);
+ int (*vsb_open)(struct pci_vtscsi_softc *, const char *,
+ long);
+ void (*vsb_reset)(struct pci_vtscsi_softc *);
+
+ void* (*vsb_req_alloc)(struct pci_vtscsi_softc *);
+ void (*vsb_req_clear)(void *);
+ void (*vsb_req_free)(void *);
+
+ void (*vsb_tmf_hdl)(struct pci_vtscsi_softc *, int,
+ struct pci_vtscsi_ctrl_tmf *);
+ void (*vsb_an_hdl)(struct pci_vtscsi_softc *, int,
+ struct pci_vtscsi_ctrl_an *);
+ int (*vsb_req_hdl)(struct pci_vtscsi_softc *, int,
+ struct pci_vtscsi_request *);
+};
+#define PCI_VTSCSI_BACKEND_SET(x) DATA_SET(pci_vtscsi_backend_set, x)
+
+/*
+ * LUN address parsing
+ *
+ * The LUN address consists of 8 bytes. While the spec describes this as 0x01,
+ * followed by the target byte, followed by a "single-level LUN structure",
+ * this is actually the same as a hierarchical LUN address as defined by SAM-5,
+ * consisting of four levels of addressing, where in each level the two MSB of
+ * byte 0 select the address mode used in the remaining bits and bytes.
+ *
+ *
+ * Only the first two levels are actually used by virtio-scsi:
+ *
+ * Level 1: 0x01, 0xTT: Peripheral Device Addressing: Bus 1, Target 0-255
+ * Level 2: 0xLL, 0xLL: Peripheral Device Addressing: Bus MBZ, LUN 0-255
+ * or: Flat Space Addressing: LUN (0-16383)
+ * Level 3 and 4: not used, MBZ
+ *
+ *
+ * Alternatively, the first level may contain an extended LUN address to select
+ * the REPORT_LUNS well-known logical unit:
+ *
+ * Level 1: 0xC1, 0x01: Extended LUN Addressing, Well-Known LUN 1 (REPORT_LUNS)
+ * Level 2, 3, and 4: not used, MBZ
+ *
+ * The virtio spec says that we SHOULD implement the REPORT_LUNS well-known
+ * logical unit but we currently don't.
+ *
+ * According to the virtio spec, these are the only LUN address formats to be
+ * used with virtio-scsi.
+ */
+
+/*
+ * Check that the given LUN address conforms to the virtio spec, does not
+ * address an unknown target, and especially does not address the REPORT_LUNS
+ * well-known logical unit.
+ */
+static inline bool
+pci_vtscsi_check_lun(struct pci_vtscsi_softc *sc, const uint8_t *lun)
+{
+ if (lun[0] == 0xC1)
+ return (false);
+
+ if (lun[0] != 0x01)
+ return (false);
+
+ if (lun[1] >= sc->vss_num_target)
+ return (false);
+
+ if (lun[1] != sc->vss_targets[lun[1]].vst_target)
+ return (false);
+
+ if (sc->vss_targets[lun[1]].vst_fd < 0)
+ return (false);
+
+ if (lun[2] != 0x00 && (lun[2] & 0xc0) != 0x40)
+ return (false);
+
+ if (lun[4] != 0 || lun[5] != 0 || lun[6] != 0 || lun[7] != 0)
+ return (false);
+
+ return (true);
+}
+
+/*
+ * Get the target id from a LUN address.
+ *
+ * Every code path using this function must have called pci_vtscsi_check_lun()
+ * before to make sure the LUN address is valid.
+ */
+static inline uint8_t
+pci_vtscsi_get_target(struct pci_vtscsi_softc *sc, const uint8_t *lun)
+{
+ assert(lun[0] == 0x01);
+ assert(lun[1] < sc->vss_num_target);
+ assert(lun[1] == sc->vss_targets[lun[1]].vst_target);
+ assert(sc->vss_targets[lun[1]].vst_fd >= 0);
+ assert(lun[2] == 0x00 || (lun[2] & 0xc0) == 0x40);
+
+ return (lun[1]);
+}
+
+/*
+ * Get the LUN id from a LUN address.
+ *
+ * Every code path using this function must have called pci_vtscsi_check_lun()
+ * before to make sure the LUN address is valid.
+ */
+static inline uint16_t
+pci_vtscsi_get_lun(struct pci_vtscsi_softc *sc, const uint8_t *lun)
+{
+ assert(lun[0] == 0x01);
+ assert(lun[1] < sc->vss_num_target);
+ assert(lun[1] == sc->vss_targets[lun[1]].vst_target);
+ assert(sc->vss_targets[lun[1]].vst_fd >= 0);
+ assert(lun[2] == 0x00 || (lun[2] & 0xc0) == 0x40);
+
+ return (((lun[2] << 8) | lun[3]) & 0x3fff);
+}
+
+#endif /* _PCI_VIRTIO_SCSI_H_ */
diff --git a/usr.sbin/bhyve/pci_virtio_scsi.c b/usr.sbin/bhyve/pci_virtio_scsi.c
--- a/usr.sbin/bhyve/pci_virtio_scsi.c
+++ b/usr.sbin/bhyve/pci_virtio_scsi.c
@@ -35,7 +35,6 @@
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/queue.h>
-#include <sys/sbuf.h>
#include <errno.h>
#include <fcntl.h>
@@ -48,16 +47,6 @@
#include <pthread.h>
#include <pthread_np.h>
-#include <cam/scsi/scsi_all.h>
-#include <cam/scsi/scsi_message.h>
-#include <cam/ctl/ctl.h>
-#include <cam/ctl/ctl_io.h>
-#include <cam/ctl/ctl_backend.h>
-#include <cam/ctl/ctl_ioctl.h>
-#include <cam/ctl/ctl_util.h>
-#include <cam/ctl/ctl_scsi_all.h>
-#include <camlib.h>
-
#include "bhyverun.h"
#include "config.h"
#include "debug.h"
@@ -65,216 +54,7 @@
#include "virtio.h"
#include "iov.h"
-#define VTSCSI_RINGSZ 64
-#define VTSCSI_REQUESTQ 1
-#define VTSCSI_THR_PER_Q 16
-#define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
-#define VTSCSI_MAXSEG 64
-
-#define VTSCSI_IN_HEADER_LEN(_sc) \
- (sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)
-
-#define VTSCSI_OUT_HEADER_LEN(_sc) \
- (sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)
-
-#define VIRTIO_SCSI_MAX_CHANNEL 0
-#define VIRTIO_SCSI_MAX_TARGET 255
-#define VIRTIO_SCSI_MAX_LUN 16383
-
-#define VIRTIO_SCSI_F_INOUT (1 << 0)
-#define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
-#define VIRTIO_SCSI_F_CHANGE (1 << 2)
-
-static int pci_vtscsi_debug = 0;
-#define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
-#define DPRINTF(msg, params...) if (pci_vtscsi_debug) WPRINTF(msg, ##params)
-
-struct pci_vtscsi_config {
- uint32_t num_queues;
- uint32_t seg_max;
- uint32_t max_sectors;
- uint32_t cmd_per_lun;
- uint32_t event_info_size;
- uint32_t sense_size;
- uint32_t cdb_size;
- uint16_t max_channel;
- uint16_t max_target;
- uint32_t max_lun;
-} __attribute__((packed));
-
-/*
- * I/O request state and I/O request queues
- *
- * In addition to the control queue and notification queues, each virtio-scsi
- * device instance has at least one I/O request queue, the state of which is
- * is kept in an array of struct pci_vtscsi_queue in the device softc.
- *
- * Currently there is only one I/O request queue, but it's trivial to support
- * more than one.
- *
- * Each pci_vtscsi_queue has VTSCSI_RINGSZ pci_vtscsi_request structures pre-
- * allocated on vsq_free_requests. For each I/O request coming in on the I/O
- * virtqueue, the request queue handler will take a pci_vtscsi_request off
- * vsq_free_requests, fills in the data from the I/O virtqueue, puts it on
- * vsq_requests, and signals vsq_cv.
- *
- * There are VTSCSI_THR_PER_Q worker threads for each pci_vtscsi_queue which
- * wait on vsq_cv. When signalled, they repeatedly take one pci_vtscsi_request
- * off vsq_requests, construct a ctl_io for it, and hand it off to the CTL ioctl
- * Interface, which processes it synchronously. After completion of the request,
- * the pci_vtscsi_request is re-initialized and put back onto vsq_free_requests.
- *
- * The worker threads exit when vsq_cv is signalled after vsw_exiting was set.
- *
- * There are three mutexes to coordinate the accesses to an I/O request queue:
- * - vsq_rmtx protects vsq_requests and must be held when waiting on vsq_cv
- * - vsq_fmtx protects vsq_free_requests
- * - vsq_qmtx must be held when operating on the underlying virtqueue, vsq_vq
- */
-STAILQ_HEAD(pci_vtscsi_req_queue, pci_vtscsi_request);
-
-struct pci_vtscsi_queue {
- struct pci_vtscsi_softc * vsq_sc;
- struct vqueue_info * vsq_vq;
- pthread_mutex_t vsq_rmtx;
- pthread_mutex_t vsq_fmtx;
- pthread_mutex_t vsq_qmtx;
- pthread_cond_t vsq_cv;
- struct pci_vtscsi_req_queue vsq_requests;
- struct pci_vtscsi_req_queue vsq_free_requests;
- LIST_HEAD(, pci_vtscsi_worker) vsq_workers;
-};
-
-struct pci_vtscsi_worker {
- struct pci_vtscsi_queue * vsw_queue;
- pthread_t vsw_thread;
- bool vsw_exiting;
- LIST_ENTRY(pci_vtscsi_worker) vsw_link;
-};
-
-struct pci_vtscsi_request {
- struct pci_vtscsi_queue * vsr_queue;
- struct iovec vsr_iov[VTSCSI_MAXSEG + SPLIT_IOV_ADDL_IOV];
- struct iovec * vsr_iov_in;
- struct iovec * vsr_iov_out;
- struct iovec * vsr_data_iov_in;
- struct iovec * vsr_data_iov_out;
- struct pci_vtscsi_req_cmd_rd * vsr_cmd_rd;
- struct pci_vtscsi_req_cmd_wr * vsr_cmd_wr;
- union ctl_io * vsr_ctl_io;
- size_t vsr_niov_in;
- size_t vsr_niov_out;
- size_t vsr_data_niov_in;
- size_t vsr_data_niov_out;
- uint32_t vsr_idx;
- STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
-};
-
-struct pci_vtscsi_target {
- uint8_t vst_target;
- int vst_fd;
- int vst_max_sectors;
-};
-
-/*
- * Per-device softc
- */
-struct pci_vtscsi_softc {
- struct virtio_softc vss_vs;
- struct vqueue_info vss_vq[VTSCSI_MAXQ];
- struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
- pthread_mutex_t vss_mtx;
- int vss_iid;
- int vss_ctl_fd;
- uint32_t vss_features;
- size_t vss_num_target;
- struct pci_vtscsi_config vss_config;
- struct pci_vtscsi_target *vss_targets;
-};
-
-#define VIRTIO_SCSI_T_TMF 0
-#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
-#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
-#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
-#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
-#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
-#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
-#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
-#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
-
-#define VIRTIO_SCSI_T_TMF_MAX_FUNC VIRTIO_SCSI_T_TMF_QUERY_TASK_SET
-
-/* command-specific response values */
-#define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0
-#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
-#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
-
-struct pci_vtscsi_ctrl_tmf {
- const uint32_t type;
- const uint32_t subtype;
- const uint8_t lun[8];
- const uint64_t id;
- uint8_t response;
-} __attribute__((packed));
-
-#define VIRTIO_SCSI_T_AN_QUERY 1
-#define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
-#define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4
-#define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8
-#define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16
-#define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32
-#define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64
-
-struct pci_vtscsi_ctrl_an {
- const uint32_t type;
- const uint8_t lun[8];
- const uint32_t event_requested;
- uint32_t event_actual;
- uint8_t response;
-} __attribute__((packed));
-
-/* command-specific response values */
-#define VIRTIO_SCSI_S_OK 0
-#define VIRTIO_SCSI_S_OVERRUN 1
-#define VIRTIO_SCSI_S_ABORTED 2
-#define VIRTIO_SCSI_S_BAD_TARGET 3
-#define VIRTIO_SCSI_S_RESET 4
-#define VIRTIO_SCSI_S_BUSY 5
-#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
-#define VIRTIO_SCSI_S_TARGET_FAILURE 7
-#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
-#define VIRTIO_SCSI_S_FAILURE 9
-#define VIRTIO_SCSI_S_INCORRECT_LUN 12
-
-/* task_attr */
-#define VIRTIO_SCSI_S_SIMPLE 0
-#define VIRTIO_SCSI_S_ORDERED 1
-#define VIRTIO_SCSI_S_HEAD 2
-#define VIRTIO_SCSI_S_ACA 3
-
-struct pci_vtscsi_event {
- uint32_t event;
- uint8_t lun[8];
- uint32_t reason;
-} __attribute__((packed));
-
-struct pci_vtscsi_req_cmd_rd {
- const uint8_t lun[8];
- const uint64_t id;
- const uint8_t task_attr;
- const uint8_t prio;
- const uint8_t crn;
- const uint8_t cdb[];
-} __attribute__((packed));
-
-struct pci_vtscsi_req_cmd_wr {
- uint32_t sense_len;
- uint32_t residual;
- uint16_t status_qualifier;
- uint8_t status;
- uint8_t response;
- uint8_t sense[];
-} __attribute__((packed));
+#include "pci_virtio_scsi.h"
enum pci_vtscsi_walk {
PCI_VTSCSI_WALK_CONTINUE = 0,
@@ -285,19 +65,14 @@
typedef pci_vtscsi_walk_t pci_vtscsi_walk_request_queue_cb_t(
struct pci_vtscsi_queue *, struct pci_vtscsi_request *, void *);
+static void pci_vtscsi_print_supported_backends(void);
+
static void *pci_vtscsi_proc(void *);
static void pci_vtscsi_reset(void *);
static void pci_vtscsi_neg_features(void *, uint64_t);
static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
-static inline bool pci_vtscsi_check_lun(struct pci_vtscsi_softc *,
- const uint8_t *);
-static inline uint8_t pci_vtscsi_get_target(struct pci_vtscsi_softc *,
- const uint8_t *);
-static inline uint16_t pci_vtscsi_get_lun(struct pci_vtscsi_softc *,
- const uint8_t *);
-
static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_abort_task;
static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_abort_task_set;
static pci_vtscsi_walk_request_queue_cb_t pci_vtscsi_tmf_handle_clear_aca;
@@ -319,7 +94,8 @@
static struct pci_vtscsi_request *pci_vtscsi_alloc_request(
struct pci_vtscsi_softc *);
-static void pci_vtscsi_free_request(struct pci_vtscsi_request *);
+static void pci_vtscsi_free_request(struct pci_vtscsi_softc *,
+ struct pci_vtscsi_request *);
static struct pci_vtscsi_request *pci_vtscsi_get_request(
struct pci_vtscsi_req_queue *);
static void pci_vtscsi_put_request(struct pci_vtscsi_req_queue *,
@@ -340,6 +116,8 @@
struct pci_vtscsi_queue *, int);
static int pci_vtscsi_init(struct pci_devinst *, nvlist_t *);
+SET_DECLARE(pci_vtscsi_backend_set, struct pci_vtscsi_backend);
+
static struct virtio_consts vtscsi_vi_consts = {
.vc_name = "vtscsi",
.vc_nvq = VTSCSI_MAXQ,
@@ -351,6 +129,25 @@
.vc_hv_caps = VIRTIO_RING_F_INDIRECT_DESC,
};
+int pci_vtscsi_debug = 0;
+
+
+static void
+pci_vtscsi_print_supported_backends(void)
+{
+	struct pci_vtscsi_backend **vbpp;
+
+	if (SET_COUNT(pci_vtscsi_backend_set) == 0) {
+		/* Terminate with a newline like the per-backend output below. */
+		printf("No virtio-scsi backends available\n");
+		return;
+	}
+
+	SET_FOREACH(vbpp, pci_vtscsi_backend_set)
+		printf("%s\n", (*vbpp)->vsb_name);
+}
+
+
static void *
pci_vtscsi_proc(void *arg)
{
@@ -406,8 +203,7 @@
.num_queues = VTSCSI_REQUESTQ,
/* Leave room for the request and the response. */
.seg_max = VTSCSI_MAXSEG - 2,
- /* CTL apparently doesn't have a limit here */
- .max_sectors = INT32_MAX,
+ .max_sectors = 0, /* overridden by backend reset() */
.cmd_per_lun = 1,
.event_info_size = sizeof(struct pci_vtscsi_event),
.sense_size = 96,
@@ -416,6 +212,8 @@
.max_target = MAX(1, sc->vss_num_target) - 1,
.max_lun = VIRTIO_SCSI_MAX_LUN
};
+
+ sc->vss_backend->vsb_reset(sc);
}
static void
@@ -444,105 +242,6 @@
return (0);
}
-/*
- * LUN address parsing
- *
- * The LUN address consists of 8 bytes. While the spec describes this as 0x01,
- * followed by the target byte, followed by a "single-level LUN structure",
- * this is actually the same as a hierarchical LUN address as defined by SAM-5,
- * consisting of four levels of addressing, where in each level the two MSB of
- * byte 0 select the address mode used in the remaining bits and bytes.
- *
- *
- * Only the first two levels are acutally used by virtio-scsi:
- *
- * Level 1: 0x01, 0xTT: Peripheral Device Addressing: Bus 1, Target 0-255
- * Level 2: 0xLL, 0xLL: Peripheral Device Addressing: Bus MBZ, LUN 0-255
- * or: Flat Space Addressing: LUN (0-16383)
- * Level 3 and 4: not used, MBZ
- *
- *
- * Alternatively, the first level may contain an extended LUN address to select
- * the REPORT_LUNS well-known logical unit:
- *
- * Level 1: 0xC1, 0x01: Extended LUN Adressing, Well-Known LUN 1 (REPORT_LUNS)
- * Level 2, 3, and 4: not used, MBZ
- *
- * The virtio spec says that we SHOULD implement the REPORT_LUNS well-known
- * logical unit but we currently don't.
- *
- * According to the virtio spec, these are the only LUNS address formats to be
- * used with virtio-scsi.
- */
-
-/*
- * Check that the given LUN address conforms to the virtio spec, does not
- * address an unknown target, and especially does not address the REPORT_LUNS
- * well-known logical unit.
- */
-static inline bool
-pci_vtscsi_check_lun(struct pci_vtscsi_softc *sc, const uint8_t *lun)
-{
- if (lun[0] == 0xC1)
- return (false);
-
- if (lun[0] != 0x01)
- return (false);
-
- if (lun[1] >= sc->vss_num_target)
- return (false);
-
- if (lun[1] != sc->vss_targets[lun[1]].vst_target)
- return (false);
-
- if (sc->vss_targets[lun[1]].vst_fd < 0)
- return (false);
-
- if (lun[2] != 0x00 && (lun[2] & 0xc0) != 0x40)
- return (false);
-
- if (lun[4] != 0 || lun[5] != 0 || lun[6] != 0 || lun[7] != 0)
- return (false);
-
- return (true);
-}
-
-/*
- * Get the target id from a LUN address.
- *
- * Every code path using this function must have called pci_vtscsi_check_lun()
- * before to make sure the LUN address is valid.
- */
-static inline uint8_t
-pci_vtscsi_get_target(struct pci_vtscsi_softc *sc, const uint8_t *lun)
-{
- assert(lun[0] == 0x01);
- assert(lun[1] < sc->vss_num_target);
- assert(lun[1] == sc->vss_targets[lun[1]].vst_target);
- assert(sc->vss_targets[lun[1]].vst_fd >= 0);
- assert(lun[2] == 0x00 || (lun[2] & 0xc0) == 0x40);
-
- return (lun[1]);
-}
-
-/*
- * Get the LUN id from a LUN address.
- *
- * Every code path using this function must have called pci_vtscsi_check_lun()
- * before to make sure the LUN address is valid.
- */
-static inline uint16_t
-pci_vtscsi_get_lun(struct pci_vtscsi_softc *sc, const uint8_t *lun)
-{
- assert(lun[0] == 0x01);
- assert(lun[1] < sc->vss_num_target);
- assert(lun[1] == sc->vss_targets[lun[1]].vst_target);
- assert(sc->vss_targets[lun[1]].vst_fd >= 0);
- assert(lun[2] == 0x00 || (lun[2] & 0xc0) == 0x40);
-
- return (((lun[2] << 8) | lun[3]) & 0x3fff);
-}
-
/*
* ABORT TASK: Abort the specifed task queued for this LUN.
*
@@ -793,9 +492,7 @@
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
struct pci_vtscsi_ctrl_tmf *tmf)
{
- union ctl_io *io;
uint8_t target;
- int err;
int fd;
if (tmf->subtype > VIRTIO_SCSI_T_TMF_MAX_FUNC) {
@@ -827,9 +524,9 @@
* while we're processing the TMF request. This also effectively blocks
* pci_vtscsi_requestq_notify() from adding any new requests to the
* request queue. This does not prevent any requests currently being
- * processed by CTL from being completed and returned, which we must
- * guarantee to adhere to the ordering requirements for any TMF function
- * which aborts tasks.
+ * processed by the backend from being completed and returned, which we
+ * must guarantee to adhere to the ordering requirements for any TMF
+ * function which aborts tasks.
*/
for (int i = 0; i < VTSCSI_REQUESTQ; i++) {
struct pci_vtscsi_queue *q = &sc->vss_queues[i];
@@ -838,7 +535,7 @@
}
/*
- * CTL may set response to FAILURE for the TMF request.
+ * The backend may set response to FAILURE for the TMF request.
*
* The default response of all TMF functions is FUNCTION COMPLETE if
* there was no error, regardless of whether it actually succeeded or
@@ -846,79 +543,11 @@
* which will explicitly return FUNCTION SUCCEEDED if the specified
* task or any task was active in the target/LUN, respectively.
*
- * Thus, we will call CTL first. Only if the response we get is
- * FUNCTION COMPLETE we'll continue processing the TMF function
- * on our queues.
+ * Thus, we will call the backend first. Only if the response we get
+ * is FUNCTION COMPLETE we'll continue processing the TMF function on
+ * our queues.
*/
- io = ctl_scsi_alloc_io(sc->vss_iid);
- if (io == NULL) {
- WPRINTF("failed to allocate ctl_io: err=%d (%s)",
- errno, strerror(errno));
-
- tmf->response = VIRTIO_SCSI_S_FAILURE;
- goto out;
- }
-
- ctl_scsi_zero_io(io);
-
- io->io_hdr.io_type = CTL_IO_TASK;
- io->io_hdr.nexus.initid = sc->vss_iid;
- io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(sc, tmf->lun);
- io->taskio.tag_type = CTL_TAG_SIMPLE;
- io->taskio.tag_num = tmf->id;
- io->io_hdr.flags |= CTL_FLAG_USER_TAG;
-
- switch (tmf->subtype) {
- case VIRTIO_SCSI_T_TMF_ABORT_TASK:
- io->taskio.task_action = CTL_TASK_ABORT_TASK;
- break;
-
- case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
- io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
- break;
-
- case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
- io->taskio.task_action = CTL_TASK_CLEAR_ACA;
- break;
-
- case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
- io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
- break;
-
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
- break;
-
- case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
- io->taskio.task_action = CTL_TASK_LUN_RESET;
- break;
-
- case VIRTIO_SCSI_T_TMF_QUERY_TASK:
- io->taskio.task_action = CTL_TASK_QUERY_TASK;
- break;
-
- case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
- io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
- break;
- }
-
- if (pci_vtscsi_debug) {
- struct sbuf *sb = sbuf_new_auto();
- ctl_io_sbuf(io, sb);
- sbuf_finish(sb);
- DPRINTF("%s", sbuf_data(sb));
- sbuf_delete(sb);
- }
-
- err = ioctl(fd, CTL_IO, io);
- if (err != 0) {
- WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
- tmf->response = VIRTIO_SCSI_S_FAILURE;
- } else {
- tmf->response = io->taskio.task_status;
- }
-
- ctl_scsi_free_io(io);
+ sc->vss_backend->vsb_tmf_hdl(sc, fd, tmf);
if (tmf->response != VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
/*
@@ -936,7 +565,7 @@
tmf->response != VIRTIO_SCSI_S_FUNCTION_REJECTED &&
tmf->response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) {
WPRINTF("pci_vtscsi_tmf_hdl: unexpected response from "
- "CTL: %d", tmf->response);
+ "backend: %d", tmf->response);
}
} else {
pci_vtscsi_walk_t ret = PCI_VTSCSI_WALK_CONTINUE;
@@ -952,7 +581,6 @@
}
}
-out:
/* Unlock the request queues before we return. */
for (int i = 0; i < VTSCSI_REQUESTQ; i++) {
struct pci_vtscsi_queue *q = &sc->vss_queues[i];
@@ -965,6 +593,7 @@
pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc, struct pci_vtscsi_ctrl_an *an)
{
int target;
+ int fd;
if (pci_vtscsi_check_lun(sc, an->lun) == false) {
DPRINTF("AN request to invalid LUN %.2hhx%.2hhx-%.2hhx%.2hhx-"
@@ -977,10 +606,12 @@
target = pci_vtscsi_get_target(sc, an->lun);
+ fd = sc->vss_targets[target].vst_fd;
+
DPRINTF("AN request tgt %d, lun %d, event requested %x",
target, pci_vtscsi_get_lun(sc, an->lun), an->event_requested);
- an->response = VIRTIO_SCSI_S_FAILURE;
+ sc->vss_backend->vsb_an_hdl(sc, fd, an);
}
static void
@@ -1031,10 +662,9 @@
if (req->vsr_cmd_wr == NULL)
goto alloc_fail;
- req->vsr_ctl_io = ctl_scsi_alloc_io(sc->vss_iid);
- if (req->vsr_ctl_io == NULL)
+ req->vsr_backend = sc->vss_backend->vsb_req_alloc(sc);
+ if (req->vsr_backend == NULL)
goto alloc_fail;
- ctl_scsi_zero_io(req->vsr_ctl_io);
return (req);
@@ -1042,16 +672,17 @@
EPRINTLN("failed to allocate request: %s", strerror(errno));
if (req != NULL)
- pci_vtscsi_free_request(req);
+ pci_vtscsi_free_request(sc, req);
return (NULL);
}
static void
-pci_vtscsi_free_request(struct pci_vtscsi_request *req)
+pci_vtscsi_free_request(struct pci_vtscsi_softc *sc,
+ struct pci_vtscsi_request *req)
{
- if (req->vsr_ctl_io != NULL)
- ctl_scsi_free_io(req->vsr_ctl_io);
+ if (req->vsr_backend != NULL)
+ sc->vss_backend->vsb_req_free(req->vsr_backend);
if (req->vsr_cmd_rd != NULL)
free(req->vsr_cmd_rd);
if (req->vsr_cmd_wr != NULL)
@@ -1192,9 +823,10 @@
pci_vtscsi_return_request(struct pci_vtscsi_queue *q,
struct pci_vtscsi_request *req, int iolen)
{
+ struct pci_vtscsi_softc *sc = q->vsq_sc;
void *cmd_rd = req->vsr_cmd_rd;
void *cmd_wr = req->vsr_cmd_wr;
- void *ctl_io = req->vsr_ctl_io;
+ void *backend = req->vsr_backend;
int idx = req->vsr_idx;
DPRINTF("request <idx=%d> completed, response %d", idx,
@@ -1203,7 +835,7 @@
iolen += buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(q->vsq_sc),
req->vsr_iov_out, req->vsr_niov_out);
- ctl_scsi_zero_io(req->vsr_ctl_io);
+ sc->vss_backend->vsb_req_clear(backend);
memset(cmd_rd, 0, VTSCSI_IN_HEADER_LEN(q->vsq_sc));
memset(cmd_wr, 0, VTSCSI_OUT_HEADER_LEN(q->vsq_sc));
@@ -1211,7 +843,7 @@
req->vsr_cmd_rd = cmd_rd;
req->vsr_cmd_wr = cmd_wr;
- req->vsr_ctl_io = ctl_io;
+ req->vsr_backend = backend;
pthread_mutex_lock(&q->vsq_fmtx);
pci_vtscsi_put_request(&q->vsq_free_requests, req);
@@ -1227,81 +859,7 @@
pci_vtscsi_request_handle(struct pci_vtscsi_softc *sc, int fd,
struct pci_vtscsi_request *req)
{
- union ctl_io *io = req->vsr_ctl_io;
- void *ext_data_ptr = NULL;
- uint32_t ext_data_len = 0, ext_sg_entries = 0;
- int err, nxferred;
-
- io->io_hdr.nexus.initid = sc->vss_iid;
- io->io_hdr.nexus.targ_lun =
- pci_vtscsi_get_lun(sc, req->vsr_cmd_rd->lun);
-
- io->io_hdr.io_type = CTL_IO_SCSI;
-
- if (req->vsr_data_niov_in > 0) {
- ext_data_ptr = (void *)req->vsr_data_iov_in;
- ext_sg_entries = req->vsr_data_niov_in;
- ext_data_len = count_iov(req->vsr_data_iov_in,
- req->vsr_data_niov_in);
- io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
- } else if (req->vsr_data_niov_out > 0) {
- ext_data_ptr = (void *)req->vsr_data_iov_out;
- ext_sg_entries = req->vsr_data_niov_out;
- ext_data_len = count_iov(req->vsr_data_iov_out,
- req->vsr_data_niov_out);
- io->io_hdr.flags |= CTL_FLAG_DATA_IN;
- }
-
- io->scsiio.sense_len = sc->vss_config.sense_size;
- io->scsiio.tag_num = req->vsr_cmd_rd->id;
- io->io_hdr.flags |= CTL_FLAG_USER_TAG;
- switch (req->vsr_cmd_rd->task_attr) {
- case VIRTIO_SCSI_S_ORDERED:
- io->scsiio.tag_type = CTL_TAG_ORDERED;
- break;
- case VIRTIO_SCSI_S_HEAD:
- io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
- break;
- case VIRTIO_SCSI_S_ACA:
- io->scsiio.tag_type = CTL_TAG_ACA;
- break;
- case VIRTIO_SCSI_S_SIMPLE:
- default:
- io->scsiio.tag_type = CTL_TAG_SIMPLE;
- break;
- }
- io->scsiio.ext_sg_entries = ext_sg_entries;
- io->scsiio.ext_data_ptr = ext_data_ptr;
- io->scsiio.ext_data_len = ext_data_len;
- io->scsiio.ext_data_filled = 0;
- io->scsiio.cdb_len = sc->vss_config.cdb_size;
- memcpy(io->scsiio.cdb, req->vsr_cmd_rd->cdb, sc->vss_config.cdb_size);
-
- if (pci_vtscsi_debug) {
- struct sbuf *sb = sbuf_new_auto();
- ctl_io_sbuf(io, sb);
- sbuf_finish(sb);
- DPRINTF("%s", sbuf_data(sb));
- sbuf_delete(sb);
- }
-
- err = ioctl(fd, CTL_IO, io);
- if (err != 0) {
- WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
- req->vsr_cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
- } else {
- req->vsr_cmd_wr->sense_len =
- MIN(io->scsiio.sense_len, sc->vss_config.sense_size);
- req->vsr_cmd_wr->residual = ext_data_len -
- io->scsiio.ext_data_filled;
- req->vsr_cmd_wr->status = io->scsiio.scsi_status;
- req->vsr_cmd_wr->response = VIRTIO_SCSI_S_OK;
- memcpy(&req->vsr_cmd_wr->sense, &io->scsiio.sense_data,
- req->vsr_cmd_wr->sense_len);
- }
-
- nxferred = io->scsiio.ext_data_filled;
- return (nxferred);
+ return (sc->vss_backend->vsb_req_hdl(sc, fd, req));
}
static void
@@ -1480,10 +1038,13 @@
targets = create_relative_config_node(nvl, "target");
- /* Handle legacy form (0). */
- if (opts == NULL) {
- pci_vtscsi_add_target_config(targets, "/dev/cam/ctl", 0);
+ /* Legacy form (0) is handled in pci_vtscsi_init(). */
+ if (opts == NULL)
return (0);
+
+ if (strcmp("help", opts) == 0) {
+ pci_vtscsi_print_supported_backends();
+ exit(0);
}
n = strcspn(opts, ",=");
@@ -1579,6 +1140,7 @@
const char *value;
const char *errstr;
uint64_t target;
+ int ret;
assert(type == NV_TYPE_STRING);
@@ -1590,23 +1152,21 @@
sc->vss_targets[target].vst_target = target;
/*
- * 'value' contains the CTL device node path of this target.
+ * 'value' contains the backend path. Call the backend to open it.
*/
value = nvlist_get_string(parent, name);
- sc->vss_targets[target].vst_fd = open(value, O_RDWR);
- if (sc->vss_targets[target].vst_fd < 0) {
+ ret = sc->vss_backend->vsb_open(sc, value, target);
+ if (ret != 0)
EPRINTLN("cannot open target %lu at %s: %s", target, value,
strerror(errno));
- return (-1);
- }
-
- return (0);
+ return (ret);
}
static int
pci_vtscsi_init(struct pci_devinst *pi, nvlist_t *nvl)
{
struct pci_vtscsi_softc *sc;
+ struct pci_vtscsi_backend *backend, **vbpp;
const char *value;
int err;
@@ -1614,10 +1174,6 @@
if (sc == NULL)
return (-1);
- value = get_config_value_node(nvl, "iid");
- if (value != NULL)
- sc->vss_iid = strtoul(value, NULL, 10);
-
value = get_config_value_node(nvl, "bootindex");
if (value != NULL) {
if (pci_emul_add_boot_device(pi, atoi(value))) {
@@ -1627,6 +1183,35 @@
}
}
+ value = get_config_value_node(nvl, "backend");
+ if (value == NULL) {
+ if (SET_COUNT(pci_vtscsi_backend_set) == 0) {
+ WPRINTF("No virtio-scsi backends available");
+ errno = EINVAL;
+ goto fail;
+ }
+ backend = SET_ITEM(pci_vtscsi_backend_set, 0);
+ } else {
+ backend = NULL;
+ SET_FOREACH(vbpp, pci_vtscsi_backend_set) {
+ if (strcasecmp(value, (*vbpp)->vsb_name) == 0) {
+ backend = *vbpp;
+ break;
+ }
+ }
+ if (backend == NULL) {
+ WPRINTF("No such virtio-scsi backend: %s", value);
+ errno = EINVAL;
+ goto fail;
+ }
+ }
+
+ err = backend->vsb_init(sc, backend, nvl);
+ if (err != 0) {
+ errno = EINVAL;
+ goto fail;
+ }
+
nvl = find_relative_config_node(nvl, "target");
if (nvl != NULL) {
err = walk_config_nodes("", nvl, sc, pci_vtscsi_count_targets);
diff --git a/usr.sbin/bhyve/pci_virtio_scsi_ctl.c b/usr.sbin/bhyve/pci_virtio_scsi_ctl.c
new file mode 100644
--- /dev/null
+++ b/usr.sbin/bhyve/pci_virtio_scsi_ctl.c
@@ -0,0 +1,374 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
+ * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
+ * Copyright (c) 2026 Hans Rosenfeld
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/linker_set.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <pthread.h>
+#include <pthread_np.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <camlib.h>
+
+#include "bhyverun.h"
+#include "config.h"
+#include "debug.h"
+#include "pci_emul.h"
+#include "virtio.h"
+#include "iov.h"
+#include "pci_virtio_scsi.h"
+
+struct vtscsi_ctl_backend {
+ struct pci_vtscsi_backend vcb_backend;
+ int vcb_iid;
+};
+
+static int vtscsi_ctl_init(struct pci_vtscsi_softc *,
+ struct pci_vtscsi_backend *, nvlist_t *);
+static int vtscsi_ctl_open(struct pci_vtscsi_softc *, const char *, long);
+static void vtscsi_ctl_reset(struct pci_vtscsi_softc *);
+
+static void *vtscsi_ctl_req_alloc(struct pci_vtscsi_softc *);
+static void vtscsi_ctl_req_clear(void *);
+static void vtscsi_ctl_req_free(void *);
+
+static void vtscsi_ctl_tmf_hdl(struct pci_vtscsi_softc *, int,
+ struct pci_vtscsi_ctrl_tmf *);
+static void vtscsi_ctl_an_hdl(struct pci_vtscsi_softc *, int,
+ struct pci_vtscsi_ctrl_an *);
+static int vtscsi_ctl_req_hdl(struct pci_vtscsi_softc *, int,
+ struct pci_vtscsi_request *);
+
+/*
+ * Config-walk callback: count configured target entries, validating that
+ * each one is a string (the backend device path).  'arg' points to the
+ * running count.  Returns 0 on success, -1 with errno set to EINVAL if an
+ * entry has the wrong type.
+ */
+static int
+vtscsi_ctl_count_targets(const char *prefix __unused,
+    const nvlist_t *parent __unused, const char *name, int type,
+    void *arg)
+{
+	int *count = arg;
+
+	/* Target entries are path strings; anything else is a config error. */
+	if (type != NV_TYPE_STRING) {
+		EPRINTLN("invalid target \"%s\" type: not a string", name);
+		errno = EINVAL;
+		return (-1);
+	}
+
+	(*count)++;
+
+	return (0);
+}
+
+/*
+ * Backend init: allocate the per-instance CTL backend state, parse the
+ * optional "iid" option (initiator ID, default 0), and ensure at least one
+ * target is configured, defaulting to /dev/cam/ctl to remain compatible
+ * with previous bhyve versions.  Returns 0 on success, non-zero on error.
+ */
+static int
+vtscsi_ctl_init(struct pci_vtscsi_softc *sc, struct pci_vtscsi_backend *backend,
+    nvlist_t *nvl)
+{
+	int count = 0;
+	int ret = 0;
+	struct vtscsi_ctl_backend *ctl_backend;
+	nvlist_t *targets;
+	const char *value;
+
+	ctl_backend = calloc(1, sizeof(struct vtscsi_ctl_backend));
+	if (ctl_backend == NULL) {
+		EPRINTLN("failed to allocate backend data: %s",
+		    strerror(errno));
+		return (-1);
+	}
+
+	/*
+	 * Embed a copy of the backend ops so that the handlers can recover
+	 * the containing vtscsi_ctl_backend (and thus vcb_iid) by casting
+	 * sc->vss_backend.
+	 */
+	ctl_backend->vcb_backend = *backend;
+	sc->vss_backend = &ctl_backend->vcb_backend;
+
+	value = get_config_value_node(nvl, "iid");
+	if (value != NULL)
+		ctl_backend->vcb_iid = strtoul(value, NULL, 10);
+
+	/*
+	 * Count configured targets.  If no targets were configured, use
+	 * /dev/cam/ctl to remain compatible with previous versions.
+	 */
+	targets = find_relative_config_node(nvl, "target");
+	if (targets != NULL) {
+		ret = walk_config_nodes("", targets, &count,
+		    vtscsi_ctl_count_targets);
+		if (ret != 0) {
+			/* Don't leak the backend state on failure. */
+			sc->vss_backend = NULL;
+			free(ctl_backend);
+			return (ret);
+		}
+	}
+
+	if (count == 0) {
+		/*
+		 * The "target" node may not exist at all in the legacy form;
+		 * create it before adding the default target.
+		 */
+		targets = create_relative_config_node(nvl, "target");
+		set_config_value_node(targets, "0", "/dev/cam/ctl");
+	}
+
+	return (0);
+}
+
+/*
+ * Open the CTL device node for a target and record the file descriptor in
+ * the target state.  Returns 0 on success, -1 with errno set by open(2)
+ * on failure.
+ */
+static int
+vtscsi_ctl_open(struct pci_vtscsi_softc *sc, const char *devname,
+    long target)
+{
+	struct pci_vtscsi_target *tgt = &sc->vss_targets[target];
+
+	tgt->vst_fd = open(devname, O_RDWR);
+	if (tgt->vst_fd < 0)
+		return (-1);
+
+	return (0);
+}
+
+/*
+ * Backend reset hook: (re)initialize the backend-dependent parts of the
+ * virtio-scsi config space.
+ */
+static void
+vtscsi_ctl_reset(struct pci_vtscsi_softc *sc)
+{
+	/*
+	 * There doesn't seem to be a limit to the maximum number of
+	 * sectors CTL can transfer in one request.
+	 */
+	sc->vss_config.max_sectors = INT32_MAX;
+}
+
+/*
+ * Allocate and zero a ctl_io for this instance's initiator ID.
+ * Returns NULL on allocation failure.
+ */
+static void *
+vtscsi_ctl_req_alloc(struct pci_vtscsi_softc *sc)
+{
+	struct vtscsi_ctl_backend *ctl;
+	union ctl_io *io;
+
+	/*
+	 * sc->vss_backend points at the vcb_backend member embedded in
+	 * vtscsi_ctl_backend, so cast it -- not the softc itself -- to
+	 * reach vcb_iid (matches vtscsi_ctl_tmf_hdl/vtscsi_ctl_req_hdl).
+	 */
+	ctl = (struct vtscsi_ctl_backend *)sc->vss_backend;
+
+	io = ctl_scsi_alloc_io(ctl->vcb_iid);
+	if (io != NULL)
+		ctl_scsi_zero_io(io);
+
+	return (io);
+}
+
+/* Reset a ctl_io so it can be reused for another request. */
+static void
+vtscsi_ctl_req_clear(void *io)
+{
+	ctl_scsi_zero_io(io);
+}
+
+/* Release a ctl_io obtained from vtscsi_ctl_req_alloc(). */
+static void
+vtscsi_ctl_req_free(void *io)
+{
+	ctl_scsi_free_io(io);
+}
+
+/*
+ * Task management function handler: translate a virtio-scsi TMF request
+ * into a CTL_IO_TASK ioctl on the target's CTL fd and store the result in
+ * tmf->response.  The caller has already validated tmf->subtype and the
+ * LUN address.
+ */
+static void
+vtscsi_ctl_tmf_hdl(struct pci_vtscsi_softc *sc, int fd,
+    struct pci_vtscsi_ctrl_tmf *tmf)
+{
+	struct vtscsi_ctl_backend *ctl;
+	union ctl_io *io;
+	int err;
+
+	ctl = (struct vtscsi_ctl_backend *)sc->vss_backend;
+
+	/* vtscsi_ctl_req_alloc() returns the ctl_io already zeroed. */
+	io = vtscsi_ctl_req_alloc(sc);
+	if (io == NULL) {
+		WPRINTF("failed to allocate ctl_io: err=%d (%s)",
+		    errno, strerror(errno));
+		tmf->response = VIRTIO_SCSI_S_FAILURE;
+		return;
+	}
+
+	io->io_hdr.io_type = CTL_IO_TASK;
+	io->io_hdr.nexus.initid = ctl->vcb_iid;
+	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(sc, tmf->lun);
+	io->taskio.tag_type = CTL_TAG_SIMPLE;
+	io->taskio.tag_num = tmf->id;
+	io->io_hdr.flags |= CTL_FLAG_USER_TAG;
+
+	/*
+	 * Map the virtio-scsi TMF subtype onto the corresponding CTL task
+	 * action.  The caller guarantees subtype <= VIRTIO_SCSI_T_TMF_MAX_FUNC,
+	 * so no default case is needed here.
+	 */
+	switch (tmf->subtype) {
+	case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+		io->taskio.task_action = CTL_TASK_ABORT_TASK;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+		io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
+		io->taskio.task_action = CTL_TASK_CLEAR_ACA;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+		io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
+		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
+		io->taskio.task_action = CTL_TASK_LUN_RESET;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_QUERY_TASK:
+		io->taskio.task_action = CTL_TASK_QUERY_TASK;
+		break;
+
+	case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
+		io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
+		break;
+	}
+
+	if (pci_vtscsi_debug) {
+		struct sbuf *sb = sbuf_new_auto();
+		ctl_io_sbuf(io, sb);
+		sbuf_finish(sb);
+		DPRINTF("%s", sbuf_data(sb));
+		sbuf_delete(sb);
+	}
+
+	err = ioctl(fd, CTL_IO, io);
+	if (err != 0) {
+		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
+		tmf->response = VIRTIO_SCSI_S_FAILURE;
+	} else {
+		tmf->response = io->taskio.task_status;
+	}
+
+	vtscsi_ctl_req_free(io);
+}
+
+/*
+ * Asynchronous notification handler.  This backend does not implement
+ * async event delivery, so every AN request is answered with FAILURE.
+ */
+static void
+vtscsi_ctl_an_hdl(struct pci_vtscsi_softc *sc __unused, int fd __unused,
+    struct pci_vtscsi_ctrl_an *an)
+{
+	an->response = VIRTIO_SCSI_S_FAILURE;
+}
+
+/*
+ * SCSI request handler: marshal a virtio-scsi command into a CTL_IO_SCSI
+ * ioctl on the target's fd, then copy status, sense data and residual back
+ * into the virtio response header.  Returns the number of bytes actually
+ * transferred (ext_data_filled), which the caller adds to the used-ring
+ * length.
+ */
+static int
+vtscsi_ctl_req_hdl(struct pci_vtscsi_softc *sc, int fd,
+    struct pci_vtscsi_request *req)
+{
+	union ctl_io *io = req->vsr_backend;
+	void *ext_data_ptr = NULL;
+	uint32_t ext_data_len = 0, ext_sg_entries = 0;
+	struct vtscsi_ctl_backend *ctl;
+	int err, nxferred;
+
+	/* Recover the CTL backend state (initiator ID) from the softc. */
+	ctl = (struct vtscsi_ctl_backend *)sc->vss_backend;
+
+	io->io_hdr.nexus.initid = ctl->vcb_iid;
+	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(sc,
+	    req->vsr_cmd_rd->lun);
+
+	io->io_hdr.io_type = CTL_IO_SCSI;
+
+	/*
+	 * Pass the guest's scatter/gather list straight through to CTL.
+	 * Note the direction naming: guest-out iovecs (niov_in from our
+	 * perspective) become CTL_FLAG_DATA_OUT and vice versa.
+	 */
+	if (req->vsr_data_niov_in > 0) {
+		ext_data_ptr = (void *)req->vsr_data_iov_in;
+		ext_sg_entries = req->vsr_data_niov_in;
+		ext_data_len = count_iov(req->vsr_data_iov_in,
+		    req->vsr_data_niov_in);
+		io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
+	} else if (req->vsr_data_niov_out > 0) {
+		ext_data_ptr = (void *)req->vsr_data_iov_out;
+		ext_sg_entries = req->vsr_data_niov_out;
+		ext_data_len = count_iov(req->vsr_data_iov_out,
+		    req->vsr_data_niov_out);
+		io->io_hdr.flags |= CTL_FLAG_DATA_IN;
+	}
+
+	io->scsiio.sense_len = sc->vss_config.sense_size;
+	io->scsiio.tag_num = req->vsr_cmd_rd->id;
+	io->io_hdr.flags |= CTL_FLAG_USER_TAG;
+	/* Map the virtio task attribute onto the CTL tag type. */
+	switch (req->vsr_cmd_rd->task_attr) {
+	case VIRTIO_SCSI_S_ORDERED:
+		io->scsiio.tag_type = CTL_TAG_ORDERED;
+		break;
+	case VIRTIO_SCSI_S_HEAD:
+		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
+		break;
+	case VIRTIO_SCSI_S_ACA:
+		io->scsiio.tag_type = CTL_TAG_ACA;
+		break;
+	case VIRTIO_SCSI_S_SIMPLE:
+	default:
+		io->scsiio.tag_type = CTL_TAG_SIMPLE;
+		break;
+	}
+	io->scsiio.ext_sg_entries = ext_sg_entries;
+	io->scsiio.ext_data_ptr = ext_data_ptr;
+	io->scsiio.ext_data_len = ext_data_len;
+	io->scsiio.ext_data_filled = 0;
+	io->scsiio.cdb_len = sc->vss_config.cdb_size;
+	memcpy(io->scsiio.cdb, req->vsr_cmd_rd->cdb, sc->vss_config.cdb_size);
+
+	if (pci_vtscsi_debug) {
+		struct sbuf *sb = sbuf_new_auto();
+		ctl_io_sbuf(io, sb);
+		sbuf_finish(sb);
+		DPRINTF("%s", sbuf_data(sb));
+		sbuf_delete(sb);
+	}
+
+	err = ioctl(fd, CTL_IO, io);
+	if (err != 0) {
+		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
+		req->vsr_cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
+	} else {
+		/* Sense length is capped at what the guest negotiated. */
+		req->vsr_cmd_wr->sense_len =
+		    MIN(io->scsiio.sense_len, sc->vss_config.sense_size);
+		req->vsr_cmd_wr->residual = ext_data_len -
+		    io->scsiio.ext_data_filled;
+		req->vsr_cmd_wr->status = io->scsiio.scsi_status;
+		req->vsr_cmd_wr->response = VIRTIO_SCSI_S_OK;
+		memcpy(&req->vsr_cmd_wr->sense, &io->scsiio.sense_data,
+		    req->vsr_cmd_wr->sense_len);
+	}
+
+	nxferred = io->scsiio.ext_data_filled;
+	return (nxferred);
+}
+
+/*
+ * CTL backend ops, registered in the virtio-scsi backend linker set.
+ * "ctl" is currently the only backend and therefore the default.
+ */
+static const struct pci_vtscsi_backend vtscsi_ctl_backend = {
+	.vsb_name = "ctl",
+	.vsb_init = vtscsi_ctl_init,
+	.vsb_open = vtscsi_ctl_open,
+	.vsb_reset = vtscsi_ctl_reset,
+
+	.vsb_req_alloc = vtscsi_ctl_req_alloc,
+	.vsb_req_clear = vtscsi_ctl_req_clear,
+	.vsb_req_free = vtscsi_ctl_req_free,
+
+	.vsb_tmf_hdl = vtscsi_ctl_tmf_hdl,
+	.vsb_an_hdl = vtscsi_ctl_an_hdl,
+	.vsb_req_hdl = vtscsi_ctl_req_hdl
+};
+PCI_VTSCSI_BACKEND_SET(vtscsi_ctl_backend);

File Metadata

Mime Type
text/plain
Expires
Sat, Feb 28, 3:06 PM (3 h, 32 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29072982
Default Alt Text
D53223.id172278.diff (51 KB)

Event Timeline