Page MenuHomeFreeBSD

D54073.id172279.diff
No OneTemporary

D54073.id172279.diff

diff --git a/usr.sbin/bhyve/bhyve.8 b/usr.sbin/bhyve/bhyve.8
--- a/usr.sbin/bhyve/bhyve.8
+++ b/usr.sbin/bhyve/bhyve.8
@@ -722,6 +722,18 @@
A fw_cfg file is used to specify the boot order.
The guest firmware may ignore or not support this fw_cfg file.
In that case, this feature doesn't work as expected.
+.It Li ctl_ringsz= Ns Ar ringsz
+The ring size to use for the control queue.
+.It Li evt_ringsz= Ns Ar ringsz
+The ring size to use for the event queue.
+.It Li req_ringsz= Ns Ar ringsz
+The ring size to use for each I/O request queue.
+.It Li num_queues= Ns Ar num
+The number of I/O request queues to use.
+.It Li seg_max= Ns Ar num
+The maximum number of segments allowed in a single command.
+.It Li thr_per_q= Ns Ar num
+The number of parallel request processing threads per I/O request queue.
.El
.Pp
The
diff --git a/usr.sbin/bhyve/bhyve_config.5 b/usr.sbin/bhyve/bhyve_config.5
--- a/usr.sbin/bhyve/bhyve_config.5
+++ b/usr.sbin/bhyve/bhyve_config.5
@@ -762,10 +762,22 @@
The largest supported MTU advertised to the guest.
.El
.Ss VirtIO SCSI Settings
-.Bl -column "backend" "string" "/dev/cam/ctl"
+.Bl -column "num_queues" "Format" "Default"
.It Sy Name Ta Sy Format Ta Sy Default Ta Sy Description
.It Va backend Ta string Ta ctl Ta
The virtio-scsi backend to use (case-insensitive).
+.It Va ctl_ringsz Ta integer Ta 64 Ta
+The ring size of the control queue.
+.It Va evt_ringsz Ta integer Ta 64 Ta
+The ring size of the event queue.
+.It Va req_ringsz Ta integer Ta 64 Ta
+The ring size of each I/O request queue.
+.It Va num_queues Ta integer Ta 1 Ta
+The number of I/O request queues.
+.It Va seg_max Ta integer Ta 64 Ta
+The maximum number of segments allowed in a single command.
+.It Va thr_per_q Ta integer Ta 16 Ta
+The number of parallel request processing threads per I/O request queue.
.It Va target Ta Oo Va ID : Oc Ns path Ta /dev/cam/ctl Ta
The backend
.Ar path
diff --git a/usr.sbin/bhyve/pci_virtio_scsi.h b/usr.sbin/bhyve/pci_virtio_scsi.h
--- a/usr.sbin/bhyve/pci_virtio_scsi.h
+++ b/usr.sbin/bhyve/pci_virtio_scsi.h
@@ -32,6 +32,8 @@
#ifndef _PCI_VIRTIO_SCSI_H_
#define _PCI_VIRTIO_SCSI_H_
+#include "iov.h"
+
extern int pci_vtscsi_debug;
#define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
@@ -41,18 +43,31 @@
#define VIRTIO_SCSI_MAX_CHANNEL 0
#define VIRTIO_SCSI_MAX_TARGET 255
#define VIRTIO_SCSI_MAX_LUN 16383
+#define VIRTIO_SCSI_HDR_SEG 2
+#define VIRTIO_SCSI_ADDL_Q 2
/* Features specific to VirtIO SCSI, none of which we currently support */
#define VIRTIO_SCSI_F_INOUT (1 << 0)
#define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
#define VIRTIO_SCSI_F_CHANGE (1 << 2)
-/* Limits which we set. These should really be configurable. */
-#define VTSCSI_RINGSZ 64
-#define VTSCSI_REQUESTQ 1
-#define VTSCSI_THR_PER_Q 16
-#define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
-#define VTSCSI_MAXSEG 64
+/* Default limits which we set. All of these are configurable. */
+#define VTSCSI_DEF_RINGSZ 64
+#define VTSCSI_MIN_RINGSZ 4
+#define VTSCSI_MAX_RINGSZ 4096
+
+#define VTSCSI_DEF_THR_PER_Q 16
+#define VTSCSI_MIN_THR_PER_Q 1
+#define VTSCSI_MAX_THR_PER_Q 256
+
+#define VTSCSI_DEF_MAXSEG 64
+#define VTSCSI_MIN_MAXSEG (VIRTIO_SCSI_HDR_SEG + 1)
+#define VTSCSI_MAX_MAXSEG \
+ (4096 - VIRTIO_SCSI_HDR_SEG - SPLIT_IOV_ADDL_IOV)
+
+#define VTSCSI_DEF_REQUESTQ 1
+#define VTSCSI_MIN_REQUESTQ 1
+#define VTSCSI_MAX_REQUESTQ (32 - VIRTIO_SCSI_ADDL_Q)
/*
* Device-specific config space registers
@@ -91,17 +106,17 @@
* Currently there is only one I/O request queue, but it's trivial to support
* more than one.
*
- * Each pci_vtscsi_queue has VTSCSI_RINGSZ pci_vtscsi_request structures pre-
- * allocated on vsq_free_requests. For each I/O request coming in on the I/O
- * virtqueue, the request queue handler will take a pci_vtscsi_request off
- * vsq_free_requests, fills in the data from the I/O virtqueue, puts it on
- * vsq_requests, and signals vsq_cv.
+ * Each pci_vtscsi_queue has a configurable number of pci_vtscsi_request
+ * structures pre-allocated on vsq_free_requests. For each I/O request
+ * coming in on the I/O virtqueue, the request queue handler will take a
+ * pci_vtscsi_request off vsq_free_requests, fills in the data from the
+ * I/O virtqueue, puts it on vsq_requests, and signals vsq_cv.
*
- * There are VTSCSI_THR_PER_Q worker threads for each pci_vtscsi_queue which
- * wait on vsq_cv. When signalled, they repeatedly take one pci_vtscsi_request
- * off vsq_requests, construct a ctl_io for it, and hand it off to the CTL ioctl
- * Interface, which processes it synchronously. After completion of the request,
- * the pci_vtscsi_request is re-initialized and put back onto vsq_free_requests.
+ * Each pci_vtscsi_queue will have a configurable number of worker threads,
+ * which wait on vsq_cv. When signalled, they repeatedly take a single
+ * pci_vtscsi_request off vsq_requests and hand it to the backend, which
+ * processes it synchronously. After completion, the pci_vtscsi_request
+ * is re-initialized and put back onto vsq_free_requests.
*
* The worker threads exit when vsq_cv is signalled after vsw_exiting was set.
*
@@ -109,6 +124,23 @@
* - vsq_rmtx protects vsq_requests and must be held when waiting on vsq_cv
* - vsq_fmtx protects vsq_free_requests
* - vsq_qmtx must be held when operating on the underlying virtqueue, vsq_vq
+ *
+ * The I/O vectors for each request are kept in the preallocated iovec array
+ * vsr_iov, and pointers to the respective header/data in/out portions are set
+ * up to point into the array when the request is queued for processing.
+ *
+ * The number of iovecs preallocated for vsr_iov is derived from the configured
+ * 'seg_max' parameter defined by the virtio spec:
+ * - 'seg_max' parameter specifies the maximum number of I/O data vectors
+ * we support in any request
+ * - we need 2 additional iovecs for the I/O headers (VIRTIO_SCSI_HDR_SEG)
+ * - we need another 2 additional iovecs for split_iov() (SPLIT_IOV_ADDL_IOV)
+ *
+ * The only time we explicitly need the full size of vsr_iov after preallocation
+ * is during re-initialization after completing a request, and implicitly in the
+ * calls to split_iov() that set up the pointers. In all other cases, we use only
+ * 'seg_max' + VIRTIO_SCSI_HDR_SEG, and we advertise only 'seg_max' to the guest
+ * in accordance with the virtio spec.
*/
STAILQ_HEAD(pci_vtscsi_req_queue, pci_vtscsi_request);
@@ -133,8 +165,7 @@
struct pci_vtscsi_request {
struct pci_vtscsi_queue *vsr_queue;
- struct iovec vsr_iov[VTSCSI_MAXSEG +
- SPLIT_IOV_ADDL_IOV];
+ struct iovec *vsr_iov;
struct iovec *vsr_iov_in;
struct iovec *vsr_iov_out;
struct iovec *vsr_data_iov_in;
@@ -164,11 +195,17 @@
*/
struct pci_vtscsi_softc {
struct virtio_softc vss_vs;
- struct vqueue_info vss_vq[VTSCSI_MAXQ];
- struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
+ struct virtio_consts vss_vi_consts;
+ struct vqueue_info *vss_vq;
+ struct pci_vtscsi_queue *vss_queues;
pthread_mutex_t vss_mtx;
uint32_t vss_features;
size_t vss_num_target;
+ uint32_t vss_ctl_ringsz;
+ uint32_t vss_evt_ringsz;
+ uint32_t vss_req_ringsz;
+ uint32_t vss_thr_per_q;
+ struct pci_vtscsi_config vss_default_config;
struct pci_vtscsi_config vss_config;
struct pci_vtscsi_target *vss_targets;
struct pci_vtscsi_backend *vss_backend;
diff --git a/usr.sbin/bhyve/pci_virtio_scsi.c b/usr.sbin/bhyve/pci_virtio_scsi.c
--- a/usr.sbin/bhyve/pci_virtio_scsi.c
+++ b/usr.sbin/bhyve/pci_virtio_scsi.c
@@ -120,7 +120,7 @@
static struct virtio_consts vtscsi_vi_consts = {
.vc_name = "vtscsi",
- .vc_nvq = VTSCSI_MAXQ,
+ .vc_nvq = VTSCSI_DEF_REQUESTQ + VIRTIO_SCSI_ADDL_Q,
.vc_cfgsize = sizeof(struct pci_vtscsi_config),
.vc_reset = pci_vtscsi_reset,
.vc_cfgread = pci_vtscsi_cfgread,
@@ -129,6 +129,20 @@
.vc_hv_caps = VIRTIO_RING_F_INDIRECT_DESC,
};
+static const struct pci_vtscsi_config vtscsi_config = {
+ .num_queues = VTSCSI_DEF_REQUESTQ,
+ /* Leave room for the request and the response. */
+ .seg_max = VTSCSI_DEF_MAXSEG - VIRTIO_SCSI_HDR_SEG,
+ .max_sectors = 0,
+ .cmd_per_lun = 1,
+ .event_info_size = sizeof(struct pci_vtscsi_event),
+ .sense_size = 96,
+ .cdb_size = 32,
+ .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
+ .max_target = VIRTIO_SCSI_MAX_TARGET,
+ .max_lun = VIRTIO_SCSI_MAX_LUN
+};
+
int pci_vtscsi_debug = 0;
@@ -199,19 +213,9 @@
vi_reset_dev(&sc->vss_vs);
/* initialize config structure */
- sc->vss_config = (struct pci_vtscsi_config){
- .num_queues = VTSCSI_REQUESTQ,
- /* Leave room for the request and the response. */
- .seg_max = VTSCSI_MAXSEG - 2,
- .max_sectors = 0, /* overridden by backend reset() */
- .cmd_per_lun = 1,
- .event_info_size = sizeof(struct pci_vtscsi_event),
- .sense_size = 96,
- .cdb_size = 32,
- .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
- .max_target = MAX(1, sc->vss_num_target) - 1,
- .max_lun = VIRTIO_SCSI_MAX_LUN
- };
+ sc->vss_config = sc->vss_default_config;
+
+ sc->vss_config.max_target = MAX(1, sc->vss_num_target) - 1;
sc->vss_backend->vsb_reset(sc);
}
@@ -528,7 +532,7 @@
* must guarantee to adhere to the ordering requirements for any TMF
* function which aborts tasks.
*/
- for (int i = 0; i < VTSCSI_REQUESTQ; i++) {
+ for (uint32_t i = 0; i < sc->vss_config.num_queues; i++) {
struct pci_vtscsi_queue *q = &sc->vss_queues[i];
pthread_mutex_lock(&q->vsq_rmtx);
@@ -569,20 +573,21 @@
}
} else {
pci_vtscsi_walk_t ret = PCI_VTSCSI_WALK_CONTINUE;
- int i;
+ uint32_t i;
- for (i = 0;
- i < VTSCSI_REQUESTQ && ret != PCI_VTSCSI_WALK_STOP;
- i++) {
+ for (i = 0; i < sc->vss_config.num_queues; i++) {
struct pci_vtscsi_queue *q = &sc->vss_queues[i];
ret = pci_vtscsi_walk_request_queue(q,
pci_vtscsi_tmf_handler_cb[tmf->subtype], tmf);
+
+ if (ret == PCI_VTSCSI_WALK_STOP)
+ break;
}
}
/* Unlock the request queues before we return. */
- for (int i = 0; i < VTSCSI_REQUESTQ; i++) {
+ for (uint32_t i = 0; i < sc->vss_config.num_queues; i++) {
struct pci_vtscsi_queue *q = &sc->vss_queues[i];
pthread_mutex_unlock(&q->vsq_rmtx);
@@ -655,6 +660,11 @@
if (req == NULL)
goto alloc_fail;
+ req->vsr_iov = calloc(sc->vss_config.seg_max + VIRTIO_SCSI_HDR_SEG +
+ SPLIT_IOV_ADDL_IOV, sizeof(struct iovec));
+ if (req->vsr_iov == NULL)
+ goto alloc_fail;
+
req->vsr_cmd_rd = calloc(1, VTSCSI_IN_HEADER_LEN(sc));
if (req->vsr_cmd_rd == NULL)
goto alloc_fail;
@@ -687,6 +697,8 @@
free(req->vsr_cmd_rd);
if (req->vsr_cmd_wr != NULL)
free(req->vsr_cmd_wr);
+ if (req->vsr_iov != NULL)
+ free(req->vsr_iov);
free(req);
}
@@ -714,18 +726,22 @@
static void
pci_vtscsi_queue_request(struct pci_vtscsi_softc *sc, struct vqueue_info *vq)
{
- struct pci_vtscsi_queue *q = &sc->vss_queues[vq->vq_num - 2];
+ struct pci_vtscsi_queue *q;
struct pci_vtscsi_request *req;
struct vi_req vireq;
- int n;
+ int n, numseg;
+
+ q = &sc->vss_queues[vq->vq_num - VIRTIO_SCSI_ADDL_Q];
pthread_mutex_lock(&q->vsq_fmtx);
req = pci_vtscsi_get_request(&q->vsq_free_requests);
assert(req != NULL);
pthread_mutex_unlock(&q->vsq_fmtx);
- n = vq_getchain(vq, req->vsr_iov, VTSCSI_MAXSEG, &vireq);
- assert(n >= 1 && n <= VTSCSI_MAXSEG);
+ numseg = (int)(sc->vss_config.seg_max + VIRTIO_SCSI_HDR_SEG);
+
+ n = vq_getchain(vq, req->vsr_iov, numseg, &vireq);
+ assert(n >= 1 && n <= numseg);
req->vsr_idx = vireq.idx;
req->vsr_queue = q;
@@ -824,6 +840,7 @@
struct pci_vtscsi_request *req, int iolen)
{
struct pci_vtscsi_softc *sc = q->vsq_sc;
+ void *iov = req->vsr_iov;
void *cmd_rd = req->vsr_cmd_rd;
void *cmd_wr = req->vsr_cmd_wr;
void *backend = req->vsr_backend;
@@ -837,10 +854,13 @@
sc->vss_backend->vsb_req_clear(backend);
+ memset(iov, 0, sizeof(struct iovec) * (sc->vss_config.seg_max +
+ VIRTIO_SCSI_HDR_SEG + SPLIT_IOV_ADDL_IOV));
memset(cmd_rd, 0, VTSCSI_IN_HEADER_LEN(q->vsq_sc));
memset(cmd_wr, 0, VTSCSI_OUT_HEADER_LEN(q->vsq_sc));
memset(req, 0, sizeof(struct pci_vtscsi_request));
+ req->vsr_iov = iov;
req->vsr_cmd_rd = cmd_rd;
req->vsr_cmd_wr = cmd_wr;
req->vsr_backend = backend;
@@ -865,18 +885,17 @@
static void
pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
{
- struct pci_vtscsi_softc *sc;
- struct iovec iov[VTSCSI_MAXSEG];
+ struct pci_vtscsi_softc *sc = vsc;
+ int numseg = (int)(sc->vss_config.seg_max + VIRTIO_SCSI_HDR_SEG);
+ struct iovec iov[numseg];
struct vi_req req;
void *buf = NULL;
size_t bufsize;
int n;
- sc = vsc;
-
while (vq_has_descs(vq)) {
- n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req);
- assert(n >= 1 && n <= VTSCSI_MAXSEG);
+ n = vq_getchain(vq, iov, numseg, &req);
+ assert(n >= 1 && n <= numseg);
bufsize = iov_to_buf(iov, n, &buf);
pci_vtscsi_control_handle(sc, buf, bufsize);
@@ -911,10 +930,10 @@
{
struct pci_vtscsi_worker *workers;
char tname[MAXCOMLEN + 1];
- int i;
+ uint32_t i;
queue->vsq_sc = sc;
- queue->vsq_vq = &sc->vss_vq[num + 2];
+ queue->vsq_vq = &sc->vss_vq[num];
pthread_mutex_init(&queue->vsq_rmtx, NULL);
pthread_mutex_init(&queue->vsq_fmtx, NULL);
@@ -924,7 +943,7 @@
STAILQ_INIT(&queue->vsq_free_requests);
LIST_INIT(&queue->vsq_workers);
- for (i = 0; i < VTSCSI_RINGSZ; i++) {
+ for (i = 0; i < sc->vss_req_ringsz; i++) {
struct pci_vtscsi_request *req;
req = pci_vtscsi_alloc_request(sc);
@@ -934,11 +953,11 @@
pci_vtscsi_put_request(&queue->vsq_free_requests, req);
}
- workers = calloc(VTSCSI_THR_PER_Q, sizeof(struct pci_vtscsi_worker));
+ workers = calloc(sc->vss_thr_per_q, sizeof(struct pci_vtscsi_worker));
if (workers == NULL)
return (-1);
- for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
+ for (i = 0; i < sc->vss_thr_per_q; i++) {
workers[i].vsw_queue = queue;
pthread_create(&workers[i].vsw_thread, NULL, &pci_vtscsi_proc,
@@ -1162,18 +1181,48 @@
return (ret);
}
+static int
+pci_vtscsi_get_config_num(nvlist_t *nvl, const char *name, uint32_t lim_lo,
+ uint32_t lim_hi, uint32_t *res)
+{
+ const char *value;
+ const char *errstr;
+ long long val;
+
+ value = get_config_value_node(nvl, name);
+ if (value == NULL)
+ return (0);
+
+ val = strtonumx(value, lim_lo, lim_hi, &errstr, 0);
+ if (errstr != NULL) {
+ EPRINTLN("Invalid value for %s: %s", name, value);
+ return (-1);
+ }
+
+ *res = (uint32_t)val;
+ return (0);
+}
+
static int
pci_vtscsi_init(struct pci_devinst *pi, nvlist_t *nvl)
{
struct pci_vtscsi_softc *sc;
struct pci_vtscsi_backend *backend, **vbpp;
const char *value;
+ uint32_t val;
int err;
sc = calloc(1, sizeof(struct pci_vtscsi_softc));
if (sc == NULL)
return (-1);
+ sc->vss_vi_consts = vtscsi_vi_consts;
+ sc->vss_ctl_ringsz = VTSCSI_DEF_RINGSZ;
+ sc->vss_evt_ringsz = VTSCSI_DEF_RINGSZ;
+ sc->vss_req_ringsz = VTSCSI_DEF_RINGSZ;
+ sc->vss_thr_per_q = VTSCSI_DEF_THR_PER_Q;
+ sc->vss_default_config = vtscsi_config;
+
value = get_config_value_node(nvl, "bootindex");
if (value != NULL) {
if (pci_emul_add_boot_device(pi, atoi(value))) {
@@ -1183,6 +1232,60 @@
}
}
+ val = vtscsi_config.seg_max;
+ if (pci_vtscsi_get_config_num(nvl, "seg_max", VTSCSI_MIN_MAXSEG,
+ VTSCSI_MAX_MAXSEG, &val) != 0)
+ goto fail;
+ sc->vss_default_config.seg_max = val;
+
+ val = vtscsi_config.num_queues;
+ if (pci_vtscsi_get_config_num(nvl, "num_queues", VTSCSI_MIN_REQUESTQ,
+ VTSCSI_MAX_REQUESTQ, &val) != 0)
+ goto fail;
+ sc->vss_default_config.num_queues = val;
+
+ /*
+ * num_queues is only the number of request queues, but nvq must
+ * account for the control and event queues.
+ */
+ sc->vss_vi_consts.vc_nvq = val + VIRTIO_SCSI_ADDL_Q;
+
+ /*
+ * Allocate queues early, so that they're there for the call to
+ * vi_softc_linkup().
+ */
+ sc->vss_vq = calloc(sc->vss_vi_consts.vc_nvq,
+ sizeof(struct vqueue_info));
+ if (sc->vss_vq == NULL) {
+ EPRINTLN("can't allocate space for %d virtqueues",
+ sc->vss_vi_consts.vc_nvq);
+ goto fail;
+ }
+
+ sc->vss_queues = calloc(sc->vss_default_config.num_queues,
+ sizeof(struct pci_vtscsi_queue));
+ if (sc->vss_queues == NULL) {
+ EPRINTLN("can't allocate space for %d request queues",
+ sc->vss_default_config.num_queues);
+ goto fail;
+ }
+
+ if (pci_vtscsi_get_config_num(nvl, "ctl_ringsz", VTSCSI_MIN_RINGSZ,
+ VTSCSI_MAX_RINGSZ, &sc->vss_ctl_ringsz) != 0)
+ goto fail;
+
+ if (pci_vtscsi_get_config_num(nvl, "evt_ringsz", VTSCSI_MIN_RINGSZ,
+ VTSCSI_MAX_RINGSZ, &sc->vss_evt_ringsz) != 0)
+ goto fail;
+
+ if (pci_vtscsi_get_config_num(nvl, "req_ringsz", VTSCSI_MIN_RINGSZ,
+ VTSCSI_MAX_RINGSZ, &sc->vss_req_ringsz) != 0)
+ goto fail;
+
+ if (pci_vtscsi_get_config_num(nvl, "thr_per_q", VTSCSI_MIN_THR_PER_Q,
+ VTSCSI_MAX_THR_PER_Q, &sc->vss_thr_per_q) != 0)
+ goto fail;
+
value = get_config_value_node(nvl, "backend");
if (value == NULL) {
if (SET_COUNT(pci_vtscsi_backend_set) == 0) {
@@ -1248,7 +1351,7 @@
pthread_mutex_init(&sc->vss_mtx, NULL);
- vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
+ vi_softc_linkup(&sc->vss_vs, &sc->vss_vi_consts, sc, pi, sc->vss_vq);
sc->vss_vs.vs_mtx = &sc->vss_mtx;
/*
@@ -1264,20 +1367,22 @@
pci_vtscsi_reset(sc);
pthread_mutex_unlock(&sc->vss_mtx);
- /* controlq */
- sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
+ /* virtqueue 0: control queue */
+ sc->vss_vq[0].vq_qsize = sc->vss_ctl_ringsz;
sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;
- /* eventq */
- sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
+ /* virtqueue 1: event queue */
+ sc->vss_vq[1].vq_qsize = sc->vss_evt_ringsz;
sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;
- /* request queues */
- for (int i = 2; i < VTSCSI_MAXQ; i++) {
- sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
+ /* virtqueue 2-n: request queues */
+ for (int i = VIRTIO_SCSI_ADDL_Q; i < sc->vss_vi_consts.vc_nvq; i++) {
+ int rq = i - VIRTIO_SCSI_ADDL_Q;
+
+ sc->vss_vq[i].vq_qsize = sc->vss_req_ringsz;
sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
- err = pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
+ err = pci_vtscsi_init_queue(sc, &sc->vss_queues[rq], i);
if (err != 0) {
free(sc->vss_targets);
goto fail;

File Metadata

Mime Type
text/plain
Expires
Fri, Mar 13, 2:08 PM (8 h, 24 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29563786
Default Alt Text
D54073.id172279.diff (17 KB)

Event Timeline