Index: usr.sbin/bhyve/pci_virtio_9p.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_9p.c
+++ usr.sbin/bhyve/pci_virtio_9p.c
@@ -197,27 +197,20 @@
 	struct iovec iov[VT9P_MAX_IOV];
 	struct pci_vt9p_softc *sc;
 	struct pci_vt9p_request *preq;
-	uint16_t idx, n, i;
+	struct vi_req req;
 	uint16_t flags[VT9P_MAX_IOV];
+	uint16_t n;
 
 	sc = vsc;
 
 	while (vq_has_descs(vq)) {
-		n = vq_getchain(vq, &idx, iov, VT9P_MAX_IOV, flags);
+		n = vq_getchain(vq, iov, VT9P_MAX_IOV, &req);
 		preq = calloc(1, sizeof(struct pci_vt9p_request));
 		preq->vsr_sc = sc;
-		preq->vsr_idx = idx;
+		preq->vsr_idx = req.idx;
 		preq->vsr_iov = iov;
 		preq->vsr_niov = n;
-		preq->vsr_respidx = 0;
-
-		/* Count readable descriptors */
-		for (i = 0; i < n; i++) {
-			if (flags[i] & VRING_DESC_F_WRITE)
-				break;
-
-			preq->vsr_respidx++;
-		}
+		preq->vsr_respidx = req.wr_iov - iov;
 
 		for (int i = 0; i < n; i++) {
 			DPRINTF(("vt9p: vt9p_notify(): desc%d base=%p, "
Index: usr.sbin/bhyve/pci_virtio_block.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_block.c
+++ usr.sbin/bhyve/pci_virtio_block.c
@@ -308,11 +308,11 @@
 	int err;
 	ssize_t iolen;
 	int writeop, type;
+	struct vi_req req;
 	struct iovec iov[BLOCKIF_IOV_MAX + 2];
-	uint16_t idx, flags[BLOCKIF_IOV_MAX + 2];
 	struct virtio_blk_discard_write_zeroes *discard;
 
-	n = vq_getchain(vq, &idx, iov, BLOCKIF_IOV_MAX + 2, flags);
+	n = vq_getchain(vq, iov, BLOCKIF_IOV_MAX + 2, &req);
 
 	/*
 	 * The first descriptor will be the read-only fixed header,
@@ -324,16 +324,16 @@
 	 */
 	assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2);
 
-	io = &sc->vbsc_ios[idx];
-	assert((flags[0] & VRING_DESC_F_WRITE) == 0);
-	assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr));
+	io = &sc->vbsc_ios[req.idx];
+	assert(req.rd_niov != 0);
+	assert(req.rd_iov[0].iov_len == sizeof(struct virtio_blk_hdr));
 	vbh = (struct virtio_blk_hdr *)iov[0].iov_base;
 	memcpy(&io->io_req.br_iov, &iov[1], sizeof(struct iovec) * (n - 2));
 	io->io_req.br_iovcnt = n - 2;
 	io->io_req.br_offset = vbh->vbh_sector * VTBLK_BSIZE;
 	io->io_status = (uint8_t *)iov[--n].iov_base;
-	assert(iov[n].iov_len == 1);
-	assert(flags[n] & VRING_DESC_F_WRITE);
+	assert(req.wr_niov != 0);
+	assert(req.wr_iov[req.wr_niov - 1].iov_len == 1);
 
 	/*
 	 * XXX
@@ -342,16 +342,10 @@
 	 */
 	type = vbh->vbh_type & ~VBH_FLAG_BARRIER;
 	writeop = (type == VBH_OP_WRITE || type == VBH_OP_DISCARD);
+	assert(n == (writeop ? req.rd_niov : req.wr_niov));
 
 	iolen = 0;
 	for (i = 1; i < n; i++) {
-		/*
-		 * - write op implies read-only descriptor,
-		 * - read/ident op implies write-only descriptor,
-		 * therefore test the inverse of the descriptor bit
-		 * to the op.
-		 */
-		assert(((flags[i] & VRING_DESC_F_WRITE) == 0) == writeop);
 		iolen += iov[i].iov_len;
 	}
 	io->io_req.br_resid = iolen;
Index: usr.sbin/bhyve/pci_virtio_console.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_console.c
+++ usr.sbin/bhyve/pci_virtio_console.c
@@ -415,10 +415,10 @@
 	struct pci_vtcon_port *port;
 	struct pci_vtcon_sock *sock = (struct pci_vtcon_sock *)arg;
 	struct vqueue_info *vq;
+	struct vi_req req;
 	struct iovec iov;
 	static char dummybuf[2048];
 	int len, n;
-	uint16_t idx;
 
 	port = sock->vss_port;
 	vq = pci_vtcon_port_to_vq(port, true);
@@ -441,7 +441,7 @@
 	}
 
 	do {
-		n = vq_getchain(vq, &idx, &iov, 1, NULL);
+		n = vq_getchain(vq, &iov, 1, &req);
 		len = readv(sock->vss_conn_fd, &iov, n);
 
 		if (len == 0 || (len < 0 && errno == EWOULDBLOCK)) {
@@ -453,7 +453,7 @@
 			return;
 		}
 
-		vq_relchain(vq, idx, len);
+		vq_relchain(vq, req.idx, len);
 	} while (vq_has_descs(vq));
 
 	vq_endchains(vq, 1);
@@ -572,8 +572,8 @@
     struct pci_vtcon_control *ctrl, const void *payload, size_t len)
 {
 	struct vqueue_info *vq;
+	struct vi_req req;
 	struct iovec iov;
-	uint16_t idx;
 	int n;
 
 	vq = pci_vtcon_port_to_vq(&sc->vsc_control_port, true);
@@ -581,7 +581,7 @@
 	if (!vq_has_descs(vq))
 		return;
 
-	n = vq_getchain(vq, &idx, &iov, 1, NULL);
+	n = vq_getchain(vq, &iov, 1, &req);
 
 	assert(n == 1);
 
@@ -590,7 +590,7 @@
 	memcpy(iov.iov_base + sizeof(struct pci_vtcon_control),
 	    payload, len);
 
-	vq_relchain(vq, idx, sizeof(struct pci_vtcon_control) + len);
+	vq_relchain(vq, req.idx, sizeof(struct pci_vtcon_control) + len);
 	vq_endchains(vq, 1);
 }
@@ -601,14 +601,14 @@
 	struct pci_vtcon_softc *sc;
 	struct pci_vtcon_port *port;
 	struct iovec iov[1];
-	uint16_t idx, n;
-	uint16_t flags[8];
+	struct vi_req req;
+	uint16_t n;
 
 	sc = vsc;
 	port = pci_vtcon_vq_to_port(sc, vq);
 
 	while (vq_has_descs(vq)) {
-		n = vq_getchain(vq, &idx, iov, 1, flags);
+		n = vq_getchain(vq, iov, 1, &req);
 		assert(n >= 1);
 		if (port != NULL)
 			port->vsp_cb(port, port->vsp_arg, iov, 1);
@@ -616,7 +616,7 @@
 		/*
 		 * Release this chain and handle more
 		 */
-		vq_relchain(vq, idx, 0);
+		vq_relchain(vq, req.idx, 0);
 	}
 	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
 }
Index: usr.sbin/bhyve/pci_virtio_net.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_net.c
+++ usr.sbin/bhyve/pci_virtio_net.c
@@ -248,6 +248,7 @@
 	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
 	struct iovec iov[VTNET_MAXSEGS + 1];
 	struct vqueue_info *vq;
+	struct vi_req req;
 
 	vq = &sc->vsc_queues[VTNET_RXQ];
 
@@ -288,8 +289,9 @@
 	riov = iov;
 	n_chains = 0;
 	do {
-		int n = vq_getchain(vq, &info[n_chains].idx, riov,
-		    VTNET_MAXSEGS - riov_len, NULL);
+		int n = vq_getchain(vq, riov, VTNET_MAXSEGS - riov_len,
+		    &req);
+		info[n_chains].idx = req.idx;
 
 		if (n == 0) {
 			/*
@@ -435,7 +437,7 @@
 {
 	struct iovec iov[VTNET_MAXSEGS + 1];
 	struct iovec *siov = iov;
-	uint16_t idx;
+	struct vi_req req;
 	ssize_t len;
 	int n;
 
@@ -443,7 +445,7 @@
 	 * Obtain chain of descriptors. The first descriptor also
 	 * contains the virtio-net header.
 	 */
-	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
+	n = vq_getchain(vq, iov, VTNET_MAXSEGS, &req);
 	assert(n >= 1 && n <= VTNET_MAXSEGS);
 
 	if (sc->vhdrlen != sc->be_vhdrlen) {
@@ -473,7 +475,7 @@
 	 * Return the processed chain to the guest, reporting
 	 * the number of bytes that we read.
 	 */
-	vq_relchain(vq, idx, len);
+	vq_relchain(vq, req.idx, len);
 }
 
 /* Called on TX kick. */
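
Note (not part of the patch): the calling convention the converted device models follow is sketched below for reference. The sketch is illustrative only; the "vtfoo" device and vtfoo_handle() are hypothetical, while vq_has_descs(), vq_getchain(), vq_relchain(), vq_endchains() and struct vi_req are the interfaces declared in virtio.h further down in this diff.

/*
 * Illustrative sketch only: a notify handler written against the new
 * vq_getchain() interface.  Assumes <assert.h>, <sys/uio.h> and
 * bhyve's "virtio.h"; vtfoo_handle() and the "vtfoo" device are
 * hypothetical.
 */
static int vtfoo_handle(struct iovec *rd, int rd_n,
    struct iovec *wr, int wr_n);	/* hypothetical worker */

static void
pci_vtfoo_notify(void *vsc, struct vqueue_info *vq)
{
	struct iovec iov[8];
	struct vi_req req;
	int n, iolen;

	while (vq_has_descs(vq)) {
		/*
		 * iov[] is filled in chain order; req points at the
		 * driver-to-device run (rd_iov/rd_niov) and the
		 * device-to-driver run (wr_iov/wr_niov) within it, and
		 * req.idx replaces the old *pidx output parameter.
		 */
		n = vq_getchain(vq, iov, 8, &req);
		assert(n >= 1);

		iolen = vtfoo_handle(req.rd_iov, req.rd_niov,
		    req.wr_iov, req.wr_niov);

		/* Return the chain, reporting bytes written back. */
		vq_relchain(vq, req.idx, iolen);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
}

Compared with the old interface, callers no longer need a per-descriptor flags[] scratch array or the "count readable descriptors" loops removed above; _vq_record() in virtio.c (below) classifies each descriptor while the chain is being collected.
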
Index: usr.sbin/bhyve/pci_virtio_rnd.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_rnd.c
+++ usr.sbin/bhyve/pci_virtio_rnd.c
@@ -113,8 +113,8 @@
 {
 	struct iovec iov;
 	struct pci_vtrnd_softc *sc;
+	struct vi_req req;
 	int len;
-	uint16_t idx;
 
 	sc = vsc;
 
@@ -124,7 +124,7 @@
 	}
 
 	while (vq_has_descs(vq)) {
-		vq_getchain(vq, &idx, &iov, 1, NULL);
+		vq_getchain(vq, &iov, 1, &req);
 
 		len = read(sc->vrsc_fd, iov.iov_base, iov.iov_len);
 
@@ -136,7 +136,7 @@
 		/*
 		 * Release this chain and handle more
 		 */
-		vq_relchain(vq, idx, len);
+		vq_relchain(vq, req.idx, len);
 	}
 	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
 }
Index: usr.sbin/bhyve/pci_virtio_scsi.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_scsi.c
+++ usr.sbin/bhyve/pci_virtio_scsi.c
@@ -558,7 +558,8 @@
 {
 	struct pci_vtscsi_softc *sc;
 	struct iovec iov[VTSCSI_MAXSEG];
-	uint16_t idx, n;
+	struct vi_req req;
+	uint16_t n;
 	void *buf = NULL;
 	size_t bufsize;
 	int iolen;
@@ -566,7 +567,7 @@
 
 	sc = vsc;
 	while (vq_has_descs(vq)) {
-		n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, NULL);
+		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req);
 		bufsize = iov_to_buf(iov, n, &buf);
 		iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
 		buf_to_iov(buf + bufsize - iolen, iolen, iov, n,
@@ -575,7 +576,7 @@
 		/*
 		 * Release this chain and handle more
 		 */
-		vq_relchain(vq, idx, iolen);
+		vq_relchain(vq, req.idx, iolen);
 	}
 	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
 	free(buf);
@@ -595,33 +596,23 @@
 	struct pci_vtscsi_softc *sc;
 	struct pci_vtscsi_queue *q;
 	struct pci_vtscsi_request *req;
 	struct iovec iov[VTSCSI_MAXSEG];
-	uint16_t flags[VTSCSI_MAXSEG];
-	uint16_t idx, n, i;
-	int readable;
+	struct vi_req vireq;
+	uint16_t n;
 
 	sc = vsc;
 	q = &sc->vss_queues[vq->vq_num - 2];
 	while (vq_has_descs(vq)) {
-		readable = 0;
-		n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, flags);
-
-		/* Count readable descriptors */
-		for (i = 0; i < n; i++) {
-			if (flags[i] & VRING_DESC_F_WRITE)
-				break;
-
-			readable++;
-		}
+		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &vireq);
 
 		req = calloc(1, sizeof(struct pci_vtscsi_request));
-		req->vsr_idx = idx;
+		req->vsr_idx = vireq.idx;
 		req->vsr_queue = q;
-		req->vsr_niov_in = readable;
-		req->vsr_niov_out = n - readable;
-		memcpy(req->vsr_iov_in, iov,
+		req->vsr_niov_in = vireq.rd_niov;
+		req->vsr_niov_out = vireq.wr_niov;
+		memcpy(req->vsr_iov_in, vireq.rd_iov,
 		    req->vsr_niov_in * sizeof(struct iovec));
-		memcpy(req->vsr_iov_out, iov + readable,
+		memcpy(req->vsr_iov_out, vireq.wr_iov,
 		    req->vsr_niov_out * sizeof(struct iovec));
 
 		pthread_mutex_lock(&q->vsq_mtx);
@@ -629,7 +620,8 @@
 		pthread_cond_signal(&q->vsq_cv);
 		pthread_mutex_unlock(&q->vsq_mtx);
 
-		DPRINTF(("virtio-scsi: request enqueued", idx));
+		DPRINTF(("virtio-scsi: request enqueued",
+		    vireq.idx));
 	}
 }
Index: usr.sbin/bhyve/virtio.h
===================================================================
--- usr.sbin/bhyve/virtio.h
+++ usr.sbin/bhyve/virtio.h
@@ -371,6 +371,18 @@
 }
 
 struct iovec;
+
+/*
+ * Request description returned by vq_getchain
+ */
+struct vi_req {
+	struct iovec *rd_iov;	/* driver-to-device buffer iovec */
+	struct iovec *wr_iov;	/* device-to-driver buffer iovec */
+	int rd_niov;		/* num of driver-to-device iovec */
+	int wr_niov;		/* num of device-to-driver iovec */
+	unsigned int idx;	/* ring index */
+};
+
 void	vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
 			void *dev_softc, struct pci_devinst *pi,
 			struct vqueue_info *queues);
@@ -378,8 +390,8 @@
 
 void	vi_reset_dev(struct virtio_softc *);
 void	vi_set_io_bar(struct virtio_softc *, int);
-int	vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
-	    struct iovec *iov, int n_iov, uint16_t *flags);
+int	vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
+	    struct vi_req *reqp);
 void	vq_retchains(struct vqueue_info *vq, uint16_t n_chains);
 void	vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx,
 			    uint32_t iolen);
Index: usr.sbin/bhyve/virtio.c
===================================================================
--- usr.sbin/bhyve/virtio.c
+++ usr.sbin/bhyve/virtio.c
@@ -213,15 +213,23 @@
  * descriptor.
  */
 static inline void
-_vq_record(int i, volatile struct vring_desc *vd, struct vmctx *ctx,
-	   struct iovec *iov, int n_iov, uint16_t *flags) {
+_vq_record(struct virtio_softc *vs, int i, volatile struct vring_desc *vd,
+	   struct vmctx *ctx, struct iovec *iov, int n_iov,
+	   struct vi_req *reqp) {
 
 	if (i >= n_iov)
 		return;
 	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
 	iov[i].iov_len = vd->len;
-	if (flags != NULL)
-		flags[i] = vd->flags;
+	if (vd->flags & VRING_DESC_F_WRITE) {
+		if (reqp->wr_iov == NULL)
+			reqp->wr_iov = &iov[i];
+		reqp->wr_niov++;
+	} else {
+		if (reqp->rd_iov == NULL)
+			reqp->rd_iov = &iov[i];
+		reqp->rd_niov++;
+	}
 }
 
 #define	VQ_MAX_DESCRIPTORS	512	/* see below */
@@ -253,11 +261,6 @@
  * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
- * If you want to verify the WRITE flag on each descriptor, pass a
- * non-NULL "flags" pointer to an array of "uint16_t" of the same size
- * as n_iov and we'll copy each "flags" field after unwinding any
- * indirects.
- *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1.  If no descriptors are ready now it simply returns 0.
 *
@@ -265,12 +268,13 @@
 * that vq_has_descs() does one).
 */
 int
-vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
-	    struct iovec *iov, int n_iov, uint16_t *flags)
+vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
+	    struct vi_req *reqp)
 {
 	int i;
 	u_int ndesc, n_indir;
 	u_int idx, next;
+	struct vi_req req = { 0 };
 	volatile struct vring_desc *vdir, *vindir, *vp;
 	struct vmctx *ctx;
 	struct virtio_softc *vs;
@@ -313,7 +317,7 @@
 	 * index, but we just abort if the count gets excessive.
 	 */
 	ctx = vs->vs_pi->pi_vmctx;
-	*pidx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
+	req.idx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
 	vq->vq_last_avail++;
 	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
 		if (next >= vq->vq_qsize) {
@@ -325,7 +329,7 @@
 		}
 		vdir = &vq->vq_desc[next];
 		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
-			_vq_record(i, vdir, ctx, iov, n_iov, flags);
+			_vq_record(vs, i, vdir, ctx, iov, niov, &req);
 			i++;
 		} else if ((vs->vs_vc->vc_hv_caps &
 		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
@@ -362,7 +366,7 @@
 				    name);
 				return (-1);
 			}
-			_vq_record(i, vp, ctx, iov, n_iov, flags);
+			_vq_record(vs, i, vp, ctx, iov, niov, &req);
 			if (++i > VQ_MAX_DESCRIPTORS)
 				goto loopy;
 			if ((vp->flags & VRING_DESC_F_NEXT) == 0)
@@ -378,13 +382,18 @@
 			}
 		}
 		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
-			return (i);
+			goto done;
 	}
+
 loopy:
 	EPRINTLN(
 	    "%s: descriptor loop? count > %d - driver confused?",
 	    name, i);
 	return (-1);
+
+done:
+	*reqp = req;
+	return (i);
 }
 
 /*
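
A note on the descriptor split the converted callers rely on: the virtio spec requires the driver to place all device-readable descriptors in a chain ahead of the device-writable ones, so after vq_getchain() the two runs reported through struct vi_req are contiguous slices of the caller's iov[] array. The sketch below is illustrative only and not part of the patch; vi_req_check() is a hypothetical helper. It states the invariants that make conversions such as "preq->vsr_respidx = req.wr_iov - iov" in pci_virtio_9p.c equivalent to the removed counting loops.

/*
 * Illustrative sketch only: invariants assumed for a well-formed chain
 * (readable descriptors first, per the virtio spec) that fits in the
 * iov[] array passed to vq_getchain().  Assumes <assert.h>,
 * <sys/uio.h> and bhyve's "virtio.h"; vi_req_check() is hypothetical.
 */
static void
vi_req_check(const struct iovec *iov, int n, const struct vi_req *req)
{
	/* Every descriptor is classified as either readable or writable. */
	assert(n == req->rd_niov + req->wr_niov);

	/* The readable run, if any, starts at iov[0]... */
	if (req->rd_niov > 0)
		assert(req->rd_iov == iov);

	/*
	 * ...and the writable run, if any, follows it immediately, so
	 * "req->wr_iov - iov" equals the readable-descriptor count that
	 * the removed loops used to compute by hand.
	 */
	if (req->wr_niov > 0)
		assert(req->wr_iov == iov + req->rd_niov);
}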