Index: head/sys/dev/virtio/balloon/virtio_balloon.c =================================================================== --- head/sys/dev/virtio/balloon/virtio_balloon.c (revision 360722) +++ head/sys/dev/virtio/balloon/virtio_balloon.c (revision 360723) @@ -1,564 +1,567 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO memory balloon devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" struct vtballoon_softc { device_t vtballoon_dev; struct mtx vtballoon_mtx; uint64_t vtballoon_features; uint32_t vtballoon_flags; #define VTBALLOON_FLAG_DETACH 0x01 struct virtqueue *vtballoon_inflate_vq; struct virtqueue *vtballoon_deflate_vq; uint32_t vtballoon_desired_npages; uint32_t vtballoon_current_npages; TAILQ_HEAD(,vm_page) vtballoon_pages; struct thread *vtballoon_td; uint32_t *vtballoon_page_frames; int vtballoon_timeout; }; static struct virtio_feature_desc vtballoon_feature_desc[] = { { VIRTIO_BALLOON_F_MUST_TELL_HOST, "MustTellHost" }, { VIRTIO_BALLOON_F_STATS_VQ, "StatsVq" }, { 0, NULL } }; static int vtballoon_probe(device_t); static int vtballoon_attach(device_t); static int vtballoon_detach(device_t); static int vtballoon_config_change(device_t); static void vtballoon_negotiate_features(struct vtballoon_softc *); static int vtballoon_alloc_virtqueues(struct vtballoon_softc *); static void vtballoon_vq_intr(void *); static void vtballoon_inflate(struct vtballoon_softc *, int); static void vtballoon_deflate(struct vtballoon_softc *, int); static void vtballoon_send_page_frames(struct vtballoon_softc *, struct virtqueue *, int); static void vtballoon_pop(struct vtballoon_softc *); static void vtballoon_stop(struct vtballoon_softc *); static vm_page_t vtballoon_alloc_page(struct vtballoon_softc *); static void vtballoon_free_page(struct vtballoon_softc *, vm_page_t); static int vtballoon_sleep(struct vtballoon_softc *); static void vtballoon_thread(void *); static void vtballoon_add_sysctl(struct vtballoon_softc *); /* Features desired/implemented by this driver. 
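* None are currently negotiated: VTBALLOON_FEATURES below is zero, and the * MustTellHost/StatsVq entries in vtballoon_feature_desc serve only to label * the host's feature bits when they are printed.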
*/ #define VTBALLOON_FEATURES 0 /* Timeout between retries when the balloon needs inflating. */ #define VTBALLOON_LOWMEM_TIMEOUT hz /* * Maximum number of pages we'll request to inflate or deflate * the balloon in one virtqueue request. Both Linux and NetBSD * have settled on 256, doing up to 1MB at a time. */ #define VTBALLOON_PAGES_PER_REQUEST 256 /* Must be able to fit all page frames in one page (segment). */ CTASSERT(VTBALLOON_PAGES_PER_REQUEST * sizeof(uint32_t) <= PAGE_SIZE); #define VTBALLOON_MTX(_sc) &(_sc)->vtballoon_mtx #define VTBALLOON_LOCK_INIT(_sc, _name) mtx_init(VTBALLOON_MTX((_sc)), _name, \ "VirtIO Balloon Lock", MTX_DEF) #define VTBALLOON_LOCK(_sc) mtx_lock(VTBALLOON_MTX((_sc))) #define VTBALLOON_UNLOCK(_sc) mtx_unlock(VTBALLOON_MTX((_sc))) #define VTBALLOON_LOCK_DESTROY(_sc) mtx_destroy(VTBALLOON_MTX((_sc))) static device_method_t vtballoon_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtballoon_probe), DEVMETHOD(device_attach, vtballoon_attach), DEVMETHOD(device_detach, vtballoon_detach), /* VirtIO methods. */ DEVMETHOD(virtio_config_change, vtballoon_config_change), DEVMETHOD_END }; static driver_t vtballoon_driver = { "vtballoon", vtballoon_methods, sizeof(struct vtballoon_softc) }; static devclass_t vtballoon_devclass; +DRIVER_MODULE(virtio_balloon, virtio_mmio, vtballoon_driver, + vtballoon_devclass, 0, 0); DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver, vtballoon_devclass, 0, 0); MODULE_VERSION(virtio_balloon, 1); MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1); VIRTIO_SIMPLE_PNPTABLE(virtio_balloon, VIRTIO_ID_BALLOON, "VirtIO Balloon Adapter"); +VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_balloon); VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_balloon); static int vtballoon_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, virtio_balloon)); } static int vtballoon_attach(device_t dev) { struct vtballoon_softc *sc; int error; sc = device_get_softc(dev); sc->vtballoon_dev = dev; VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev)); TAILQ_INIT(&sc->vtballoon_pages); vtballoon_add_sysctl(sc); virtio_set_feature_desc(dev, vtballoon_feature_desc); vtballoon_negotiate_features(sc); sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST * sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtballoon_page_frames == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate page frame request array\n"); goto fail; } error = vtballoon_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_MISC); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } error = kthread_add(vtballoon_thread, sc, NULL, &sc->vtballoon_td, 0, 0, "virtio_balloon"); if (error) { device_printf(dev, "cannot create balloon kthread\n"); goto fail; } virtqueue_enable_intr(sc->vtballoon_inflate_vq); virtqueue_enable_intr(sc->vtballoon_deflate_vq); fail: if (error) vtballoon_detach(dev); return (error); } static int vtballoon_detach(device_t dev) { struct vtballoon_softc *sc; sc = device_get_softc(dev); if (sc->vtballoon_td != NULL) { VTBALLOON_LOCK(sc); sc->vtballoon_flags |= VTBALLOON_FLAG_DETACH; wakeup_one(sc); msleep(sc->vtballoon_td, VTBALLOON_MTX(sc), 0, "vtbdth", 0); VTBALLOON_UNLOCK(sc); sc->vtballoon_td = NULL; } if (device_is_attached(dev)) { vtballoon_pop(sc); vtballoon_stop(sc); } if (sc->vtballoon_page_frames != NULL) { free(sc->vtballoon_page_frames, M_DEVBUF); sc->vtballoon_page_frames = NULL; } VTBALLOON_LOCK_DESTROY(sc); return (0); } static
int vtballoon_config_change(device_t dev) { struct vtballoon_softc *sc; sc = device_get_softc(dev); VTBALLOON_LOCK(sc); wakeup_one(sc); VTBALLOON_UNLOCK(sc); return (1); } static void vtballoon_negotiate_features(struct vtballoon_softc *sc) { device_t dev; uint64_t features; dev = sc->vtballoon_dev; features = virtio_negotiate_features(dev, VTBALLOON_FEATURES); sc->vtballoon_features = features; } static int vtballoon_alloc_virtqueues(struct vtballoon_softc *sc) { device_t dev; struct vq_alloc_info vq_info[2]; int nvqs; dev = sc->vtballoon_dev; nvqs = 2; VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtballoon_vq_intr, sc, &sc->vtballoon_inflate_vq, "%s inflate", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtballoon_vq_intr, sc, &sc->vtballoon_deflate_vq, "%s deflate", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); } static void vtballoon_vq_intr(void *xsc) { struct vtballoon_softc *sc; sc = xsc; VTBALLOON_LOCK(sc); wakeup_one(sc); VTBALLOON_UNLOCK(sc); } static void vtballoon_inflate(struct vtballoon_softc *sc, int npages) { struct virtqueue *vq; vm_page_t m; int i; vq = sc->vtballoon_inflate_vq; if (npages > VTBALLOON_PAGES_PER_REQUEST) npages = VTBALLOON_PAGES_PER_REQUEST; for (i = 0; i < npages; i++) { if ((m = vtballoon_alloc_page(sc)) == NULL) { sc->vtballoon_timeout = VTBALLOON_LOWMEM_TIMEOUT; break; } sc->vtballoon_page_frames[i] = VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT; KASSERT(m->a.queue == PQ_NONE, ("%s: allocated page %p on queue", __func__, m)); TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q); } if (i > 0) vtballoon_send_page_frames(sc, vq, i); } static void vtballoon_deflate(struct vtballoon_softc *sc, int npages) { TAILQ_HEAD(, vm_page) free_pages; struct virtqueue *vq; vm_page_t m; int i; vq = sc->vtballoon_deflate_vq; TAILQ_INIT(&free_pages); if (npages > VTBALLOON_PAGES_PER_REQUEST) npages = VTBALLOON_PAGES_PER_REQUEST; for (i = 0; i < npages; i++) { m = TAILQ_FIRST(&sc->vtballoon_pages); KASSERT(m != NULL, ("%s: no more pages to deflate", __func__)); sc->vtballoon_page_frames[i] = VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT; TAILQ_REMOVE(&sc->vtballoon_pages, m, plinks.q); TAILQ_INSERT_TAIL(&free_pages, m, plinks.q); } if (i > 0) { /* Always tell host first before freeing the pages. */ vtballoon_send_page_frames(sc, vq, i); while ((m = TAILQ_FIRST(&free_pages)) != NULL) { TAILQ_REMOVE(&free_pages, m, plinks.q); vtballoon_free_page(sc, m); } } KASSERT((TAILQ_EMPTY(&sc->vtballoon_pages) && sc->vtballoon_current_npages == 0) || (!TAILQ_EMPTY(&sc->vtballoon_pages) && sc->vtballoon_current_npages != 0), ("%s: bogus page count %d", __func__, sc->vtballoon_current_npages)); } static void vtballoon_send_page_frames(struct vtballoon_softc *sc, struct virtqueue *vq, int npages) { struct sglist sg; struct sglist_seg segs[1]; void *c; int error; sglist_init(&sg, 1, segs); error = sglist_append(&sg, sc->vtballoon_page_frames, npages * sizeof(uint32_t)); KASSERT(error == 0, ("error adding page frames to sglist")); error = virtqueue_enqueue(vq, vq, &sg, 1, 0); KASSERT(error == 0, ("error enqueuing page frames to virtqueue")); virtqueue_notify(vq); /* * Inflate and deflate operations are done synchronously. The * interrupt handler will wake us up. 
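* (vtballoon_vq_intr() issues wakeup_one(sc), which ends the msleep() * below once the host has used the buffer.)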
*/ VTBALLOON_LOCK(sc); while ((c = virtqueue_dequeue(vq, NULL)) == NULL) msleep(sc, VTBALLOON_MTX(sc), 0, "vtbspf", 0); VTBALLOON_UNLOCK(sc); KASSERT(c == vq, ("unexpected balloon operation response")); } static void vtballoon_pop(struct vtballoon_softc *sc) { while (!TAILQ_EMPTY(&sc->vtballoon_pages)) vtballoon_deflate(sc, sc->vtballoon_current_npages); } static void vtballoon_stop(struct vtballoon_softc *sc) { virtqueue_disable_intr(sc->vtballoon_inflate_vq); virtqueue_disable_intr(sc->vtballoon_deflate_vq); virtio_stop(sc->vtballoon_dev); } static vm_page_t vtballoon_alloc_page(struct vtballoon_softc *sc) { vm_page_t m; m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ); if (m != NULL) sc->vtballoon_current_npages++; return (m); } static void vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m) { vm_page_free(m); sc->vtballoon_current_npages--; } static uint32_t vtballoon_desired_size(struct vtballoon_softc *sc) { uint32_t desired; desired = virtio_read_dev_config_4(sc->vtballoon_dev, offsetof(struct virtio_balloon_config, num_pages)); return (le32toh(desired)); } static void vtballoon_update_size(struct vtballoon_softc *sc) { virtio_write_dev_config_4(sc->vtballoon_dev, offsetof(struct virtio_balloon_config, actual), htole32(sc->vtballoon_current_npages)); } static int vtballoon_sleep(struct vtballoon_softc *sc) { int rc, timeout; uint32_t current, desired; rc = 0; current = sc->vtballoon_current_npages; VTBALLOON_LOCK(sc); for (;;) { if (sc->vtballoon_flags & VTBALLOON_FLAG_DETACH) { rc = 1; break; } desired = vtballoon_desired_size(sc); sc->vtballoon_desired_npages = desired; /* * If given, use non-zero timeout on the first time through * the loop. On subsequent times, timeout will be zero so * we will reevaluate the desired size of the balloon and * break out to retry if needed. */ timeout = sc->vtballoon_timeout; sc->vtballoon_timeout = 0; if (current > desired) break; if (current < desired && timeout == 0) break; msleep(sc, VTBALLOON_MTX(sc), 0, "vtbslp", timeout); } VTBALLOON_UNLOCK(sc); return (rc); } static void vtballoon_thread(void *xsc) { struct vtballoon_softc *sc; uint32_t current, desired; sc = xsc; for (;;) { if (vtballoon_sleep(sc) != 0) break; current = sc->vtballoon_current_npages; desired = sc->vtballoon_desired_npages; if (desired != current) { if (desired > current) vtballoon_inflate(sc, desired - current); else vtballoon_deflate(sc, current - desired); vtballoon_update_size(sc); } } kthread_exit(); } static void vtballoon_add_sysctl(struct vtballoon_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtballoon_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "desired", CTLFLAG_RD, &sc->vtballoon_desired_npages, sizeof(uint32_t), "Desired balloon size in pages"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "current", CTLFLAG_RD, &sc->vtballoon_current_npages, sizeof(uint32_t), "Current balloon size in pages"); } Index: head/sys/dev/virtio/console/virtio_console.c =================================================================== --- head/sys/dev/virtio/console/virtio_console.c (revision 360722) +++ head/sys/dev/virtio/console/virtio_console.c (revision 360723) @@ -1,1500 +1,1503 @@ /*- * Copyright (c) 2014, Bryan Venteicher * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO console devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" #define VTCON_MAX_PORTS 32 #define VTCON_TTY_PREFIX "V" #define VTCON_TTY_ALIAS_PREFIX "vtcon" #define VTCON_BULK_BUFSZ 128 #define VTCON_CTRL_BUFSZ 128 /* * The buffers cannot cross more than one page boundary due to the * size of the sglist segment array used. */ CTASSERT(VTCON_BULK_BUFSZ <= PAGE_SIZE); CTASSERT(VTCON_CTRL_BUFSZ <= PAGE_SIZE); CTASSERT(sizeof(struct virtio_console_config) <= VTCON_CTRL_BUFSZ); struct vtcon_softc; struct vtcon_softc_port; struct vtcon_port { struct mtx vtcport_mtx; struct vtcon_softc *vtcport_sc; struct vtcon_softc_port *vtcport_scport; struct tty *vtcport_tty; struct virtqueue *vtcport_invq; struct virtqueue *vtcport_outvq; int vtcport_id; int vtcport_flags; #define VTCON_PORT_FLAG_GONE 0x01 #define VTCON_PORT_FLAG_CONSOLE 0x02 #define VTCON_PORT_FLAG_ALIAS 0x04 #if defined(KDB) int vtcport_alt_break_state; #endif }; #define VTCON_PORT_LOCK(_port) mtx_lock(&(_port)->vtcport_mtx) #define VTCON_PORT_UNLOCK(_port) mtx_unlock(&(_port)->vtcport_mtx) struct vtcon_softc_port { struct vtcon_softc *vcsp_sc; struct vtcon_port *vcsp_port; struct virtqueue *vcsp_invq; struct virtqueue *vcsp_outvq; }; struct vtcon_softc { device_t vtcon_dev; struct mtx vtcon_mtx; uint64_t vtcon_features; uint32_t vtcon_max_ports; uint32_t vtcon_flags; #define VTCON_FLAG_DETACHED 0x01 #define VTCON_FLAG_SIZE 0x02 #define VTCON_FLAG_MULTIPORT 0x04 /* * Ports can be added and removed during runtime, but we have * to allocate all the virtqueues during attach. This array is * indexed by the port ID. 
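* Each entry holds the port's fixed in/out virtqueue pair, assigned once * in vtcon_alloc_virtqueues() and reused by whichever vtcon_port is bound * to that ID; see vtcon_port_init_vqs().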
*/ struct vtcon_softc_port *vtcon_ports; struct task vtcon_ctrl_task; struct virtqueue *vtcon_ctrl_rxvq; struct virtqueue *vtcon_ctrl_txvq; struct mtx vtcon_ctrl_tx_mtx; }; #define VTCON_LOCK(_sc) mtx_lock(&(_sc)->vtcon_mtx) #define VTCON_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_mtx) #define VTCON_LOCK_ASSERT(_sc) \ mtx_assert(&(_sc)->vtcon_mtx, MA_OWNED) #define VTCON_LOCK_ASSERT_NOTOWNED(_sc) \ mtx_assert(&(_sc)->vtcon_mtx, MA_NOTOWNED) #define VTCON_CTRL_TX_LOCK(_sc) mtx_lock(&(_sc)->vtcon_ctrl_tx_mtx) #define VTCON_CTRL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_ctrl_tx_mtx) #define VTCON_ASSERT_VALID_PORTID(_sc, _id) \ KASSERT((_id) >= 0 && (_id) < (_sc)->vtcon_max_ports, \ ("%s: port ID %d out of range", __func__, _id)) #define VTCON_FEATURES VIRTIO_CONSOLE_F_MULTIPORT static struct virtio_feature_desc vtcon_feature_desc[] = { { VIRTIO_CONSOLE_F_SIZE, "ConsoleSize" }, { VIRTIO_CONSOLE_F_MULTIPORT, "MultiplePorts" }, { VIRTIO_CONSOLE_F_EMERG_WRITE, "EmergencyWrite" }, { 0, NULL } }; static int vtcon_modevent(module_t, int, void *); static void vtcon_drain_all(void); static int vtcon_probe(device_t); static int vtcon_attach(device_t); static int vtcon_detach(device_t); static int vtcon_config_change(device_t); static void vtcon_setup_features(struct vtcon_softc *); static void vtcon_negotiate_features(struct vtcon_softc *); static int vtcon_alloc_scports(struct vtcon_softc *); static int vtcon_alloc_virtqueues(struct vtcon_softc *); static void vtcon_read_config(struct vtcon_softc *, struct virtio_console_config *); static void vtcon_determine_max_ports(struct vtcon_softc *, struct virtio_console_config *); static void vtcon_destroy_ports(struct vtcon_softc *); static void vtcon_stop(struct vtcon_softc *); static int vtcon_ctrl_event_enqueue(struct vtcon_softc *, struct virtio_console_control *); static int vtcon_ctrl_event_create(struct vtcon_softc *); static void vtcon_ctrl_event_requeue(struct vtcon_softc *, struct virtio_console_control *); static int vtcon_ctrl_event_populate(struct vtcon_softc *); static void vtcon_ctrl_event_drain(struct vtcon_softc *); static int vtcon_ctrl_init(struct vtcon_softc *); static void vtcon_ctrl_deinit(struct vtcon_softc *); static void vtcon_ctrl_port_add_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_remove_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_console_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_open_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_name_event(struct vtcon_softc *, int, const char *, size_t); static void vtcon_ctrl_process_event(struct vtcon_softc *, struct virtio_console_control *, void *, size_t); static void vtcon_ctrl_task_cb(void *, int); static void vtcon_ctrl_event_intr(void *); static void vtcon_ctrl_poll(struct vtcon_softc *, struct virtio_console_control *control); static void vtcon_ctrl_send_control(struct vtcon_softc *, uint32_t, uint16_t, uint16_t); static int vtcon_port_enqueue_buf(struct vtcon_port *, void *, size_t); static int vtcon_port_create_buf(struct vtcon_port *); static void vtcon_port_requeue_buf(struct vtcon_port *, void *); static int vtcon_port_populate(struct vtcon_port *); static void vtcon_port_destroy(struct vtcon_port *); static int vtcon_port_create(struct vtcon_softc *, int); static void vtcon_port_dev_alias(struct vtcon_port *, const char *, size_t); static void vtcon_port_drain_bufs(struct virtqueue *); static void vtcon_port_drain(struct vtcon_port *); static void vtcon_port_teardown(struct vtcon_port *); static void 
vtcon_port_change_size(struct vtcon_port *, uint16_t, uint16_t); static void vtcon_port_update_console_size(struct vtcon_softc *); static void vtcon_port_enable_intr(struct vtcon_port *); static void vtcon_port_disable_intr(struct vtcon_port *); static void vtcon_port_in(struct vtcon_port *); static void vtcon_port_intr(void *); static void vtcon_port_out(struct vtcon_port *, void *, int); static void vtcon_port_submit_event(struct vtcon_port *, uint16_t, uint16_t); static int vtcon_tty_open(struct tty *); static void vtcon_tty_close(struct tty *); static void vtcon_tty_outwakeup(struct tty *); static void vtcon_tty_free(void *); static void vtcon_get_console_size(struct vtcon_softc *, uint16_t *, uint16_t *); static void vtcon_enable_interrupts(struct vtcon_softc *); static void vtcon_disable_interrupts(struct vtcon_softc *); static int vtcon_pending_free; static struct ttydevsw vtcon_tty_class = { .tsw_flags = 0, .tsw_open = vtcon_tty_open, .tsw_close = vtcon_tty_close, .tsw_outwakeup = vtcon_tty_outwakeup, .tsw_free = vtcon_tty_free, }; static device_method_t vtcon_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtcon_probe), DEVMETHOD(device_attach, vtcon_attach), DEVMETHOD(device_detach, vtcon_detach), /* VirtIO methods. */ DEVMETHOD(virtio_config_change, vtcon_config_change), DEVMETHOD_END }; static driver_t vtcon_driver = { "vtcon", vtcon_methods, sizeof(struct vtcon_softc) }; static devclass_t vtcon_devclass; +DRIVER_MODULE(virtio_console, virtio_mmio, vtcon_driver, vtcon_devclass, + vtcon_modevent, 0); DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass, vtcon_modevent, 0); MODULE_VERSION(virtio_console, 1); MODULE_DEPEND(virtio_console, virtio, 1, 1, 1); VIRTIO_SIMPLE_PNPTABLE(virtio_console, VIRTIO_ID_CONSOLE, "VirtIO Console Adapter"); +VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_console); VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_console); static int vtcon_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: error = 0; break; case MOD_QUIESCE: error = 0; break; case MOD_UNLOAD: vtcon_drain_all(); error = 0; break; case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static void vtcon_drain_all(void) { int first; for (first = 1; vtcon_pending_free != 0; first = 0) { if (first != 0) { printf("virtio_console: Waiting for all detached TTY " "devices to have open fds closed.\n"); } pause("vtcondra", hz); } } static int vtcon_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, virtio_console)); } static int vtcon_attach(device_t dev) { struct vtcon_softc *sc; struct virtio_console_config concfg; int error; sc = device_get_softc(dev); sc->vtcon_dev = dev; mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF); mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF); virtio_set_feature_desc(dev, vtcon_feature_desc); vtcon_setup_features(sc); vtcon_read_config(sc, &concfg); vtcon_determine_max_ports(sc, &concfg); error = vtcon_alloc_scports(sc); if (error) { device_printf(dev, "cannot allocate softc port structures\n"); goto fail; } error = vtcon_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) { TASK_INIT(&sc->vtcon_ctrl_task, 0, vtcon_ctrl_task_cb, sc); error = vtcon_ctrl_init(sc); if (error) goto fail; } else { error = vtcon_port_create(sc, 0); if (error) goto fail; if (sc->vtcon_flags & VTCON_FLAG_SIZE) vtcon_port_update_console_size(sc); } error = virtio_setup_intr(dev, 
INTR_TYPE_TTY); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } vtcon_enable_interrupts(sc); vtcon_ctrl_send_control(sc, VIRTIO_CONSOLE_BAD_ID, VIRTIO_CONSOLE_DEVICE_READY, 1); fail: if (error) vtcon_detach(dev); return (error); } static int vtcon_detach(device_t dev) { struct vtcon_softc *sc; sc = device_get_softc(dev); VTCON_LOCK(sc); sc->vtcon_flags |= VTCON_FLAG_DETACHED; if (device_is_attached(dev)) vtcon_stop(sc); VTCON_UNLOCK(sc); if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) { taskqueue_drain(taskqueue_thread, &sc->vtcon_ctrl_task); vtcon_ctrl_deinit(sc); } vtcon_destroy_ports(sc); mtx_destroy(&sc->vtcon_mtx); mtx_destroy(&sc->vtcon_ctrl_tx_mtx); return (0); } static int vtcon_config_change(device_t dev) { struct vtcon_softc *sc; sc = device_get_softc(dev); /* * When the multiport feature is negotiated, all configuration * changes are done through control virtqueue events. */ if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0) { if (sc->vtcon_flags & VTCON_FLAG_SIZE) vtcon_port_update_console_size(sc); } return (0); } static void vtcon_negotiate_features(struct vtcon_softc *sc) { device_t dev; uint64_t features; dev = sc->vtcon_dev; features = VTCON_FEATURES; sc->vtcon_features = virtio_negotiate_features(dev, features); } static void vtcon_setup_features(struct vtcon_softc *sc) { device_t dev; dev = sc->vtcon_dev; vtcon_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE)) sc->vtcon_flags |= VTCON_FLAG_SIZE; if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT)) sc->vtcon_flags |= VTCON_FLAG_MULTIPORT; } #define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg) \ if (virtio_with_feature(_dev, _feature)) { \ virtio_read_device_config(_dev, \ offsetof(struct virtio_console_config, _field), \ &(_cfg)->_field, sizeof((_cfg)->_field)); \ } static void vtcon_read_config(struct vtcon_softc *sc, struct virtio_console_config *concfg) { device_t dev; dev = sc->vtcon_dev; bzero(concfg, sizeof(struct virtio_console_config)); VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, cols, concfg); VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, rows, concfg); VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_MULTIPORT, max_nr_ports, concfg); } #undef VTCON_GET_CONFIG static int vtcon_alloc_scports(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; int max, i; max = sc->vtcon_max_ports; sc->vtcon_ports = malloc(sizeof(struct vtcon_softc_port) * max, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtcon_ports == NULL) return (ENOMEM); for (i = 0; i < max; i++) { scport = &sc->vtcon_ports[i]; scport->vcsp_sc = sc; } return (0); } static int vtcon_alloc_virtqueues(struct vtcon_softc *sc) { device_t dev; struct vq_alloc_info *info; struct vtcon_softc_port *scport; int i, idx, portidx, nvqs, error; dev = sc->vtcon_dev; nvqs = sc->vtcon_max_ports * 2; if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) nvqs += 2; info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); if (info == NULL) return (ENOMEM); for (i = 0, idx = 0, portidx = 0; i < nvqs / 2; i++, idx += 2) { if (i == 1) { /* The control virtqueues are after the first port. 
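* Per the multiport layout in the VirtIO console spec, the queue order is: * port 0 rx/tx, control rx/tx, then an rx/tx pair for each remaining port, * hence the special case for i == 1 here.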
*/ VQ_ALLOC_INFO_INIT(&info[idx], 0, vtcon_ctrl_event_intr, sc, &sc->vtcon_ctrl_rxvq, "%s-control rx", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, sc, &sc->vtcon_ctrl_txvq, "%s-control tx", device_get_nameunit(dev)); continue; } scport = &sc->vtcon_ports[portidx]; VQ_ALLOC_INFO_INIT(&info[idx], 0, vtcon_port_intr, scport, &scport->vcsp_invq, "%s-port%d in", device_get_nameunit(dev), i); VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, NULL, &scport->vcsp_outvq, "%s-port%d out", device_get_nameunit(dev), i); portidx++; } error = virtio_alloc_virtqueues(dev, 0, nvqs, info); free(info, M_TEMP); return (error); } static void vtcon_determine_max_ports(struct vtcon_softc *sc, struct virtio_console_config *concfg) { if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) { sc->vtcon_max_ports = min(concfg->max_nr_ports, VTCON_MAX_PORTS); if (sc->vtcon_max_ports == 0) sc->vtcon_max_ports = 1; } else sc->vtcon_max_ports = 1; } static void vtcon_destroy_ports(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; struct vtcon_port *port; struct virtqueue *vq; int i; if (sc->vtcon_ports == NULL) return; VTCON_LOCK(sc); for (i = 0; i < sc->vtcon_max_ports; i++) { scport = &sc->vtcon_ports[i]; port = scport->vcsp_port; if (port != NULL) { scport->vcsp_port = NULL; VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_teardown(port); VTCON_LOCK(sc); } vq = scport->vcsp_invq; if (vq != NULL) vtcon_port_drain_bufs(vq); } VTCON_UNLOCK(sc); free(sc->vtcon_ports, M_DEVBUF); sc->vtcon_ports = NULL; } static void vtcon_stop(struct vtcon_softc *sc) { vtcon_disable_interrupts(sc); virtio_stop(sc->vtcon_dev); } static int vtcon_ctrl_event_enqueue(struct vtcon_softc *sc, struct virtio_console_control *control) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = sc->vtcon_ctrl_rxvq; sglist_init(&sg, 2, segs); error = sglist_append(&sg, control, VTCON_CTRL_BUFSZ); KASSERT(error == 0, ("%s: error %d adding control to sglist", __func__, error)); return (virtqueue_enqueue(vq, control, &sg, 0, sg.sg_nseg)); } static int vtcon_ctrl_event_create(struct vtcon_softc *sc) { struct virtio_console_control *control; int error; control = malloc(VTCON_CTRL_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT); if (control == NULL) return (ENOMEM); error = vtcon_ctrl_event_enqueue(sc, control); if (error) free(control, M_DEVBUF); return (error); } static void vtcon_ctrl_event_requeue(struct vtcon_softc *sc, struct virtio_console_control *control) { int error; bzero(control, VTCON_CTRL_BUFSZ); error = vtcon_ctrl_event_enqueue(sc, control); KASSERT(error == 0, ("%s: cannot requeue control buffer %d", __func__, error)); } static int vtcon_ctrl_event_populate(struct vtcon_softc *sc) { struct virtqueue *vq; int nbufs, error; vq = sc->vtcon_ctrl_rxvq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtcon_ctrl_event_create(sc); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); error = 0; } return (error); } static void vtcon_ctrl_event_drain(struct vtcon_softc *sc) { struct virtio_console_control *control; struct virtqueue *vq; int last; vq = sc->vtcon_ctrl_rxvq; last = 0; if (vq == NULL) return; VTCON_LOCK(sc); while ((control = virtqueue_drain(vq, &last)) != NULL) free(control, M_DEVBUF); VTCON_UNLOCK(sc); } static int vtcon_ctrl_init(struct vtcon_softc *sc) { int error; error = vtcon_ctrl_event_populate(sc); return (error); } static void vtcon_ctrl_deinit(struct vtcon_softc *sc) { vtcon_ctrl_event_drain(sc); } static void vtcon_ctrl_port_add_event(struct vtcon_softc *sc, int id) 
{ device_t dev; int error; dev = sc->vtcon_dev; /* This single thread is the only way for ports to be created. */ if (sc->vtcon_ports[id].vcsp_port != NULL) { device_printf(dev, "%s: adding port %d, but already exists\n", __func__, id); return; } error = vtcon_port_create(sc, id); if (error) { device_printf(dev, "%s: cannot create port %d: %d\n", __func__, id, error); vtcon_ctrl_send_control(sc, id, VIRTIO_CONSOLE_PORT_READY, 0); return; } } static void vtcon_ctrl_port_remove_event(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: remove port %d, but does not exist\n", __func__, id); return; } scport->vcsp_port = NULL; VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_teardown(port); } static void vtcon_ctrl_port_console_event(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: console port %d, but does not exist\n", __func__, id); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); port->vtcport_flags |= VTCON_PORT_FLAG_CONSOLE; vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1); VTCON_PORT_UNLOCK(port); } static void vtcon_ctrl_port_open_event(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: open port %d, but does not exist\n", __func__, id); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_enable_intr(port); VTCON_PORT_UNLOCK(port); } static void vtcon_ctrl_port_name_event(struct vtcon_softc *sc, int id, const char *name, size_t len) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; /* * The VirtIO specification says the NUL terminator is not included in * the length, but QEMU includes it. Adjust the length if needed.
*/ if (name == NULL || len == 0) return; if (name[len - 1] == '\0') { len--; if (len == 0) return; } VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: name port %d, but does not exist\n", __func__, id); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_dev_alias(port, name, len); VTCON_PORT_UNLOCK(port); } static void vtcon_ctrl_process_event(struct vtcon_softc *sc, struct virtio_console_control *control, void *data, size_t data_len) { device_t dev; int id; dev = sc->vtcon_dev; id = control->id; if (id < 0 || id >= sc->vtcon_max_ports) { device_printf(dev, "%s: invalid port ID %d\n", __func__, id); return; } switch (control->event) { case VIRTIO_CONSOLE_PORT_ADD: vtcon_ctrl_port_add_event(sc, id); break; case VIRTIO_CONSOLE_PORT_REMOVE: vtcon_ctrl_port_remove_event(sc, id); break; case VIRTIO_CONSOLE_CONSOLE_PORT: vtcon_ctrl_port_console_event(sc, id); break; case VIRTIO_CONSOLE_RESIZE: break; case VIRTIO_CONSOLE_PORT_OPEN: vtcon_ctrl_port_open_event(sc, id); break; case VIRTIO_CONSOLE_PORT_NAME: vtcon_ctrl_port_name_event(sc, id, (const char *)data, data_len); break; } } static void vtcon_ctrl_task_cb(void *xsc, int pending) { struct vtcon_softc *sc; struct virtqueue *vq; struct virtio_console_control *control; void *data; size_t data_len; int detached; uint32_t len; sc = xsc; vq = sc->vtcon_ctrl_rxvq; VTCON_LOCK(sc); while ((detached = (sc->vtcon_flags & VTCON_FLAG_DETACHED)) == 0) { control = virtqueue_dequeue(vq, &len); if (control == NULL) break; if (len > sizeof(struct virtio_console_control)) { data = (void *) &control[1]; data_len = len - sizeof(struct virtio_console_control); } else { data = NULL; data_len = 0; } VTCON_UNLOCK(sc); vtcon_ctrl_process_event(sc, control, data, data_len); VTCON_LOCK(sc); vtcon_ctrl_event_requeue(sc, control); } if (!detached) { virtqueue_notify(vq); if (virtqueue_enable_intr(vq) != 0) taskqueue_enqueue(taskqueue_thread, &sc->vtcon_ctrl_task); } VTCON_UNLOCK(sc); } static void vtcon_ctrl_event_intr(void *xsc) { struct vtcon_softc *sc; sc = xsc; /* * Only some events require us to potentially block, but it is * easier to just defer all event handling to the taskqueue. */ taskqueue_enqueue(taskqueue_thread, &sc->vtcon_ctrl_task); } static void vtcon_ctrl_poll(struct vtcon_softc *sc, struct virtio_console_control *control) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = sc->vtcon_ctrl_txvq; sglist_init(&sg, 2, segs); error = sglist_append(&sg, control, sizeof(struct virtio_console_control)); KASSERT(error == 0, ("%s: error %d adding control to sglist", __func__, error)); /* * We cannot use the softc lock to serialize access to this * virtqueue since this is called from the tty layer with the * port lock held. Acquiring the softc lock would violate our lock * ordering.
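* (The established order is softc lock before port lock, so the separate * vtcon_ctrl_tx_mtx serializes control transmissions instead.)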
*/ VTCON_CTRL_TX_LOCK(sc); KASSERT(virtqueue_empty(vq), ("%s: virtqueue is not empty", __func__)); error = virtqueue_enqueue(vq, control, &sg, sg.sg_nseg, 0); if (error == 0) { virtqueue_notify(vq); virtqueue_poll(vq, NULL); } VTCON_CTRL_TX_UNLOCK(sc); } static void vtcon_ctrl_send_control(struct vtcon_softc *sc, uint32_t portid, uint16_t event, uint16_t value) { struct virtio_console_control control; if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0) return; control.id = portid; control.event = event; control.value = value; vtcon_ctrl_poll(sc, &control); } static int vtcon_port_enqueue_buf(struct vtcon_port *port, void *buf, size_t len) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = port->vtcport_invq; sglist_init(&sg, 2, segs); error = sglist_append(&sg, buf, len); KASSERT(error == 0, ("%s: error %d adding buffer to sglist", __func__, error)); error = virtqueue_enqueue(vq, buf, &sg, 0, sg.sg_nseg); return (error); } static int vtcon_port_create_buf(struct vtcon_port *port) { void *buf; int error; buf = malloc(VTCON_BULK_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT); if (buf == NULL) return (ENOMEM); error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ); if (error) free(buf, M_DEVBUF); return (error); } static void vtcon_port_requeue_buf(struct vtcon_port *port, void *buf) { int error; error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ); KASSERT(error == 0, ("%s: cannot requeue input buffer %d", __func__, error)); } static int vtcon_port_populate(struct vtcon_port *port) { struct virtqueue *vq; int nbufs, error; vq = port->vtcport_invq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtcon_port_create_buf(port); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); error = 0; } return (error); } static void vtcon_port_destroy(struct vtcon_port *port) { port->vtcport_sc = NULL; port->vtcport_scport = NULL; port->vtcport_invq = NULL; port->vtcport_outvq = NULL; port->vtcport_id = -1; mtx_destroy(&port->vtcport_mtx); free(port, M_DEVBUF); } static int vtcon_port_init_vqs(struct vtcon_port *port) { struct vtcon_softc_port *scport; int error; scport = port->vtcport_scport; port->vtcport_invq = scport->vcsp_invq; port->vtcport_outvq = scport->vcsp_outvq; /* * Free any data left over from when this virtqueue was in use by a * prior port. We have not yet notified the host that the port is * ready, so assume nothing in the virtqueue can be for us.
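* (The host is not expected to queue data for a port before it has been * told the port is ready.)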
*/ vtcon_port_drain(port); KASSERT(virtqueue_empty(port->vtcport_invq), ("%s: in virtqueue is not empty", __func__)); KASSERT(virtqueue_empty(port->vtcport_outvq), ("%s: out virtqueue is not empty", __func__)); error = vtcon_port_populate(port); if (error) return (error); return (0); } static int vtcon_port_create(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; int error; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_ASSERT_VALID_PORTID(sc, id); MPASS(scport->vcsp_port == NULL); port = malloc(sizeof(struct vtcon_port), M_DEVBUF, M_NOWAIT | M_ZERO); if (port == NULL) return (ENOMEM); port->vtcport_sc = sc; port->vtcport_scport = scport; port->vtcport_id = id; mtx_init(&port->vtcport_mtx, "vtcpmtx", NULL, MTX_DEF); port->vtcport_tty = tty_alloc_mutex(&vtcon_tty_class, port, &port->vtcport_mtx); error = vtcon_port_init_vqs(port); if (error) { VTCON_PORT_LOCK(port); vtcon_port_teardown(port); return (error); } VTCON_LOCK(sc); VTCON_PORT_LOCK(port); scport->vcsp_port = port; vtcon_port_enable_intr(port); vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_READY, 1); VTCON_PORT_UNLOCK(port); VTCON_UNLOCK(sc); tty_makedev(port->vtcport_tty, NULL, "%s%r.%r", VTCON_TTY_PREFIX, device_get_unit(dev), id); return (0); } static void vtcon_port_dev_alias(struct vtcon_port *port, const char *name, size_t len) { struct vtcon_softc *sc; struct cdev *pdev; struct tty *tp; int i, error; sc = port->vtcport_sc; tp = port->vtcport_tty; if (port->vtcport_flags & VTCON_PORT_FLAG_ALIAS) return; /* Port name is UTF-8, but we can only handle ASCII. */ for (i = 0; i < len; i++) { if (!isascii(name[i])) return; } /* * Port name may not conform to the devfs requirements so we cannot use * tty_makealias() because the MAKEDEV_CHECKNAME flag must be specified. */ error = make_dev_alias_p(MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME, &pdev, tp->t_dev, "%s/%*s", VTCON_TTY_ALIAS_PREFIX, (int)len, name); if (error) { device_printf(sc->vtcon_dev, "%s: cannot make dev alias (%s/%*s) error %d\n", __func__, VTCON_TTY_ALIAS_PREFIX, (int)len, name, error); } else port->vtcport_flags |= VTCON_PORT_FLAG_ALIAS; } static void vtcon_port_drain_bufs(struct virtqueue *vq) { void *buf; int last; last = 0; while ((buf = virtqueue_drain(vq, &last)) != NULL) free(buf, M_DEVBUF); } static void vtcon_port_drain(struct vtcon_port *port) { vtcon_port_drain_bufs(port->vtcport_invq); } static void vtcon_port_teardown(struct vtcon_port *port) { struct tty *tp; tp = port->vtcport_tty; port->vtcport_flags |= VTCON_PORT_FLAG_GONE; if (tp != NULL) { atomic_add_int(&vtcon_pending_free, 1); tty_rel_gone(tp); } else vtcon_port_destroy(port); } static void vtcon_port_change_size(struct vtcon_port *port, uint16_t cols, uint16_t rows) { struct tty *tp; struct winsize sz; tp = port->vtcport_tty; if (tp == NULL) return; bzero(&sz, sizeof(struct winsize)); sz.ws_col = cols; sz.ws_row = rows; tty_set_winsize(tp, &sz); } static void vtcon_port_update_console_size(struct vtcon_softc *sc) { struct vtcon_port *port; struct vtcon_softc_port *scport; uint16_t cols, rows; vtcon_get_console_size(sc, &cols, &rows); /* * For now, assume the first (only) port is the console. Note * QEMU does not implement this feature yet. 
*/ scport = &sc->vtcon_ports[0]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port != NULL) { VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_change_size(port, cols, rows); VTCON_PORT_UNLOCK(port); } else VTCON_UNLOCK(sc); } static void vtcon_port_enable_intr(struct vtcon_port *port) { /* * NOTE: The out virtqueue is always polled, so its interrupt is * kept disabled. */ virtqueue_enable_intr(port->vtcport_invq); } static void vtcon_port_disable_intr(struct vtcon_port *port) { if (port->vtcport_invq != NULL) virtqueue_disable_intr(port->vtcport_invq); if (port->vtcport_outvq != NULL) virtqueue_disable_intr(port->vtcport_outvq); } static void vtcon_port_in(struct vtcon_port *port) { struct virtqueue *vq; struct tty *tp; char *buf; uint32_t len; int i, deq; tp = port->vtcport_tty; vq = port->vtcport_invq; again: deq = 0; while ((buf = virtqueue_dequeue(vq, &len)) != NULL) { for (i = 0; i < len; i++) { #if defined(KDB) if (port->vtcport_flags & VTCON_PORT_FLAG_CONSOLE) kdb_alt_break(buf[i], &port->vtcport_alt_break_state); #endif ttydisc_rint(tp, buf[i], 0); } vtcon_port_requeue_buf(port, buf); deq++; } ttydisc_rint_done(tp); if (deq > 0) virtqueue_notify(vq); if (virtqueue_enable_intr(vq) != 0) goto again; } static void vtcon_port_intr(void *scportx) { struct vtcon_softc_port *scport; struct vtcon_softc *sc; struct vtcon_port *port; scport = scportx; sc = scport->vcsp_sc; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); if ((port->vtcport_flags & VTCON_PORT_FLAG_GONE) == 0) vtcon_port_in(port); VTCON_PORT_UNLOCK(port); } static void vtcon_port_out(struct vtcon_port *port, void *buf, int bufsize) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = port->vtcport_outvq; KASSERT(virtqueue_empty(vq), ("%s: port %p out virtqueue not empty", __func__, port)); sglist_init(&sg, 2, segs); error = sglist_append(&sg, buf, bufsize); KASSERT(error == 0, ("%s: error %d adding buffer to sglist", __func__, error)); error = virtqueue_enqueue(vq, buf, &sg, sg.sg_nseg, 0); if (error == 0) { virtqueue_notify(vq); virtqueue_poll(vq, NULL); } } static void vtcon_port_submit_event(struct vtcon_port *port, uint16_t event, uint16_t value) { struct vtcon_softc *sc; sc = port->vtcport_sc; vtcon_ctrl_send_control(sc, port->vtcport_id, event, value); } static int vtcon_tty_open(struct tty *tp) { struct vtcon_port *port; port = tty_softc(tp); if (port->vtcport_flags & VTCON_PORT_FLAG_GONE) return (ENXIO); vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1); return (0); } static void vtcon_tty_close(struct tty *tp) { struct vtcon_port *port; port = tty_softc(tp); if (port->vtcport_flags & VTCON_PORT_FLAG_GONE) return; vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 0); } static void vtcon_tty_outwakeup(struct tty *tp) { struct vtcon_port *port; char buf[VTCON_BULK_BUFSZ]; int len; port = tty_softc(tp); if (port->vtcport_flags & VTCON_PORT_FLAG_GONE) return; while ((len = ttydisc_getc(tp, buf, sizeof(buf))) != 0) vtcon_port_out(port, buf, len); } static void vtcon_tty_free(void *xport) { struct vtcon_port *port; port = xport; vtcon_port_destroy(port); atomic_subtract_int(&vtcon_pending_free, 1); } static void vtcon_get_console_size(struct vtcon_softc *sc, uint16_t *cols, uint16_t *rows) { struct virtio_console_config concfg; KASSERT(sc->vtcon_flags & VTCON_FLAG_SIZE, ("%s: size feature not negotiated", __func__)); vtcon_read_config(sc, &concfg); *cols = concfg.cols; *rows = concfg.rows; }
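/* * The two helpers below walk the control rx virtqueue and every attached * port; only the in side of each port is ever enabled, since the out * virtqueues are polled (see vtcon_port_enable_intr() above). */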
static void vtcon_enable_interrupts(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; struct vtcon_port *port; int i; VTCON_LOCK(sc); if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) virtqueue_enable_intr(sc->vtcon_ctrl_rxvq); for (i = 0; i < sc->vtcon_max_ports; i++) { scport = &sc->vtcon_ports[i]; port = scport->vcsp_port; if (port == NULL) continue; VTCON_PORT_LOCK(port); vtcon_port_enable_intr(port); VTCON_PORT_UNLOCK(port); } VTCON_UNLOCK(sc); } static void vtcon_disable_interrupts(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; struct vtcon_port *port; int i; VTCON_LOCK_ASSERT(sc); if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) virtqueue_disable_intr(sc->vtcon_ctrl_rxvq); for (i = 0; i < sc->vtcon_max_ports; i++) { scport = &sc->vtcon_ports[i]; port = scport->vcsp_port; if (port == NULL) continue; VTCON_PORT_LOCK(port); vtcon_port_disable_intr(port); VTCON_PORT_UNLOCK(port); } } Index: head/sys/dev/virtio/random/virtio_random.c =================================================================== --- head/sys/dev/virtio/random/virtio_random.c (revision 360722) +++ head/sys/dev/virtio/random/virtio_random.c (revision 360723) @@ -1,265 +1,268 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO entropy device. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct vtrnd_softc { uint64_t vtrnd_features; struct virtqueue *vtrnd_vq; }; static int vtrnd_modevent(module_t, int, void *); static int vtrnd_probe(device_t); static int vtrnd_attach(device_t); static int vtrnd_detach(device_t); static void vtrnd_negotiate_features(device_t); static int vtrnd_alloc_virtqueue(device_t); static int vtrnd_harvest(struct vtrnd_softc *, void *, size_t *); static unsigned vtrnd_read(void *, unsigned); #define VTRND_FEATURES 0 static struct virtio_feature_desc vtrnd_feature_desc[] = { { 0, NULL } }; static struct random_source random_vtrnd = { .rs_ident = "VirtIO Entropy Adapter", .rs_source = RANDOM_PURE_VIRTIO, .rs_read = vtrnd_read, }; /* Kludge for API limitations of random(4). 
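* The rs_read callback receives no device context, so the softc is * published through this global; vtrnd_attach() installs it with an * atomic compare-and-swap and fails with EEXIST if another instance * already registered.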
*/ static _Atomic(struct vtrnd_softc *) g_vtrnd_softc; static device_method_t vtrnd_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtrnd_probe), DEVMETHOD(device_attach, vtrnd_attach), DEVMETHOD(device_detach, vtrnd_detach), DEVMETHOD_END }; static driver_t vtrnd_driver = { "vtrnd", vtrnd_methods, sizeof(struct vtrnd_softc) }; static devclass_t vtrnd_devclass; +DRIVER_MODULE(virtio_random, virtio_mmio, vtrnd_driver, vtrnd_devclass, + vtrnd_modevent, 0); DRIVER_MODULE(virtio_random, virtio_pci, vtrnd_driver, vtrnd_devclass, vtrnd_modevent, 0); MODULE_VERSION(virtio_random, 1); MODULE_DEPEND(virtio_random, virtio, 1, 1, 1); MODULE_DEPEND(virtio_random, random_device, 1, 1, 1); VIRTIO_SIMPLE_PNPTABLE(virtio_random, VIRTIO_ID_ENTROPY, "VirtIO Entropy Adapter"); +VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_random); VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_random); static int vtrnd_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int vtrnd_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, virtio_random)); } static int vtrnd_attach(device_t dev) { struct vtrnd_softc *sc, *exp; int error; sc = device_get_softc(dev); virtio_set_feature_desc(dev, vtrnd_feature_desc); vtrnd_negotiate_features(dev); error = vtrnd_alloc_virtqueue(dev); if (error) { device_printf(dev, "cannot allocate virtqueue\n"); goto fail; } exp = NULL; if (!atomic_compare_exchange_strong_explicit(&g_vtrnd_softc, &exp, sc, memory_order_release, memory_order_acquire)) { error = EEXIST; goto fail; } random_source_register(&random_vtrnd); fail: if (error) vtrnd_detach(dev); return (error); } static int vtrnd_detach(device_t dev) { struct vtrnd_softc *sc; sc = device_get_softc(dev); KASSERT( atomic_load_explicit(&g_vtrnd_softc, memory_order_acquire) == sc, ("only one global instance at a time")); random_source_deregister(&random_vtrnd); atomic_store_explicit(&g_vtrnd_softc, NULL, memory_order_release); return (0); } static void vtrnd_negotiate_features(device_t dev) { struct vtrnd_softc *sc; sc = device_get_softc(dev); sc->vtrnd_features = virtio_negotiate_features(dev, VTRND_FEATURES); } static int vtrnd_alloc_virtqueue(device_t dev) { struct vtrnd_softc *sc; struct vq_alloc_info vq_info; sc = device_get_softc(dev); VQ_ALLOC_INFO_INIT(&vq_info, 0, NULL, sc, &sc->vtrnd_vq, "%s request", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info)); } static int vtrnd_harvest(struct vtrnd_softc *sc, void *buf, size_t *sz) { struct sglist_seg segs[1]; struct sglist sg; struct virtqueue *vq; uint32_t value[HARVESTSIZE] __aligned(sizeof(uint32_t) * HARVESTSIZE); uint32_t rdlen; int error; _Static_assert(sizeof(value) < PAGE_SIZE, "sglist assumption"); sglist_init(&sg, 1, segs); error = sglist_append(&sg, value, *sz); if (error != 0) panic("%s: sglist_append error=%d", __func__, error); vq = sc->vtrnd_vq; KASSERT(virtqueue_empty(vq), ("%s: non-empty queue", __func__)); error = virtqueue_enqueue(vq, buf, &sg, 0, 1); if (error != 0) return (error); /* * Poll for the response, but the command is likely already * done when we return from the notify. 
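* virtqueue_poll() busy-waits until the host returns the buffer, storing * the number of bytes the device wrote in rdlen.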
*/ virtqueue_notify(vq); virtqueue_poll(vq, &rdlen); if (rdlen > *sz) panic("%s: random device wrote %zu bytes beyond end of provided" " buffer %p:%zu", __func__, (size_t)rdlen - *sz, (void *)value, *sz); else if (rdlen == 0) return (EAGAIN); *sz = MIN(rdlen, *sz); memcpy(buf, value, *sz); explicit_bzero(value, *sz); return (0); } static unsigned vtrnd_read(void *buf, unsigned usz) { struct vtrnd_softc *sc; size_t sz; int error; sc = g_vtrnd_softc; if (sc == NULL) return (0); sz = usz; error = vtrnd_harvest(sc, buf, &sz); if (error != 0) return (0); return (sz); } Index: head/sys/dev/virtio/scsi/virtio_scsi.c =================================================================== --- head/sys/dev/virtio/scsi/virtio_scsi.c (revision 360722) +++ head/sys/dev/virtio/scsi/virtio_scsi.c (revision 360723) @@ -1,2338 +1,2341 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO SCSI devices. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" static int vtscsi_modevent(module_t, int, void *); static int vtscsi_probe(device_t); static int vtscsi_attach(device_t); static int vtscsi_detach(device_t); static int vtscsi_suspend(device_t); static int vtscsi_resume(device_t); static void vtscsi_negotiate_features(struct vtscsi_softc *); static void vtscsi_read_config(struct vtscsi_softc *, struct virtio_scsi_config *); static int vtscsi_maximum_segments(struct vtscsi_softc *, int); static int vtscsi_alloc_virtqueues(struct vtscsi_softc *); static void vtscsi_check_sizes(struct vtscsi_softc *); static void vtscsi_write_device_config(struct vtscsi_softc *); static int vtscsi_reinit(struct vtscsi_softc *); static int vtscsi_alloc_cam(struct vtscsi_softc *); static int vtscsi_register_cam(struct vtscsi_softc *); static void vtscsi_free_cam(struct vtscsi_softc *); static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *); static int vtscsi_register_async(struct vtscsi_softc *); static void vtscsi_deregister_async(struct vtscsi_softc *); static void vtscsi_cam_action(struct cam_sim *, union ccb *); static void vtscsi_cam_poll(struct cam_sim *); static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *, union ccb *); static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_path_inquiry(struct vtscsi_softc *, struct cam_sim *, union ccb *); static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *, struct sglist *, struct ccb_scsiio *); static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *, struct vtscsi_request *, int *, int *); static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *); static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_timedout_scsi_cmd(void *); static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *); static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *, struct ccb_scsiio *, struct virtio_scsi_cmd_resp *); static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_poll_ctrl_req(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_execute_ctrl_req(struct vtscsi_softc *, struct vtscsi_request *, struct sglist *, int, int, int); static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c, struct vtscsi_request *); static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *); static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []); static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *, struct virtio_scsi_cmd_req *); static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, 
uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *); static void vtscsi_freeze_simq(struct vtscsi_softc *, int); static int vtscsi_thaw_simq(struct vtscsi_softc *, int); static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t, lun_id_t); static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t, lun_id_t); static void vtscsi_execute_rescan_bus(struct vtscsi_softc *); static void vtscsi_handle_event(struct vtscsi_softc *, struct virtio_scsi_event *); static int vtscsi_enqueue_event_buf(struct vtscsi_softc *, struct virtio_scsi_event *); static int vtscsi_init_event_vq(struct vtscsi_softc *); static void vtscsi_reinit_event_vq(struct vtscsi_softc *); static void vtscsi_drain_event_vq(struct vtscsi_softc *); static void vtscsi_complete_vqs_locked(struct vtscsi_softc *); static void vtscsi_complete_vqs(struct vtscsi_softc *); static void vtscsi_drain_vqs(struct vtscsi_softc *); static void vtscsi_cancel_request(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *); static void vtscsi_stop(struct vtscsi_softc *); static int vtscsi_reset_bus(struct vtscsi_softc *); static void vtscsi_init_request(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_alloc_requests(struct vtscsi_softc *); static void vtscsi_free_requests(struct vtscsi_softc *); static void vtscsi_enqueue_request(struct vtscsi_softc *, struct vtscsi_request *); static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *); static void vtscsi_complete_request(struct vtscsi_request *); static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *); static void vtscsi_control_vq_intr(void *); static void vtscsi_event_vq_intr(void *); static void vtscsi_request_vq_intr(void *); static void vtscsi_disable_vqs_intr(struct vtscsi_softc *); static void vtscsi_enable_vqs_intr(struct vtscsi_softc *); static void vtscsi_get_tunables(struct vtscsi_softc *); static void vtscsi_add_sysctl(struct vtscsi_softc *); static void vtscsi_printf_req(struct vtscsi_request *, const char *, const char *, ...); /* Global tunables. */ /* * The current QEMU VirtIO SCSI implementation does not cancel in-flight * IO during virtio_stop(). So in-flight requests still complete after the * device reset. We would have to wait for all the in-flight IO to complete, * which defeats the typical purpose of a bus reset. We could simulate the * bus reset with either I_T_NEXUS_RESET of all the targets, or with * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the * control virtqueue). But this isn't very useful if things really go off * the rails, so default to disabled for now. */ static int vtscsi_bus_reset_disable = 1; TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable); static struct virtio_feature_desc vtscsi_feature_desc[] = { { VIRTIO_SCSI_F_INOUT, "InOut" }, { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" }, { 0, NULL } }; static device_method_t vtscsi_methods[] = { /* Device methods. 
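 * Standard newbus entry points; the suspend and resume methods below
 * are no-op stubs.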
*/ DEVMETHOD(device_probe, vtscsi_probe), DEVMETHOD(device_attach, vtscsi_attach), DEVMETHOD(device_detach, vtscsi_detach), DEVMETHOD(device_suspend, vtscsi_suspend), DEVMETHOD(device_resume, vtscsi_resume), DEVMETHOD_END }; static driver_t vtscsi_driver = { "vtscsi", vtscsi_methods, sizeof(struct vtscsi_softc) }; static devclass_t vtscsi_devclass; +DRIVER_MODULE(virtio_scsi, virtio_mmio, vtscsi_driver, vtscsi_devclass, + vtscsi_modevent, 0); DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass, vtscsi_modevent, 0); MODULE_VERSION(virtio_scsi, 1); MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1); MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1); VIRTIO_SIMPLE_PNPTABLE(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter"); +VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_scsi); VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_scsi); static int vtscsi_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int vtscsi_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi)); } static int vtscsi_attach(device_t dev) { struct vtscsi_softc *sc; struct virtio_scsi_config scsicfg; int error; sc = device_get_softc(dev); sc->vtscsi_dev = dev; VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev)); TAILQ_INIT(&sc->vtscsi_req_free); vtscsi_get_tunables(sc); vtscsi_add_sysctl(sc); virtio_set_feature_desc(dev, vtscsi_feature_desc); vtscsi_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT)) sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL; if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG)) sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG; vtscsi_read_config(sc, &scsicfg); sc->vtscsi_max_channel = scsicfg.max_channel; sc->vtscsi_max_target = scsicfg.max_target; sc->vtscsi_max_lun = scsicfg.max_lun; sc->vtscsi_event_buf_size = scsicfg.event_info_size; vtscsi_write_device_config(sc); sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max); sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT); if (sc->vtscsi_sglist == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate sglist\n"); goto fail; } error = vtscsi_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } vtscsi_check_sizes(sc); error = vtscsi_init_event_vq(sc); if (error) { device_printf(dev, "cannot populate the eventvq\n"); goto fail; } error = vtscsi_alloc_requests(sc); if (error) { device_printf(dev, "cannot allocate requests\n"); goto fail; } error = vtscsi_alloc_cam(sc); if (error) { device_printf(dev, "cannot allocate CAM structures\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_CAM); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } vtscsi_enable_vqs_intr(sc); /* * Register with CAM after interrupts are enabled so we will get * notified of the probe responses. 
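 * xpt_bus_register() kicks off the initial bus scan, and those probe
 * commands only complete through the request virtqueue interrupt
 * handler.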
*/ error = vtscsi_register_cam(sc); if (error) { device_printf(dev, "cannot register with CAM\n"); goto fail; } fail: if (error) vtscsi_detach(dev); return (error); } static int vtscsi_detach(device_t dev) { struct vtscsi_softc *sc; sc = device_get_softc(dev); VTSCSI_LOCK(sc); sc->vtscsi_flags |= VTSCSI_FLAG_DETACH; if (device_is_attached(dev)) vtscsi_stop(sc); VTSCSI_UNLOCK(sc); vtscsi_complete_vqs(sc); vtscsi_drain_vqs(sc); vtscsi_free_cam(sc); vtscsi_free_requests(sc); if (sc->vtscsi_sglist != NULL) { sglist_free(sc->vtscsi_sglist); sc->vtscsi_sglist = NULL; } VTSCSI_LOCK_DESTROY(sc); return (0); } static int vtscsi_suspend(device_t dev) { return (0); } static int vtscsi_resume(device_t dev) { return (0); } static void vtscsi_negotiate_features(struct vtscsi_softc *sc) { device_t dev; uint64_t features; dev = sc->vtscsi_dev; features = virtio_negotiate_features(dev, VTSCSI_FEATURES); sc->vtscsi_features = features; } #define VTSCSI_GET_CONFIG(_dev, _field, _cfg) \ virtio_read_device_config(_dev, \ offsetof(struct virtio_scsi_config, _field), \ &(_cfg)->_field, sizeof((_cfg)->_field)) \ static void vtscsi_read_config(struct vtscsi_softc *sc, struct virtio_scsi_config *scsicfg) { device_t dev; dev = sc->vtscsi_dev; bzero(scsicfg, sizeof(struct virtio_scsi_config)); VTSCSI_GET_CONFIG(dev, num_queues, scsicfg); VTSCSI_GET_CONFIG(dev, seg_max, scsicfg); VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg); VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg); VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg); VTSCSI_GET_CONFIG(dev, sense_size, scsicfg); VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg); VTSCSI_GET_CONFIG(dev, max_channel, scsicfg); VTSCSI_GET_CONFIG(dev, max_target, scsicfg); VTSCSI_GET_CONFIG(dev, max_lun, scsicfg); } #undef VTSCSI_GET_CONFIG static int vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max) { int nsegs; nsegs = VTSCSI_MIN_SEGMENTS; if (seg_max > 0) { nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); } else nsegs += 1; return (nsegs); } static int vtscsi_alloc_virtqueues(struct vtscsi_softc *sc) { device_t dev; struct vq_alloc_info vq_info[3]; int nvqs; dev = sc->vtscsi_dev; nvqs = 3; VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc, &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc, &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs, vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq, "%s request", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); } static void vtscsi_check_sizes(struct vtscsi_softc *sc) { int rqsize; if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) { /* * Ensure the assertions in virtqueue_enqueue(), * even if the hypervisor reports a bad seg_max. */ rqsize = virtqueue_size(sc->vtscsi_request_vq); if (sc->vtscsi_max_nsegs > rqsize) { device_printf(sc->vtscsi_dev, "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs, rqsize); sc->vtscsi_max_nsegs = rqsize; } } } static void vtscsi_write_device_config(struct vtscsi_softc *sc) { virtio_write_dev_config_4(sc->vtscsi_dev, offsetof(struct virtio_scsi_config, sense_size), VIRTIO_SCSI_SENSE_SIZE); /* * This is the size in the virtio_scsi_cmd_req structure. Note * this value (32) is larger than the maximum CAM CDB size (16). 
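 * For example, a 10-byte READ(10) CDB fills only the first 10 bytes
 * of the 32-byte cmd_req->cdb array; the tail stays zeroed because
 * each request is bzero'd when recycled in vtscsi_enqueue_request().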
*/ virtio_write_dev_config_4(sc->vtscsi_dev, offsetof(struct virtio_scsi_config, cdb_size), VIRTIO_SCSI_CDB_SIZE); } static int vtscsi_reinit(struct vtscsi_softc *sc) { device_t dev; int error; dev = sc->vtscsi_dev; error = virtio_reinit(dev, sc->vtscsi_features); if (error == 0) { vtscsi_write_device_config(sc); vtscsi_reinit_event_vq(sc); virtio_reinit_complete(dev); vtscsi_enable_vqs_intr(sc); } vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error); return (error); } static int vtscsi_alloc_cam(struct vtscsi_softc *sc) { device_t dev; struct cam_devq *devq; int openings; dev = sc->vtscsi_dev; openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS; devq = cam_simq_alloc(openings); if (devq == NULL) { device_printf(dev, "cannot allocate SIM queue\n"); return (ENOMEM); } sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll, "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1, openings, devq); if (sc->vtscsi_sim == NULL) { cam_simq_free(devq); device_printf(dev, "cannot allocate SIM\n"); return (ENOMEM); } return (0); } static int vtscsi_register_cam(struct vtscsi_softc *sc) { device_t dev; int registered, error; dev = sc->vtscsi_dev; registered = 0; VTSCSI_LOCK(sc); if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) { error = ENOMEM; device_printf(dev, "cannot register XPT bus\n"); goto fail; } registered = 1; if (xpt_create_path(&sc->vtscsi_path, NULL, cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { error = ENOMEM; device_printf(dev, "cannot create bus path\n"); goto fail; } if (vtscsi_register_async(sc) != CAM_REQ_CMP) { error = EIO; device_printf(dev, "cannot register async callback\n"); goto fail; } VTSCSI_UNLOCK(sc); return (0); fail: if (sc->vtscsi_path != NULL) { xpt_free_path(sc->vtscsi_path); sc->vtscsi_path = NULL; } if (registered != 0) xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); VTSCSI_UNLOCK(sc); return (error); } static void vtscsi_free_cam(struct vtscsi_softc *sc) { VTSCSI_LOCK(sc); if (sc->vtscsi_path != NULL) { vtscsi_deregister_async(sc); xpt_free_path(sc->vtscsi_path); sc->vtscsi_path = NULL; xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); } if (sc->vtscsi_sim != NULL) { cam_sim_free(sc->vtscsi_sim, 1); sc->vtscsi_sim = NULL; } VTSCSI_UNLOCK(sc); } static void vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; struct vtscsi_softc *sc; sim = cb_arg; sc = cam_sim_softc(sim); vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code); /* * TODO Once QEMU supports event reporting, we should * (un)subscribe to events here. 
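 * For now, the switch below is intentionally a no-op for both
 * AC_FOUND_DEVICE and AC_LOST_DEVICE.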
*/ switch (code) { case AC_FOUND_DEVICE: break; case AC_LOST_DEVICE: break; } } static int vtscsi_register_async(struct vtscsi_softc *sc) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE; csa.callback = vtscsi_cam_async; csa.callback_arg = sc->vtscsi_sim; xpt_action((union ccb *) &csa); return (csa.ccb_h.status); } static void vtscsi_deregister_async(struct vtscsi_softc *sc) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = vtscsi_cam_async; csa.callback_arg = sc->vtscsi_sim; xpt_action((union ccb *) &csa); } static void vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct vtscsi_softc *sc; struct ccb_hdr *ccbh; sc = cam_sim_softc(sim); ccbh = &ccb->ccb_h; VTSCSI_LOCK_OWNED(sc); if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) { /* * The VTSCSI_MTX is briefly dropped between setting * VTSCSI_FLAG_DETACH and deregistering with CAM, so * drop any CCBs that come in during that window. */ ccbh->status = CAM_NO_HBA; xpt_done(ccb); return; } switch (ccbh->func_code) { case XPT_SCSI_IO: vtscsi_cam_scsi_io(sc, sim, ccb); break; case XPT_SET_TRAN_SETTINGS: ccbh->status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: vtscsi_cam_get_tran_settings(sc, ccb); break; case XPT_RESET_BUS: vtscsi_cam_reset_bus(sc, ccb); break; case XPT_RESET_DEV: vtscsi_cam_reset_dev(sc, ccb); break; case XPT_ABORT: vtscsi_cam_abort(sc, ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: vtscsi_cam_path_inquiry(sc, sim, ccb); break; default: vtscsi_dprintf(sc, VTSCSI_ERROR, "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code); ccbh->status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void vtscsi_cam_poll(struct cam_sim *sim) { struct vtscsi_softc *sc; sc = cam_sim_softc(sim); vtscsi_complete_vqs_locked(sc); } static void vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim, union ccb *ccb) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; int error; ccbh = &ccb->ccb_h; csio = &ccb->csio; if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) { error = EINVAL; ccbh->status = CAM_REQ_INVALID; goto done; } if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH && (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) { error = EINVAL; ccbh->status = CAM_REQ_INVALID; goto done; } error = vtscsi_start_scsi_cmd(sc, ccb); done: if (error) { vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status); xpt_done(ccb); } } static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb) { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_SAS; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb) { int error; error = vtscsi_reset_bus(sc); if (error == 0) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_REQ_CMP_ERR; vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n", error, ccb, ccb->ccb_h.status); xpt_done(ccb); } static void vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb) { struct ccb_hdr *ccbh; struct vtscsi_request *req; int error; 
ccbh = &ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = EAGAIN; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); goto fail; } req->vsr_ccb = ccb; error = vtscsi_execute_reset_dev_cmd(sc, req); if (error == 0) return; vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", error, req, ccb); if (error == EAGAIN) ccbh->status = CAM_RESRC_UNAVAIL; else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); } static void vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb) { struct vtscsi_request *req; struct ccb_hdr *ccbh; int error; ccbh = &ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = EAGAIN; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); goto fail; } req->vsr_ccb = ccb; error = vtscsi_execute_abort_task_cmd(sc, req); if (error == 0) return; vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", error, req, ccb); if (error == EAGAIN) ccbh->status = CAM_RESRC_UNAVAIL; else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); } static void vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim, union ccb *ccb) { device_t dev; struct ccb_pathinq *cpi; dev = sc->vtscsi_dev; cpi = &ccb->cpi; vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; if (vtscsi_bus_reset_disable != 0) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = sc->vtscsi_max_target; cpi->max_lun = sc->vtscsi_max_lun; cpi->initiator_id = cpi->max_target + 1; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 300000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) * PAGE_SIZE; cpi->hba_vendor = virtio_get_vendor(dev); cpi->hba_device = virtio_get_device(dev); cpi->hba_subvendor = virtio_get_subvendor(dev); cpi->hba_subdevice = virtio_get_subdevice(dev); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg, struct ccb_scsiio *csio) { struct ccb_hdr *ccbh; struct bus_dma_segment *dseg; int i, error; ccbh = &csio->ccb_h; error = 0; switch ((ccbh->flags & CAM_DATA_MASK)) { case CAM_DATA_VADDR: error = sglist_append(sg, csio->data_ptr, csio->dxfer_len); break; case CAM_DATA_PADDR: error = sglist_append_phys(sg, (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len); break; case CAM_DATA_SG: for (i = 0; i < csio->sglist_cnt && error == 0; i++) { dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; error = sglist_append(sg, (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len); } break; case CAM_DATA_SG_PADDR: for (i = 0; i < csio->sglist_cnt && error == 0; i++) { dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; error = sglist_append_phys(sg, (vm_paddr_t) dseg->ds_addr, dseg->ds_len); } break; case CAM_DATA_BIO: error = sglist_append_bio(sg, (struct bio *) csio->data_ptr); break; default: error = EINVAL; break; } return (error); } static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req, int *readable, int *writable) { struct sglist *sg; struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct virtio_scsi_cmd_req *cmd_req; 
struct virtio_scsi_cmd_resp *cmd_resp; int error; sg = sc->vtscsi_sglist; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_req = &req->vsr_cmd_req; cmd_resp = &req->vsr_cmd_resp; sglist_reset(sg); sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req)); if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { error = vtscsi_sg_append_scsi_buf(sc, sg, csio); /* At least one segment must be left for the response. */ if (error || sg->sg_nseg == sg->sg_maxseg) goto fail; } *readable = sg->sg_nseg; sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp)); if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) { error = vtscsi_sg_append_scsi_buf(sc, sg, csio); if (error) goto fail; } *writable = sg->sg_nseg - *readable; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d " "writable=%d\n", req, ccbh, *readable, *writable); return (0); fail: /* * This should never happen unless maxio was incorrectly set. */ vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0); vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p " "nseg=%d maxseg=%d\n", error, req, ccbh, sg->sg_nseg, sg->sg_maxseg); return (EFBIG); } static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct virtqueue *vq; struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct virtio_scsi_cmd_req *cmd_req; struct virtio_scsi_cmd_resp *cmd_resp; int readable, writable, error; sg = sc->vtscsi_sglist; vq = sc->vtscsi_request_vq; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_req = &req->vsr_cmd_req; cmd_resp = &req->vsr_cmd_resp; vtscsi_init_scsi_cmd_req(csio, cmd_req); error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable); if (error) return (error); req->vsr_complete = vtscsi_complete_scsi_cmd; cmd_resp->response = -1; error = virtqueue_enqueue(vq, req, sg, readable, writable); if (error) { vtscsi_dprintf(sc, VTSCSI_ERROR, "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh); ccbh->status = CAM_REQUEUE_REQ; vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ); return (error); } ccbh->status |= CAM_SIM_QUEUED; ccbh->ccbh_vtscsi_req = req; virtqueue_notify(vq); if (ccbh->timeout != CAM_TIME_INFINITY) { req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET; callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout, 0, vtscsi_timedout_scsi_cmd, req, 0); } vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n", req, ccbh); return (0); } static int vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb) { struct vtscsi_request *req; int error; req = vtscsi_dequeue_request(sc); if (req == NULL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); return (ENOBUFS); } req->vsr_ccb = ccb; error = vtscsi_execute_scsi_cmd(sc, req); if (error) vtscsi_enqueue_request(sc, req); return (error); } static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct virtio_scsi_ctrl_tmf_resp *tmf_resp; struct vtscsi_request *to_req; uint8_t response; tmf_resp = &req->vsr_tmf_resp; response = tmf_resp->response; to_req = req->vsr_timedout_req; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n", req, to_req, response); vtscsi_enqueue_request(sc, req); /* * The timedout request could have completed between when the * abort task was sent and when the host processed it. */ if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT) return; /* The timedout request was successfully aborted. */ if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) return; /* Don't bother if the device is going away. 
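 * Detach cancels all outstanding requests via vtscsi_drain_vqs()
 * anyway, so a bus reset here would be wasted work.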
*/ if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) return; /* The timedout request will be aborted by the reset. */ if (sc->vtscsi_flags & VTSCSI_FLAG_RESET) return; vtscsi_reset_bus(sc); } static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *to_req) { struct sglist *sg; struct ccb_hdr *to_ccbh; struct vtscsi_request *req; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; int error; sg = sc->vtscsi_sglist; to_ccbh = &to_req->vsr_ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = ENOBUFS; goto fail; } tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, (uintptr_t) to_ccbh, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_timedout_req = to_req; req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); if (error == 0) return (0); vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p " "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh); return (error); } static void vtscsi_timedout_scsi_cmd(void *xreq) { struct vtscsi_softc *sc; struct vtscsi_request *to_req; to_req = xreq; sc = to_req->vsr_softc; vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n", to_req, to_req->vsr_ccb, to_req->vsr_state); /* Don't bother if the device is going away. */ if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) return; /* * Bail if the request is not in use. We likely raced when * stopping the callout handler or it has already been aborted. */ if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE || (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0) return; /* * Complete the request queue in case the timedout request is * actually just pending. 
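 * If the host did finish the command, its completion handler runs
 * here and recycles the request, so the state check below sees
 * VTSCSI_REQ_STATE_FREE and we bail out.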
*/ vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE) return; sc->vtscsi_stats.scsi_cmd_timeouts++; to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT; if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0) return; vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n"); vtscsi_reset_bus(sc); } static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp) { cam_status status; switch (cmd_resp->response) { case VIRTIO_SCSI_S_OK: status = CAM_REQ_CMP; break; case VIRTIO_SCSI_S_OVERRUN: status = CAM_DATA_RUN_ERR; break; case VIRTIO_SCSI_S_ABORTED: status = CAM_REQ_ABORTED; break; case VIRTIO_SCSI_S_BAD_TARGET: status = CAM_SEL_TIMEOUT; break; case VIRTIO_SCSI_S_RESET: status = CAM_SCSI_BUS_RESET; break; case VIRTIO_SCSI_S_BUSY: status = CAM_SCSI_BUSY; break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: case VIRTIO_SCSI_S_TARGET_FAILURE: case VIRTIO_SCSI_S_NEXUS_FAILURE: status = CAM_SCSI_IT_NEXUS_LOST; break; default: /* VIRTIO_SCSI_S_FAILURE */ status = CAM_REQ_CMP_ERR; break; } return (status); } static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc, struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp) { cam_status status; csio->scsi_status = cmd_resp->status; csio->resid = cmd_resp->resid; if (csio->scsi_status == SCSI_STATUS_OK) status = CAM_REQ_CMP; else status = CAM_SCSI_STATUS_ERROR; if (cmd_resp->sense_len > 0) { status |= CAM_AUTOSNS_VALID; if (cmd_resp->sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - cmd_resp->sense_len; else csio->sense_resid = 0; memcpy(&csio->sense_data, cmd_resp->sense, csio->sense_len - csio->sense_resid); } vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR, "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n", csio, csio->scsi_status, csio->resid, csio->sense_resid); return (status); } static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct virtio_scsi_cmd_resp *cmd_resp; cam_status status; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_resp = &req->vsr_cmd_resp; KASSERT(ccbh->ccbh_vtscsi_req == req, ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req)); if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) callout_stop(&req->vsr_callout); status = vtscsi_scsi_cmd_cam_status(cmd_resp); if (status == CAM_REQ_ABORTED) { if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT) status = CAM_CMD_TIMEOUT; } else if (status == CAM_REQ_CMP) status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccbh->path, 1); } if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) status |= CAM_RELEASE_SIMQ; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n", req, ccbh, status); ccbh->status = status; xpt_done(req->vsr_ccb); vtscsi_enqueue_request(sc, req); } static void vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req) { /* XXX We probably shouldn't poll forever. 
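 * A bounded variant might look like the sketch below; the retry cap
 * VTSCSI_POLL_RETRIES is hypothetical, not something this driver
 * defines:
 *
 *	for (i = 0; i < VTSCSI_POLL_RETRIES &&
 *	    (req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0; i++) {
 *		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
 *		DELAY(1000);
 *	}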
*/ req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED; do vtscsi_complete_vq(sc, sc->vtscsi_control_vq); while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0); req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED; } static int vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req, struct sglist *sg, int readable, int writable, int flag) { struct virtqueue *vq; int error; vq = sc->vtscsi_control_vq; MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL); error = virtqueue_enqueue(vq, req, sg, readable, writable); if (error) { /* * Return EAGAIN when the virtqueue does not have enough * descriptors available. */ if (error == ENOSPC || error == EMSGSIZE) error = EAGAIN; return (error); } virtqueue_notify(vq); if (flag == VTSCSI_EXECUTE_POLL) vtscsi_poll_ctrl_req(sc, req); return (0); } static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; ccb = req->vsr_ccb; ccbh = &ccb->ccb_h; tmf_resp = &req->vsr_tmf_resp; switch (tmf_resp->response) { case VIRTIO_SCSI_S_FUNCTION_COMPLETE: ccbh->status = CAM_REQ_CMP; break; case VIRTIO_SCSI_S_FUNCTION_REJECTED: ccbh->status = CAM_UA_ABORT; break; default: ccbh->status = CAM_REQ_CMP_ERR; break; } xpt_done(ccb); vtscsi_enqueue_request(sc, req); } static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct ccb_abort *cab; struct ccb_hdr *ccbh; struct ccb_hdr *abort_ccbh; struct vtscsi_request *abort_req; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; int error; sg = sc->vtscsi_sglist; cab = &req->vsr_ccb->cab; ccbh = &cab->ccb_h; tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; /* CCB header and request that's to be aborted. */ abort_ccbh = &cab->abort_ccb->ccb_h; abort_req = abort_ccbh->ccbh_vtscsi_req; if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) { error = EINVAL; goto fail; } /* Only attempt to abort requests that could be in-flight. 
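 * Any other state means the request has already completed, timed
 * out, or been aborted, so there is nothing left on the virtqueue
 * to abort.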
*/ if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) { error = EALREADY; goto fail; } abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED; if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) callout_stop(&abort_req->vsr_callout); vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, (uintptr_t) abort_ccbh, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_complete = vtscsi_complete_abort_task_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); fail: vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p " "abort_req=%p\n", error, req, abort_ccbh, abort_req); return (error); } static void vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; ccb = req->vsr_ccb; ccbh = &ccb->ccb_h; tmf_resp = &req->vsr_tmf_resp; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n", req, ccb, tmf_resp->response); if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) { ccbh->status = CAM_REQ_CMP; vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id, ccbh->target_lun); } else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); vtscsi_enqueue_request(sc, req); } static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct ccb_resetdev *crd; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; uint32_t subtype; int error; sg = sc->vtscsi_sglist; crd = &req->vsr_ccb->crd; ccbh = &crd->ccb_h; tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; if (ccbh->target_lun == CAM_LUN_WILDCARD) subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET; else subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET; vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_complete = vtscsi_complete_reset_dev_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n", error, req, ccbh); return (error); } static void vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id) { *target_id = lun[1]; *lun_id = (lun[2] << 8) | lun[3]; } static void vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[]) { lun[0] = 1; lun[1] = ccbh->target_id; lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F); lun[3] = ccbh->target_lun & 0xFF; } static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio, struct virtio_scsi_cmd_req *cmd_req) { uint8_t attr; switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: attr = VIRTIO_SCSI_S_HEAD; break; case MSG_ORDERED_Q_TAG: attr = VIRTIO_SCSI_S_ORDERED; break; case MSG_ACA_TASK: attr = VIRTIO_SCSI_S_ACA; break; default: /* MSG_SIMPLE_Q_TAG */ attr = VIRTIO_SCSI_S_SIMPLE; break; } vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun); cmd_req->tag = (uintptr_t) csio; cmd_req->task_attr = attr; memcpy(cmd_req->cdb, csio->ccb_h.flags & CAM_CDB_POINTER ? 
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes, csio->cdb_len); } static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req) { vtscsi_set_request_lun(ccbh, tmf_req->lun); tmf_req->type = VIRTIO_SCSI_T_TMF; tmf_req->subtype = subtype; tmf_req->tag = tag; } static void vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason) { int frozen; frozen = sc->vtscsi_frozen; if (reason & VTSCSI_REQUEST && (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0) sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS; if (reason & VTSCSI_REQUEST_VQ && (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0) sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL; /* Freeze the SIMQ if transitioned to frozen. */ if (frozen == 0 && sc->vtscsi_frozen != 0) { vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n"); xpt_freeze_simq(sc->vtscsi_sim, 1); } } static int vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason) { int thawed; if (sc->vtscsi_frozen == 0 || reason == 0) return (0); if (reason & VTSCSI_REQUEST && sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS; if (reason & VTSCSI_REQUEST_VQ && sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL; thawed = sc->vtscsi_frozen == 0; if (thawed != 0) vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n"); return (thawed); } static void vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; /* Use the wildcard path from our softc for bus announcements. */ if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) { xpt_async(ac_code, sc->vtscsi_path, NULL); return; } if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim), target_id, lun_id) != CAM_REQ_CMP) { vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id, lun_id_t lun_id) { union ccb *ccb; cam_status status; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n"); return; } status = xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->vtscsi_sim), target_id, lun_id); if (status != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void vtscsi_execute_rescan_bus(struct vtscsi_softc *sc) { vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); } static void vtscsi_transport_reset_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { target_id_t target_id; lun_id_t lun_id; vtscsi_get_request_lun(event->lun, &target_id, &lun_id); switch (event->reason) { case VIRTIO_SCSI_EVT_RESET_RESCAN: case VIRTIO_SCSI_EVT_RESET_REMOVED: vtscsi_execute_rescan(sc, target_id, lun_id); break; default: device_printf(sc->vtscsi_dev, "unhandled transport event reason: %d\n", event->reason); break; } } static void vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { int error; if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) { switch (event->event) { case VIRTIO_SCSI_T_TRANSPORT_RESET: vtscsi_transport_reset_event(sc, event); break; default: device_printf(sc->vtscsi_dev, "unhandled event: %d\n", event->event); break; } } else vtscsi_execute_rescan_bus(sc); /* * This should always be successful since the buffer * was just dequeued. 
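 * Re-enqueueing reuses the ring slot that was just freed by the
 * dequeue, so virtqueue_enqueue() cannot run out of descriptors
 * here.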
*/ error = vtscsi_enqueue_event_buf(sc, event); KASSERT(error == 0, ("cannot requeue event buffer: %d", error)); } static int vtscsi_enqueue_event_buf(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { struct sglist *sg; struct virtqueue *vq; int size, error; sg = sc->vtscsi_sglist; vq = sc->vtscsi_event_vq; size = sc->vtscsi_event_buf_size; bzero(event, size); sglist_reset(sg); error = sglist_append(sg, event, size); if (error) return (error); error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg); if (error) return (error); virtqueue_notify(vq); return (0); } static int vtscsi_init_event_vq(struct vtscsi_softc *sc) { struct virtio_scsi_event *event; int i, size, error; /* * The first release of QEMU with VirtIO SCSI support would crash * when attempting to notify the event virtqueue. This was fixed * when hotplug support was added. */ if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) size = sc->vtscsi_event_buf_size; else size = 0; if (size < sizeof(struct virtio_scsi_event)) return (0); for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { event = &sc->vtscsi_event_bufs[i]; error = vtscsi_enqueue_event_buf(sc, event); if (error) break; } /* * Even just one buffer is enough. Missed events are * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag. */ if (i > 0) error = 0; return (error); } static void vtscsi_reinit_event_vq(struct vtscsi_softc *sc) { struct virtio_scsi_event *event; int i, error; if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 || sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event)) return; for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { event = &sc->vtscsi_event_bufs[i]; error = vtscsi_enqueue_event_buf(sc, event); if (error) break; } KASSERT(i > 0, ("cannot reinit event vq: %d", error)); } static void vtscsi_drain_event_vq(struct vtscsi_softc *sc) { struct virtqueue *vq; int last; vq = sc->vtscsi_event_vq; last = 0; while (virtqueue_drain(vq, &last) != NULL) ; KASSERT(virtqueue_empty(vq), ("eventvq not empty")); } static void vtscsi_complete_vqs_locked(struct vtscsi_softc *sc) { VTSCSI_LOCK_OWNED(sc); if (sc->vtscsi_request_vq != NULL) vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (sc->vtscsi_control_vq != NULL) vtscsi_complete_vq(sc, sc->vtscsi_control_vq); } static void vtscsi_complete_vqs(struct vtscsi_softc *sc) { VTSCSI_LOCK(sc); vtscsi_complete_vqs_locked(sc); VTSCSI_UNLOCK(sc); } static void vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; int detach; ccb = req->vsr_ccb; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb); /* * The callout must be drained when detaching since the request is * about to be freed. The VTSCSI_MTX must not be held for this in * case the callout is pending because there is a deadlock potential. * Otherwise, the virtqueue is being drained because of a bus reset * so we only need to attempt to stop the callouts. 
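 * (The deadlock: callout_drain() sleeps until a running handler
 * finishes, but the handler itself needs VTSCSI_MTX because the
 * callout was initialized with callout_init_mtx().)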
*/ detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0; if (detach != 0) VTSCSI_LOCK_NOTOWNED(sc); else VTSCSI_LOCK_OWNED(sc); if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) { if (detach != 0) callout_drain(&req->vsr_callout); else callout_stop(&req->vsr_callout); } if (ccb != NULL) { if (detach != 0) { VTSCSI_LOCK(sc); ccb->ccb_h.status = CAM_NO_HBA; } else ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); if (detach != 0) VTSCSI_UNLOCK(sc); } vtscsi_enqueue_request(sc, req); } static void vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq) { struct vtscsi_request *req; int last; last = 0; vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq); while ((req = virtqueue_drain(vq, &last)) != NULL) vtscsi_cancel_request(sc, req); KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); } static void vtscsi_drain_vqs(struct vtscsi_softc *sc) { if (sc->vtscsi_control_vq != NULL) vtscsi_drain_vq(sc, sc->vtscsi_control_vq); if (sc->vtscsi_request_vq != NULL) vtscsi_drain_vq(sc, sc->vtscsi_request_vq); if (sc->vtscsi_event_vq != NULL) vtscsi_drain_event_vq(sc); } static void vtscsi_stop(struct vtscsi_softc *sc) { vtscsi_disable_vqs_intr(sc); virtio_stop(sc->vtscsi_dev); } static int vtscsi_reset_bus(struct vtscsi_softc *sc) { int error; VTSCSI_LOCK_OWNED(sc); if (vtscsi_bus_reset_disable != 0) { device_printf(sc->vtscsi_dev, "bus reset disabled\n"); return (0); } sc->vtscsi_flags |= VTSCSI_FLAG_RESET; /* * vtscsi_stop() will cause the in-flight requests to be canceled. * Those requests are then completed here so CAM will retry them * after the reset is complete. */ vtscsi_stop(sc); vtscsi_complete_vqs_locked(sc); /* Rid the virtqueues of any remaining requests. */ vtscsi_drain_vqs(sc); /* * Any resource shortage that froze the SIMQ cannot persist across * a bus reset so ensure it gets thawed here. */ if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) xpt_release_simq(sc->vtscsi_sim, 0); error = vtscsi_reinit(sc); if (error) { device_printf(sc->vtscsi_dev, "reinitialization failed, stopping device...\n"); vtscsi_stop(sc); } else vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET; return (error); } static void vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { #ifdef INVARIANTS int req_nsegs, resp_nsegs; req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq)); resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp)); KASSERT(req_nsegs == 1, ("request crossed page boundary")); KASSERT(resp_nsegs == 1, ("response crossed page boundary")); #endif req->vsr_softc = sc; callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0); } static int vtscsi_alloc_requests(struct vtscsi_softc *sc) { struct vtscsi_request *req; int i, nreqs; /* * Commands destined for either the request or control queues come * from the same SIM queue. Use the size of the request virtqueue * as it (should) be much more frequently used. Some additional * requests are allocated for internal (TMF) use. 
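 * As a worked example (numbers are illustrative only): a 128-entry
 * request virtqueue with indirect descriptors negotiated yields
 * 128 + VTSCSI_RESERVED_REQUESTS requests; without indirect
 * descriptors, the 128 is first divided by VTSCSI_MIN_SEGMENTS since
 * each request then occupies multiple ring descriptors.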
*/ nreqs = virtqueue_size(sc->vtscsi_request_vq); if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) nreqs /= VTSCSI_MIN_SEGMENTS; nreqs += VTSCSI_RESERVED_REQUESTS; for (i = 0; i < nreqs; i++) { req = malloc(sizeof(struct vtscsi_request), M_DEVBUF, M_NOWAIT); if (req == NULL) return (ENOMEM); vtscsi_init_request(sc, req); sc->vtscsi_nrequests++; vtscsi_enqueue_request(sc, req); } return (0); } static void vtscsi_free_requests(struct vtscsi_softc *sc) { struct vtscsi_request *req; while ((req = vtscsi_dequeue_request(sc)) != NULL) { KASSERT(callout_active(&req->vsr_callout) == 0, ("request callout still active")); sc->vtscsi_nrequests--; free(req, M_DEVBUF); } KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d", sc->vtscsi_nrequests)); } static void vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { KASSERT(req->vsr_softc == sc, ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc)); vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); /* A request is available so the SIMQ could be released. */ if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0) xpt_release_simq(sc->vtscsi_sim, 1); req->vsr_ccb = NULL; req->vsr_complete = NULL; req->vsr_ptr0 = NULL; req->vsr_state = VTSCSI_REQ_STATE_FREE; req->vsr_flags = 0; bzero(&req->vsr_ureq, sizeof(req->vsr_ureq)); bzero(&req->vsr_uresp, sizeof(req->vsr_uresp)); /* * We insert at the tail of the queue in order to make it * very unlikely a request will be reused if we race with * stopping its callout handler. */ TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link); } static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *sc) { struct vtscsi_request *req; req = TAILQ_FIRST(&sc->vtscsi_req_free); if (req != NULL) { req->vsr_state = VTSCSI_REQ_STATE_INUSE; TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link); } else sc->vtscsi_stats.dequeue_no_requests++; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); return (req); } static void vtscsi_complete_request(struct vtscsi_request *req) { if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED) req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE; if (req->vsr_complete != NULL) req->vsr_complete(req->vsr_softc, req); } static void vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq) { struct vtscsi_request *req; VTSCSI_LOCK_OWNED(sc); while ((req = virtqueue_dequeue(vq, NULL)) != NULL) vtscsi_complete_request(req); } static void vtscsi_control_vq_intr(void *xsc) { struct vtscsi_softc *sc; struct virtqueue *vq; sc = xsc; vq = sc->vtscsi_control_vq; again: VTSCSI_LOCK(sc); vtscsi_complete_vq(sc, sc->vtscsi_control_vq); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); goto again; } VTSCSI_UNLOCK(sc); } static void vtscsi_event_vq_intr(void *xsc) { struct vtscsi_softc *sc; struct virtqueue *vq; struct virtio_scsi_event *event; sc = xsc; vq = sc->vtscsi_event_vq; again: VTSCSI_LOCK(sc); while ((event = virtqueue_dequeue(vq, NULL)) != NULL) vtscsi_handle_event(sc, event); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); goto again; } VTSCSI_UNLOCK(sc); } static void vtscsi_request_vq_intr(void *xsc) { struct vtscsi_softc *sc; struct virtqueue *vq; sc = xsc; vq = sc->vtscsi_request_vq; again: VTSCSI_LOCK(sc); vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); goto again; } VTSCSI_UNLOCK(sc); } static void vtscsi_disable_vqs_intr(struct vtscsi_softc *sc) { virtqueue_disable_intr(sc->vtscsi_control_vq); 
virtqueue_disable_intr(sc->vtscsi_event_vq); virtqueue_disable_intr(sc->vtscsi_request_vq); } static void vtscsi_enable_vqs_intr(struct vtscsi_softc *sc) { virtqueue_enable_intr(sc->vtscsi_control_vq); virtqueue_enable_intr(sc->vtscsi_event_vq); virtqueue_enable_intr(sc->vtscsi_request_vq); } static void vtscsi_get_tunables(struct vtscsi_softc *sc) { char tmpstr[64]; TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug); snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level", device_get_unit(sc->vtscsi_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug); } static void vtscsi_add_sysctl(struct vtscsi_softc *sc) { device_t dev; struct vtscsi_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtscsi_dev; stats = &sc->vtscsi_stats; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level", CTLFLAG_RW, &sc->vtscsi_debug, 0, "Debug level"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts", CTLFLAG_RD, &stats->scsi_cmd_timeouts, "SCSI command timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests", CTLFLAG_RD, &stats->dequeue_no_requests, "No available requests to dequeue"); } static void vtscsi_printf_req(struct vtscsi_request *req, const char *func, const char *fmt, ...) { struct vtscsi_softc *sc; union ccb *ccb; struct sbuf sb; va_list ap; char str[192]; char path_str[64]; if (req == NULL) return; sc = req->vsr_softc; ccb = req->vsr_ccb; va_start(ap, fmt); sbuf_new(&sb, str, sizeof(str), 0); if (ccb == NULL) { sbuf_printf(&sb, "(noperiph:%s%d:%u): ", cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim), cam_sim_bus(sc->vtscsi_sim)); } else { xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&ccb->csio, &sb); sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len); } } sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func, sbuf_data(&sb)); }
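/*
 * Illustrative sketch, not part of the driver: the single-level LUN
 * addressing built by vtscsi_set_request_lun() and consumed by
 * vtscsi_get_request_lun() above, exercised as a standalone userland
 * program. Byte 0 is the fixed 0x01 bus identifier, byte 1 is the
 * target, and bytes 2-3 carry the LUN in SAM flat-space format
 * (0x40 ORed with the high bits, then the low byte). The decoder
 * below masks off the flat-space prefix so the round trip recovers
 * the original id; the in-driver decoder, which reads LUNs out of
 * event buffers, returns the raw bytes instead.
 */
#include <stdio.h>
#include <stdint.h>

static void
encode_lun(uint8_t lun[4], uint8_t target, uint16_t lun_id)
{
	lun[0] = 1;				/* fixed bus identifier */
	lun[1] = target;
	lun[2] = 0x40 | ((lun_id >> 8) & 0x3F);	/* flat-space prefix */
	lun[3] = lun_id & 0xFF;
}

static void
decode_lun(const uint8_t lun[4], uint8_t *target, uint16_t *lun_id)
{
	*target = lun[1];
	*lun_id = ((lun[2] & 0x3F) << 8) | lun[3];
}

int
main(void)
{
	uint8_t lun[4];
	uint8_t target;
	uint16_t lun_id;

	encode_lun(lun, 2, 261);
	decode_lun(lun, &target, &lun_id);
	/* Prints: bytes 01 02 41 05 -> target 2 lun 261 */
	printf("bytes %02x %02x %02x %02x -> target %u lun %u\n",
	    lun[0], lun[1], lun[2], lun[3], target, lun_id);
	return (0);
}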