Index: projects/virtio/sys/dev/virtio/balloon/virtio_balloon.c =================================================================== --- projects/virtio/sys/dev/virtio/balloon/virtio_balloon.c (revision 245709) +++ projects/virtio/sys/dev/virtio/balloon/virtio_balloon.c (revision 245710) @@ -1,570 +1,568 @@ /*- * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO memory balloon devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" struct vtballoon_softc { device_t vtballoon_dev; struct mtx vtballoon_mtx; uint64_t vtballoon_features; uint32_t vtballoon_flags; #define VTBALLOON_FLAG_DETACH 0x01 struct virtqueue *vtballoon_inflate_vq; struct virtqueue *vtballoon_deflate_vq; uint32_t vtballoon_desired_npages; uint32_t vtballoon_current_npages; TAILQ_HEAD(,vm_page) vtballoon_pages; struct proc *vtballoon_kproc; uint32_t *vtballoon_page_frames; int vtballoon_timeout; }; static struct virtio_feature_desc vtballoon_feature_desc[] = { { VIRTIO_BALLOON_F_MUST_TELL_HOST, "MustTellHost" }, { VIRTIO_BALLOON_F_STATS_VQ, "StatsVq" }, { 0, NULL } }; static int vtballoon_probe(device_t); static int vtballoon_attach(device_t); static int vtballoon_detach(device_t); static int vtballoon_config_change(device_t); static void vtballoon_negotiate_features(struct vtballoon_softc *); static int vtballoon_alloc_virtqueues(struct vtballoon_softc *); -static int vtballoon_vq_intr(void *); +static void vtballoon_vq_intr(void *); static void vtballoon_inflate(struct vtballoon_softc *, int); static void vtballoon_deflate(struct vtballoon_softc *, int); static void vtballoon_send_page_frames(struct vtballoon_softc *, struct virtqueue *, int); static void vtballoon_pop(struct vtballoon_softc *); static void vtballoon_stop(struct vtballoon_softc *); static vm_page_t vtballoon_alloc_page(struct vtballoon_softc *); static void vtballoon_free_page(struct vtballoon_softc *, vm_page_t); static int vtballoon_sleep(struct vtballoon_softc *); static void vtballoon_thread(void *); static void vtballoon_add_sysctl(struct vtballoon_softc *); /* Features desired/implemented by this driver. 
*/ #define VTBALLOON_FEATURES 0 /* Timeout between retries when the balloon needs inflating. */ #define VTBALLOON_LOWMEM_TIMEOUT hz /* * Maximum number of pages we'll request to inflate or deflate * the balloon in one virtqueue request. Both Linux and NetBSD * have settled on 256, doing up to 1MB at a time. */ #define VTBALLOON_PAGES_PER_REQUEST 256 /* Must be able to fit all page frames in one page (segment). */ CTASSERT(VTBALLOON_PAGES_PER_REQUEST * sizeof(uint32_t) <= PAGE_SIZE); #define VTBALLOON_MTX(_sc) &(_sc)->vtballoon_mtx #define VTBALLOON_LOCK_INIT(_sc, _name) mtx_init(VTBALLOON_MTX((_sc)), _name, \ "VirtIO Balloon Lock", MTX_SPIN) #define VTBALLOON_LOCK(_sc) mtx_lock_spin(VTBALLOON_MTX((_sc))) #define VTBALLOON_UNLOCK(_sc) mtx_unlock_spin(VTBALLOON_MTX((_sc))) #define VTBALLOON_LOCK_DESTROY(_sc) mtx_destroy(VTBALLOON_MTX((_sc))) static device_method_t vtballoon_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtballoon_probe), DEVMETHOD(device_attach, vtballoon_attach), DEVMETHOD(device_detach, vtballoon_detach), /* VirtIO methods. */ DEVMETHOD(virtio_config_change, vtballoon_config_change), DEVMETHOD_END }; static driver_t vtballoon_driver = { "vtballoon", vtballoon_methods, sizeof(struct vtballoon_softc) }; static devclass_t vtballoon_devclass; DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver, vtballoon_devclass, 0, 0); MODULE_VERSION(virtio_balloon, 1); MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1); static int vtballoon_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_BALLOON) return (ENXIO); device_set_desc(dev, "VirtIO Balloon Adapter"); return (BUS_PROBE_DEFAULT); } static int vtballoon_attach(device_t dev) { struct vtballoon_softc *sc; int error; sc = device_get_softc(dev); sc->vtballoon_dev = dev; VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev)); TAILQ_INIT(&sc->vtballoon_pages); vtballoon_add_sysctl(sc); virtio_set_feature_desc(dev, vtballoon_feature_desc); vtballoon_negotiate_features(sc); sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST * sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtballoon_page_frames == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate page frame request array\n"); goto fail; } error = vtballoon_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_MISC); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } error = kproc_create(vtballoon_thread, sc, &sc->vtballoon_kproc, 0, 0, "virtio_balloon"); if (error) { device_printf(dev, "cannot create balloon kproc\n"); goto fail; } virtqueue_enable_intr(sc->vtballoon_inflate_vq); virtqueue_enable_intr(sc->vtballoon_deflate_vq); fail: if (error) vtballoon_detach(dev); return (error); } static int vtballoon_detach(device_t dev) { struct vtballoon_softc *sc; sc = device_get_softc(dev); if (sc->vtballoon_kproc != NULL) { VTBALLOON_LOCK(sc); sc->vtballoon_flags |= VTBALLOON_FLAG_DETACH; wakeup_one(sc); msleep_spin(sc->vtballoon_kproc, VTBALLOON_MTX(sc), "vtbdth", 0); VTBALLOON_UNLOCK(sc); sc->vtballoon_kproc = NULL; } if (device_is_attached(dev)) { vtballoon_pop(sc); vtballoon_stop(sc); } if (sc->vtballoon_page_frames != NULL) { free(sc->vtballoon_page_frames, M_DEVBUF); sc->vtballoon_page_frames = NULL; } VTBALLOON_LOCK_DESTROY(sc); return (0); } static int vtballoon_config_change(device_t dev) { struct vtballoon_softc *sc; sc = device_get_softc(dev); VTBALLOON_LOCK(sc); wakeup_one(sc); VTBALLOON_UNLOCK(sc); return 
(1); } static void vtballoon_negotiate_features(struct vtballoon_softc *sc) { device_t dev; uint64_t features; dev = sc->vtballoon_dev; features = virtio_negotiate_features(dev, VTBALLOON_FEATURES); sc->vtballoon_features = features; } static int vtballoon_alloc_virtqueues(struct vtballoon_softc *sc) { device_t dev; struct vq_alloc_info vq_info[2]; int nvqs; dev = sc->vtballoon_dev; nvqs = 2; VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtballoon_vq_intr, sc, &sc->vtballoon_inflate_vq, "%s inflate", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtballoon_vq_intr, sc, &sc->vtballoon_deflate_vq, "%s deflate", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); } -static int +static void vtballoon_vq_intr(void *xsc) { struct vtballoon_softc *sc; sc = xsc; VTBALLOON_LOCK(sc); wakeup_one(sc); VTBALLOON_UNLOCK(sc); - - return (1); } static void vtballoon_inflate(struct vtballoon_softc *sc, int npages) { struct virtqueue *vq; vm_page_t m; int i; vq = sc->vtballoon_inflate_vq; m = NULL; if (npages > VTBALLOON_PAGES_PER_REQUEST) npages = VTBALLOON_PAGES_PER_REQUEST; KASSERT(npages > 0, ("balloon doesn't need inflating?")); for (i = 0; i < npages; i++) { if ((m = vtballoon_alloc_page(sc)) == NULL) break; sc->vtballoon_page_frames[i] = VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT; KASSERT(m->queue == PQ_NONE, ("allocated page on queue")); TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, pageq); } if (i > 0) vtballoon_send_page_frames(sc, vq, i); if (m == NULL) sc->vtballoon_timeout = VTBALLOON_LOWMEM_TIMEOUT; } static void vtballoon_deflate(struct vtballoon_softc *sc, int npages) { TAILQ_HEAD(, vm_page) free_pages; struct virtqueue *vq; vm_page_t m; int i; vq = sc->vtballoon_deflate_vq; TAILQ_INIT(&free_pages); if (npages > VTBALLOON_PAGES_PER_REQUEST) npages = VTBALLOON_PAGES_PER_REQUEST; KASSERT(npages > 0, ("balloon doesn't need deflating?")); for (i = 0; i < npages; i++) { m = TAILQ_FIRST(&sc->vtballoon_pages); KASSERT(m != NULL, ("no more pages to deflate")); sc->vtballoon_page_frames[i] = VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT; TAILQ_REMOVE(&sc->vtballoon_pages, m, pageq); TAILQ_INSERT_TAIL(&free_pages, m, pageq); } if (i > 0) { /* Always tell host first before freeing the pages. */ vtballoon_send_page_frames(sc, vq, i); while ((m = TAILQ_FIRST(&free_pages)) != NULL) { TAILQ_REMOVE(&free_pages, m, pageq); vtballoon_free_page(sc, m); } } KASSERT((TAILQ_EMPTY(&sc->vtballoon_pages) && sc->vtballoon_current_npages == 0) || (!TAILQ_EMPTY(&sc->vtballoon_pages) && sc->vtballoon_current_npages != 0), ("balloon empty?")); } static void vtballoon_send_page_frames(struct vtballoon_softc *sc, struct virtqueue *vq, int npages) { struct sglist sg; struct sglist_seg segs[1]; void *c; int error; sglist_init(&sg, 1, segs); error = sglist_append(&sg, sc->vtballoon_page_frames, npages * sizeof(uint32_t)); KASSERT(error == 0, ("error adding page frames to sglist")); error = virtqueue_enqueue(vq, vq, &sg, 1, 0); KASSERT(error == 0, ("error enqueuing page frames to virtqueue")); virtqueue_notify(vq); /* * Inflate and deflate operations are done synchronously. The * interrupt handler will wake us up. 
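 *
 * A rough sketch of the round trip, using only names from this
 * driver:
 *
 *   virtqueue_notify(vq);                    (kick the host)
 *   msleep_spin(sc, ...);                    (sleep on the softc channel)
 *   vtballoon_vq_intr() -> wakeup_one(sc);   (host completion wakes us)
 *   virtqueue_dequeue(vq, NULL);             (reclaim the request cookie)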
*/ VTBALLOON_LOCK(sc); while ((c = virtqueue_dequeue(vq, NULL)) == NULL) msleep_spin(sc, VTBALLOON_MTX(sc), "vtbspf", 0); VTBALLOON_UNLOCK(sc); KASSERT(c == vq, ("unexpected balloon operation response")); } static void vtballoon_pop(struct vtballoon_softc *sc) { while (!TAILQ_EMPTY(&sc->vtballoon_pages)) vtballoon_deflate(sc, sc->vtballoon_current_npages); } static void vtballoon_stop(struct vtballoon_softc *sc) { virtqueue_disable_intr(sc->vtballoon_inflate_vq); virtqueue_disable_intr(sc->vtballoon_deflate_vq); virtio_stop(sc->vtballoon_dev); } static vm_page_t vtballoon_alloc_page(struct vtballoon_softc *sc) { vm_page_t m; m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); if (m != NULL) sc->vtballoon_current_npages++; return (m); } static void vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m) { vm_page_unwire(m, 0); vm_page_free(m); sc->vtballoon_current_npages--; } static uint32_t vtballoon_desired_size(struct vtballoon_softc *sc) { uint32_t desired; desired = virtio_read_dev_config_4(sc->vtballoon_dev, offsetof(struct virtio_balloon_config, num_pages)); return (le32toh(desired)); } static void vtballoon_update_size(struct vtballoon_softc *sc) { virtio_write_dev_config_4(sc->vtballoon_dev, offsetof(struct virtio_balloon_config, actual), htole32(sc->vtballoon_current_npages)); } static int vtballoon_sleep(struct vtballoon_softc *sc) { int rc, timeout; uint32_t current, desired; rc = 0; current = sc->vtballoon_current_npages; VTBALLOON_LOCK(sc); for (;;) { if (sc->vtballoon_flags & VTBALLOON_FLAG_DETACH) { rc = 1; break; } desired = vtballoon_desired_size(sc); sc->vtballoon_desired_npages = desired; /* * If given, use non-zero timeout on the first time through * the loop. On subsequent times, timeout will be zero so * we will reevaluate the desired size of the balloon and * break out to retry if needed. */ timeout = sc->vtballoon_timeout; sc->vtballoon_timeout = 0; if (current > desired) break; if (current < desired && timeout == 0) break; msleep_spin(sc, VTBALLOON_MTX(sc), "vtbslp", timeout); } VTBALLOON_UNLOCK(sc); return (rc); } static void vtballoon_thread(void *xsc) { struct vtballoon_softc *sc; uint32_t current, desired; sc = xsc; for (;;) { if (vtballoon_sleep(sc) != 0) break; current = sc->vtballoon_current_npages; desired = sc->vtballoon_desired_npages; if (desired != current) { if (desired > current) vtballoon_inflate(sc, desired - current); else vtballoon_deflate(sc, current - desired); vtballoon_update_size(sc); } } kproc_exit(0); } static void vtballoon_add_sysctl(struct vtballoon_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtballoon_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "desired", CTLFLAG_RD, &sc->vtballoon_desired_npages, sizeof(uint32_t), "Desired balloon size in pages"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "current", CTLFLAG_RD, &sc->vtballoon_current_npages, sizeof(uint32_t), "Current balloon size in pages"); } Index: projects/virtio/sys/dev/virtio/block/virtio_blk.c =================================================================== --- projects/virtio/sys/dev/virtio/block/virtio_blk.c (revision 245709) +++ projects/virtio/sys/dev/virtio/block/virtio_blk.c (revision 245710) @@ -1,1205 +1,1203 @@ /*- * Copyright (c) 2011, Bryan Venteicher * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO block devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" struct vtblk_request { struct virtio_blk_outhdr vbr_hdr; struct bio *vbr_bp; uint8_t vbr_ack; TAILQ_ENTRY(vtblk_request) vbr_link; }; struct vtblk_softc { device_t vtblk_dev; struct mtx vtblk_mtx; uint64_t vtblk_features; uint32_t vtblk_flags; #define VTBLK_FLAG_INDIRECT 0x0001 #define VTBLK_FLAG_READONLY 0x0002 #define VTBLK_FLAG_DETACH 0x0004 #define VTBLK_FLAG_SUSPEND 0x0008 #define VTBLK_FLAG_DUMPING 0x0010 struct virtqueue *vtblk_vq; struct sglist *vtblk_sglist; struct disk *vtblk_disk; struct bio_queue_head vtblk_bioq; TAILQ_HEAD(, vtblk_request) vtblk_req_free; TAILQ_HEAD(, vtblk_request) vtblk_req_ready; struct taskqueue *vtblk_tq; struct task vtblk_intr_task; int vtblk_max_nsegs; int vtblk_request_count; struct vtblk_request vtblk_dump_request; }; static struct virtio_feature_desc vtblk_feature_desc[] = { { VIRTIO_BLK_F_BARRIER, "HostBarrier" }, { VIRTIO_BLK_F_SIZE_MAX, "MaxSegSize" }, { VIRTIO_BLK_F_SEG_MAX, "MaxNumSegs" }, { VIRTIO_BLK_F_GEOMETRY, "DiskGeometry" }, { VIRTIO_BLK_F_RO, "ReadOnly" }, { VIRTIO_BLK_F_BLK_SIZE, "BlockSize" }, { VIRTIO_BLK_F_SCSI, "SCSICmds" }, { VIRTIO_BLK_F_FLUSH, "FlushCmd" }, { VIRTIO_BLK_F_TOPOLOGY, "Topology" }, { 0, NULL } }; static int vtblk_modevent(module_t, int, void *); static int vtblk_probe(device_t); static int vtblk_attach(device_t); static int vtblk_detach(device_t); static int vtblk_suspend(device_t); static int vtblk_resume(device_t); static int vtblk_shutdown(device_t); static int vtblk_open(struct disk *); static int vtblk_close(struct disk *); static int vtblk_ioctl(struct disk *, u_long, void *, int, struct thread *); static int vtblk_dump(void *, void *, vm_offset_t, off_t, size_t); static void vtblk_strategy(struct bio *); static void vtblk_negotiate_features(struct vtblk_softc *); static int vtblk_maximum_segments(struct vtblk_softc *, struct virtio_blk_config *); static int vtblk_alloc_virtqueue(struct vtblk_softc *); static void vtblk_alloc_disk(struct vtblk_softc *, struct virtio_blk_config *); static void vtblk_create_disk(struct vtblk_softc *); 
static int vtblk_quiesce(struct vtblk_softc *); static void vtblk_startio(struct vtblk_softc *); static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *); static int vtblk_execute_request(struct vtblk_softc *, struct vtblk_request *); -static int vtblk_vq_intr(void *); +static void vtblk_vq_intr(void *); static void vtblk_intr_task(void *, int); static void vtblk_stop(struct vtblk_softc *); static void vtblk_get_ident(struct vtblk_softc *); static void vtblk_prepare_dump(struct vtblk_softc *); static int vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t); static int vtblk_flush_dump(struct vtblk_softc *); static int vtblk_poll_request(struct vtblk_softc *, struct vtblk_request *); static void vtblk_finish_completed(struct vtblk_softc *); static void vtblk_drain_vq(struct vtblk_softc *, int); static void vtblk_drain(struct vtblk_softc *); static int vtblk_alloc_requests(struct vtblk_softc *); static void vtblk_free_requests(struct vtblk_softc *); static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *); static void vtblk_enqueue_request(struct vtblk_softc *, struct vtblk_request *); static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *); static void vtblk_enqueue_ready(struct vtblk_softc *, struct vtblk_request *); static int vtblk_request_error(struct vtblk_request *); static void vtblk_finish_bio(struct bio *, int); /* Tunables. */ static int vtblk_no_ident = 0; TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident); /* Features desired/implemented by this driver. */ #define VTBLK_FEATURES \ (VIRTIO_BLK_F_BARRIER | \ VIRTIO_BLK_F_SIZE_MAX | \ VIRTIO_BLK_F_SEG_MAX | \ VIRTIO_BLK_F_GEOMETRY | \ VIRTIO_BLK_F_RO | \ VIRTIO_BLK_F_BLK_SIZE | \ VIRTIO_BLK_F_FLUSH | \ VIRTIO_RING_F_INDIRECT_DESC) #define VTBLK_MTX(_sc) &(_sc)->vtblk_mtx #define VTBLK_LOCK_INIT(_sc, _name) \ mtx_init(VTBLK_MTX((_sc)), (_name), \ "VTBLK Lock", MTX_DEF) #define VTBLK_LOCK(_sc) mtx_lock(VTBLK_MTX((_sc))) #define VTBLK_UNLOCK(_sc) mtx_unlock(VTBLK_MTX((_sc))) #define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc))) #define VTBLK_LOCK_ASSERT(_sc) mtx_assert(VTBLK_MTX((_sc)), MA_OWNED) #define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \ mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED) #define VTBLK_DISK_NAME "vtbd" #define VTBLK_QUIESCE_TIMEOUT (30 * hz) /* * Each block request uses at least two segments - one for the header * and one for the status. */ #define VTBLK_MIN_SEGMENTS 2 static device_method_t vtblk_methods[] = { /* Device methods. 
*/ DEVMETHOD(device_probe, vtblk_probe), DEVMETHOD(device_attach, vtblk_attach), DEVMETHOD(device_detach, vtblk_detach), DEVMETHOD(device_suspend, vtblk_suspend), DEVMETHOD(device_resume, vtblk_resume), DEVMETHOD(device_shutdown, vtblk_shutdown), DEVMETHOD_END }; static driver_t vtblk_driver = { "vtblk", vtblk_methods, sizeof(struct vtblk_softc) }; static devclass_t vtblk_devclass; DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass, vtblk_modevent, 0); MODULE_VERSION(virtio_blk, 1); MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1); static int vtblk_modevent(module_t mod, int type, void *unused) { int error; error = 0; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static int vtblk_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK) return (ENXIO); device_set_desc(dev, "VirtIO Block Adapter"); return (BUS_PROBE_DEFAULT); } static int vtblk_attach(device_t dev) { struct vtblk_softc *sc; struct virtio_blk_config blkcfg; int error; sc = device_get_softc(dev); sc->vtblk_dev = dev; VTBLK_LOCK_INIT(sc, device_get_nameunit(dev)); bioq_init(&sc->vtblk_bioq); TAILQ_INIT(&sc->vtblk_req_free); TAILQ_INIT(&sc->vtblk_req_ready); virtio_set_feature_desc(dev, vtblk_feature_desc); vtblk_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtblk_flags |= VTBLK_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_BLK_F_RO)) sc->vtblk_flags |= VTBLK_FLAG_READONLY; /* Get local copy of config. */ virtio_read_device_config(dev, 0, &blkcfg, sizeof(struct virtio_blk_config)); /* * With the current sglist(9) implementation, it is not easy * for us to support a maximum segment size as adjacent * segments are coalesced. For now, just make sure it's larger * than the maximum supported transfer size. 
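 *
 * For example, two physically adjacent 4KB pages appended with
 * sglist(9) are coalesced into a single 8KB segment, so a
 * host-imposed per-segment size_max below MAXPHYS could silently
 * be violated; the check below therefore rejects hosts
 * advertising size_max < MAXPHYS.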
*/ if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) { if (blkcfg.size_max < MAXPHYS) { error = ENOTSUP; device_printf(dev, "host requires unsupported " "maximum segment size feature\n"); goto fail; } } sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg); if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) { error = EINVAL; device_printf(dev, "fewer than minimum number of segments " "allowed: %d\n", sc->vtblk_max_nsegs); goto fail; } sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT); if (sc->vtblk_sglist == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate sglist\n"); goto fail; } error = vtblk_alloc_virtqueue(sc); if (error) { device_printf(dev, "cannot allocate virtqueue\n"); goto fail; } error = vtblk_alloc_requests(sc); if (error) { device_printf(dev, "cannot preallocate requests\n"); goto fail; } vtblk_alloc_disk(sc, &blkcfg); TASK_INIT(&sc->vtblk_intr_task, 0, vtblk_intr_task, sc); sc->vtblk_tq = taskqueue_create_fast("vtblk_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->vtblk_tq); if (sc->vtblk_tq == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate taskqueue\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY); if (error) { device_printf(dev, "cannot setup virtqueue interrupt\n"); goto fail; } taskqueue_start_threads(&sc->vtblk_tq, 1, PI_DISK, "%s taskq", device_get_nameunit(dev)); vtblk_create_disk(sc); virtqueue_enable_intr(sc->vtblk_vq); fail: if (error) vtblk_detach(dev); return (error); } static int vtblk_detach(device_t dev) { struct vtblk_softc *sc; sc = device_get_softc(dev); VTBLK_LOCK(sc); sc->vtblk_flags |= VTBLK_FLAG_DETACH; if (device_is_attached(dev)) vtblk_stop(sc); VTBLK_UNLOCK(sc); if (sc->vtblk_tq != NULL) { taskqueue_drain(sc->vtblk_tq, &sc->vtblk_intr_task); taskqueue_free(sc->vtblk_tq); sc->vtblk_tq = NULL; } vtblk_drain(sc); if (sc->vtblk_disk != NULL) { disk_destroy(sc->vtblk_disk); sc->vtblk_disk = NULL; } if (sc->vtblk_sglist != NULL) { sglist_free(sc->vtblk_sglist); sc->vtblk_sglist = NULL; } VTBLK_LOCK_DESTROY(sc); return (0); } static int vtblk_suspend(device_t dev) { struct vtblk_softc *sc; int error; sc = device_get_softc(dev); VTBLK_LOCK(sc); sc->vtblk_flags |= VTBLK_FLAG_SUSPEND; /* XXX BMV: virtio_stop(), etc needed here? */ error = vtblk_quiesce(sc); if (error) sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND; VTBLK_UNLOCK(sc); return (error); } static int vtblk_resume(device_t dev) { struct vtblk_softc *sc; sc = device_get_softc(dev); VTBLK_LOCK(sc); /* XXX BMV: virtio_reinit(), etc needed here? */ sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND; vtblk_startio(sc); VTBLK_UNLOCK(sc); return (0); } static int vtblk_shutdown(device_t dev) { return (0); } static int vtblk_open(struct disk *dp) { struct vtblk_softc *sc; if ((sc = dp->d_drv1) == NULL) return (ENXIO); return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? 
ENXIO : 0); } static int vtblk_close(struct disk *dp) { struct vtblk_softc *sc; if ((sc = dp->d_drv1) == NULL) return (ENXIO); return (0); } static int vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct vtblk_softc *sc; if ((sc = dp->d_drv1) == NULL) return (ENXIO); return (ENOTTY); } static int vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct disk *dp; struct vtblk_softc *sc; int error; dp = arg; if ((sc = dp->d_drv1) == NULL) return (ENXIO); VTBLK_LOCK(sc); if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) { vtblk_prepare_dump(sc); sc->vtblk_flags |= VTBLK_FLAG_DUMPING; } if (length > 0) error = vtblk_write_dump(sc, virtual, offset, length); else if (virtual == NULL && offset == 0) error = vtblk_flush_dump(sc); else { error = EINVAL; sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING; } VTBLK_UNLOCK(sc); return (error); } static void vtblk_strategy(struct bio *bp) { struct vtblk_softc *sc; if ((sc = bp->bio_disk->d_drv1) == NULL) { vtblk_finish_bio(bp, EINVAL); return; } /* * Fail any write if RO. Unfortunately, there does not seem to * be a better way to report our readonly'ness to GEOM above. */ if (sc->vtblk_flags & VTBLK_FLAG_READONLY && (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) { vtblk_finish_bio(bp, EROFS); return; } #ifdef INVARIANTS /* * Prevent read/write buffers spanning too many segments from * getting into the queue. This should only trip if d_maxsize * was incorrectly set. */ if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { int nsegs, max_nsegs; nsegs = sglist_count(bp->bio_data, bp->bio_bcount); max_nsegs = sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS; KASSERT(nsegs <= max_nsegs, ("bio %p spanned too many segments: %d, max: %d", bp, nsegs, max_nsegs)); } #endif VTBLK_LOCK(sc); if (sc->vtblk_flags & VTBLK_FLAG_DETACH) vtblk_finish_bio(bp, ENXIO); else { bioq_disksort(&sc->vtblk_bioq, bp); if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0) vtblk_startio(sc); } VTBLK_UNLOCK(sc); } static void vtblk_negotiate_features(struct vtblk_softc *sc) { device_t dev; uint64_t features; dev = sc->vtblk_dev; features = VTBLK_FEATURES; sc->vtblk_features = virtio_negotiate_features(dev, features); } static int vtblk_maximum_segments(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg) { device_t dev; int nsegs; dev = sc->vtblk_dev; nsegs = VTBLK_MIN_SEGMENTS; if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) { nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1); if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT) nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); } else nsegs += 1; return (nsegs); } static int vtblk_alloc_virtqueue(struct vtblk_softc *sc) { device_t dev; struct vq_alloc_info vq_info; dev = sc->vtblk_dev; VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs, vtblk_vq_intr, sc, &sc->vtblk_vq, "%s request", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info)); } static void vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg) { device_t dev; struct disk *dp; dev = sc->vtblk_dev; sc->vtblk_disk = dp = disk_alloc(); dp->d_open = vtblk_open; dp->d_close = vtblk_close; dp->d_ioctl = vtblk_ioctl; dp->d_strategy = vtblk_strategy; dp->d_name = VTBLK_DISK_NAME; dp->d_unit = device_get_unit(dev); dp->d_drv1 = sc; if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0) dp->d_dump = vtblk_dump; /* Capacity is always in 512-byte units. 
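 *
 * For example, a capacity of 2097152 sectors gives
 * d_mediasize = 2097152 * 512 = 1GB, independent of the
 * negotiated logical sector size below.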
*/ dp->d_mediasize = blkcfg->capacity * 512; if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE)) dp->d_sectorsize = blkcfg->blk_size; else dp->d_sectorsize = 512; /* * The VirtIO maximum I/O size is given in terms of segments. * However, FreeBSD limits I/O size by logical buffer size, not * by physically contiguous pages. Therefore, we have to assume * no pages are contiguous. This may impose an artificially low * maximum I/O size. But in practice, since QEMU advertises 128 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE, * which is typically greater than MAXPHYS. Eventually we should * just advertise MAXPHYS and split buffers that are too big. * * Note we must subtract one additional segment in case of non * page aligned buffers. */ dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) * PAGE_SIZE; if (dp->d_maxsize < PAGE_SIZE) dp->d_maxsize = PAGE_SIZE; /* XXX */ if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) { dp->d_fwsectors = blkcfg->geometry.sectors; dp->d_fwheads = blkcfg->geometry.heads; } if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH)) dp->d_flags |= DISKFLAG_CANFLUSHCACHE; } static void vtblk_create_disk(struct vtblk_softc *sc) { struct disk *dp; dp = sc->vtblk_disk; /* * Retrieving the identification string must be done after * the virtqueue interrupt is setup otherwise it will hang. */ vtblk_get_ident(sc); device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n", (uintmax_t) dp->d_mediasize >> 20, (uintmax_t) dp->d_mediasize / dp->d_sectorsize, dp->d_sectorsize); disk_create(dp, DISK_VERSION); } static int vtblk_quiesce(struct vtblk_softc *sc) { int error; error = 0; VTBLK_LOCK_ASSERT(sc); while (!virtqueue_empty(sc->vtblk_vq)) { if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq", VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) { error = EBUSY; break; } } return (error); } static void vtblk_startio(struct vtblk_softc *sc) { struct virtqueue *vq; struct vtblk_request *req; int enq; vq = sc->vtblk_vq; enq = 0; VTBLK_LOCK_ASSERT(sc); while (!virtqueue_full(vq)) { if ((req = vtblk_dequeue_ready(sc)) == NULL) req = vtblk_bio_request(sc); if (req == NULL) break; if (vtblk_execute_request(sc, req) != 0) { vtblk_enqueue_ready(sc, req); break; } enq++; } if (enq > 0) virtqueue_notify(vq); } static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *sc) { struct bio_queue_head *bioq; struct vtblk_request *req; struct bio *bp; bioq = &sc->vtblk_bioq; if (bioq_first(bioq) == NULL) return (NULL); req = vtblk_dequeue_request(sc); if (req == NULL) return (NULL); bp = bioq_takefirst(bioq); req->vbr_bp = bp; req->vbr_ack = -1; req->vbr_hdr.ioprio = 1; switch (bp->bio_cmd) { case BIO_FLUSH: req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH; break; case BIO_READ: req->vbr_hdr.type = VIRTIO_BLK_T_IN; req->vbr_hdr.sector = bp->bio_offset / 512; break; case BIO_WRITE: req->vbr_hdr.type = VIRTIO_BLK_T_OUT; req->vbr_hdr.sector = bp->bio_offset / 512; break; default: panic("%s: bio with unhandled cmd: %d", __FUNCTION__, bp->bio_cmd); } if (bp->bio_flags & BIO_ORDERED) req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER; return (req); } static int vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req) { struct sglist *sg; struct bio *bp; int readable, writable, error; sg = sc->vtblk_sglist; bp = req->vbr_bp; writable = 0; VTBLK_LOCK_ASSERT(sc); sglist_reset(sg); sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr)); if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { error = sglist_append(sg, bp->bio_data, bp->bio_bcount); if (error || 
sg->sg_nseg == sg->sg_maxseg) panic("%s: data buffer too big bio:%p error:%d", __FUNCTION__, bp, error); /* BIO_READ means the host writes into our buffer. */ if (bp->bio_cmd == BIO_READ) writable = sg->sg_nseg - 1; } writable++; sglist_append(sg, &req->vbr_ack, sizeof(uint8_t)); readable = sg->sg_nseg - writable; return (virtqueue_enqueue(sc->vtblk_vq, req, sg, readable, writable)); } -static int +static void vtblk_vq_intr(void *xsc) { struct vtblk_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtblk_vq); taskqueue_enqueue_fast(sc->vtblk_tq, &sc->vtblk_intr_task); - - return (1); } static void vtblk_intr_task(void *arg, int pending) { struct vtblk_softc *sc; struct virtqueue *vq; sc = arg; vq = sc->vtblk_vq; VTBLK_LOCK(sc); if (sc->vtblk_flags & VTBLK_FLAG_DETACH) { VTBLK_UNLOCK(sc); return; } vtblk_finish_completed(sc); if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0) vtblk_startio(sc); else wakeup(&sc->vtblk_vq); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTBLK_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtblk_tq, &sc->vtblk_intr_task); return; } VTBLK_UNLOCK(sc); } static void vtblk_stop(struct vtblk_softc *sc) { virtqueue_disable_intr(sc->vtblk_vq); virtio_stop(sc->vtblk_dev); } static void vtblk_get_ident(struct vtblk_softc *sc) { struct bio buf; struct disk *dp; struct vtblk_request *req; int len, error; dp = sc->vtblk_disk; len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE); if (vtblk_no_ident != 0) return; req = vtblk_dequeue_request(sc); if (req == NULL) return; req->vbr_ack = -1; req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID; req->vbr_hdr.ioprio = 1; req->vbr_hdr.sector = 0; req->vbr_bp = &buf; bzero(&buf, sizeof(struct bio)); buf.bio_cmd = BIO_READ; buf.bio_data = dp->d_ident; buf.bio_bcount = len; VTBLK_LOCK(sc); error = vtblk_poll_request(sc, req); VTBLK_UNLOCK(sc); vtblk_enqueue_request(sc, req); if (error) { device_printf(sc->vtblk_dev, "error getting device identifier: %d\n", error); } } static void vtblk_prepare_dump(struct vtblk_softc *sc) { device_t dev; struct virtqueue *vq; dev = sc->vtblk_dev; vq = sc->vtblk_vq; vtblk_stop(sc); /* * Drain all requests caught in-flight in the virtqueue, * skipping biodone(). When dumping, only one request is * outstanding at a time, and we just poll the virtqueue * for the response. 
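 *
 * In outline, each dump I/O then takes the polled path of
 * vtblk_poll_request():
 *
 *   vtblk_execute_request(sc, req);    (enqueue header/data/ack)
 *   virtqueue_notify(vq);              (kick the host)
 *   virtqueue_poll(vq, NULL);          (busy-wait for the response)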
*/ vtblk_drain_vq(sc, 1); if (virtio_reinit(dev, sc->vtblk_features) != 0) panic("cannot reinit VirtIO block device during dump"); virtqueue_disable_intr(vq); virtio_reinit_complete(dev); } static int vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset, size_t length) { struct bio buf; struct vtblk_request *req; req = &sc->vtblk_dump_request; req->vbr_ack = -1; req->vbr_hdr.type = VIRTIO_BLK_T_OUT; req->vbr_hdr.ioprio = 1; req->vbr_hdr.sector = offset / 512; req->vbr_bp = &buf; bzero(&buf, sizeof(struct bio)); buf.bio_cmd = BIO_WRITE; buf.bio_data = virtual; buf.bio_bcount = length; return (vtblk_poll_request(sc, req)); } static int vtblk_flush_dump(struct vtblk_softc *sc) { struct bio buf; struct vtblk_request *req; req = &sc->vtblk_dump_request; req->vbr_ack = -1; req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH; req->vbr_hdr.ioprio = 1; req->vbr_hdr.sector = 0; req->vbr_bp = &buf; bzero(&buf, sizeof(struct bio)); buf.bio_cmd = BIO_FLUSH; return (vtblk_poll_request(sc, req)); } static int vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req) { struct virtqueue *vq; struct vtblk_request *r; int error; vq = sc->vtblk_vq; if (!virtqueue_empty(vq)) return (EBUSY); error = vtblk_execute_request(sc, req); if (error) return (error); virtqueue_notify(vq); r = virtqueue_poll(vq, NULL); KASSERT(r == req, ("unexpected request response: %p/%p", r, req)); error = vtblk_request_error(req); if (error && bootverbose) { device_printf(sc->vtblk_dev, "%s: IO error: %d\n", __FUNCTION__, error); } return (error); } static void vtblk_finish_completed(struct vtblk_softc *sc) { struct vtblk_request *req; struct bio *bp; int error; while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) { bp = req->vbr_bp; error = vtblk_request_error(req); if (error) disk_err(bp, "hard error", -1, 1); vtblk_finish_bio(bp, error); vtblk_enqueue_request(sc, req); } } static void vtblk_drain_vq(struct vtblk_softc *sc, int skip_done) { struct virtqueue *vq; struct vtblk_request *req; int last; vq = sc->vtblk_vq; last = 0; while ((req = virtqueue_drain(vq, &last)) != NULL) { if (!skip_done) vtblk_finish_bio(req->vbr_bp, ENXIO); vtblk_enqueue_request(sc, req); } KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); } static void vtblk_drain(struct vtblk_softc *sc) { struct bio_queue_head *bioq; struct vtblk_request *req; struct bio *bp; bioq = &sc->vtblk_bioq; if (sc->vtblk_vq != NULL) { vtblk_finish_completed(sc); vtblk_drain_vq(sc, 0); } while ((req = vtblk_dequeue_ready(sc)) != NULL) { vtblk_finish_bio(req->vbr_bp, ENXIO); vtblk_enqueue_request(sc, req); } while (bioq_first(bioq) != NULL) { bp = bioq_takefirst(bioq); vtblk_finish_bio(bp, ENXIO); } vtblk_free_requests(sc); } #ifdef INVARIANTS static void vtblk_request_invariants(struct vtblk_request *req) { int hdr_nsegs, ack_nsegs; hdr_nsegs = sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)); ack_nsegs = sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)); KASSERT(hdr_nsegs == 1, ("request header crossed page boundary")); KASSERT(ack_nsegs == 1, ("request ack crossed page boundary")); } #endif static int vtblk_alloc_requests(struct vtblk_softc *sc) { struct vtblk_request *req; int i, nreqs; nreqs = virtqueue_size(sc->vtblk_vq); /* * Preallocate sufficient requests to keep the virtqueue full. Each * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce * the number allocated when indirect descriptors are not available. 
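 *
 * For example, a 128-entry virtqueue without indirect descriptors
 * is limited to 128 / VTBLK_MIN_SEGMENTS = 64 preallocated
 * requests.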
*/ if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0) nreqs /= VTBLK_MIN_SEGMENTS; for (i = 0; i < nreqs; i++) { req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT); if (req == NULL) return (ENOMEM); #ifdef INVARIANTS vtblk_request_invariants(req); #endif sc->vtblk_request_count++; vtblk_enqueue_request(sc, req); } return (0); } static void vtblk_free_requests(struct vtblk_softc *sc) { struct vtblk_request *req; KASSERT(TAILQ_EMPTY(&sc->vtblk_req_ready), ("ready requests left on queue")); while ((req = vtblk_dequeue_request(sc)) != NULL) { sc->vtblk_request_count--; free(req, M_DEVBUF); } KASSERT(sc->vtblk_request_count == 0, ("leaked requests: %d", sc->vtblk_request_count)); } static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *sc) { struct vtblk_request *req; req = TAILQ_FIRST(&sc->vtblk_req_free); if (req != NULL) TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link); return (req); } static void vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req) { bzero(req, sizeof(struct vtblk_request)); TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link); } static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *sc) { struct vtblk_request *req; req = TAILQ_FIRST(&sc->vtblk_req_ready); if (req != NULL) TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link); return (req); } static void vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req) { TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link); } static int vtblk_request_error(struct vtblk_request *req) { int error; switch (req->vbr_ack) { case VIRTIO_BLK_S_OK: error = 0; break; case VIRTIO_BLK_S_UNSUPP: error = ENOTSUP; break; default: error = EIO; break; } return (error); } static void vtblk_finish_bio(struct bio *bp, int error) { if (error) { bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; } biodone(bp); } Index: projects/virtio/sys/dev/virtio/network/if_vtnet.c =================================================================== --- projects/virtio/sys/dev/virtio/network/if_vtnet.c (revision 245709) +++ projects/virtio/sys/dev/virtio/network/if_vtnet.c (revision 245710) @@ -1,2746 +1,2742 @@ /*- * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO network devices. 
*/ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" static int vtnet_modevent(module_t, int, void *); static int vtnet_probe(device_t); static int vtnet_attach(device_t); static int vtnet_detach(device_t); static int vtnet_suspend(device_t); static int vtnet_resume(device_t); static int vtnet_shutdown(device_t); static int vtnet_config_change(device_t); static void vtnet_negotiate_features(struct vtnet_softc *); static int vtnet_alloc_virtqueues(struct vtnet_softc *); static void vtnet_get_hwaddr(struct vtnet_softc *); static void vtnet_set_hwaddr(struct vtnet_softc *); static int vtnet_is_link_up(struct vtnet_softc *); static void vtnet_update_link_status(struct vtnet_softc *); static void vtnet_watchdog(struct vtnet_softc *); static void vtnet_config_change_task(void *, int); static int vtnet_change_mtu(struct vtnet_softc *, int); static int vtnet_ioctl(struct ifnet *, u_long, caddr_t); static int vtnet_init_rx_vq(struct vtnet_softc *); static void vtnet_free_rx_mbufs(struct vtnet_softc *); static void vtnet_free_tx_mbufs(struct vtnet_softc *); static void vtnet_free_ctrl_vq(struct vtnet_softc *); #ifdef DEVICE_POLLING static poll_handler_t vtnet_poll; #endif static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int, struct mbuf **); static int vtnet_replace_rxbuf(struct vtnet_softc *, struct mbuf *, int); static int vtnet_newbuf(struct vtnet_softc *); static void vtnet_discard_merged_rxbuf(struct vtnet_softc *, int); static void vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *); static int vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *); static void vtnet_vlan_tag_remove(struct mbuf *); static int vtnet_rx_csum(struct vtnet_softc *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int); static int vtnet_rxeof(struct vtnet_softc *, int, int *); static void vtnet_rx_intr_task(void *, int); -static int vtnet_rx_vq_intr(void *); +static void vtnet_rx_vq_intr(void *); static void vtnet_txeof(struct vtnet_softc *); static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **, struct vtnet_tx_header *); static int vtnet_encap(struct vtnet_softc *, struct mbuf **); static void vtnet_start_locked(struct ifnet *); static void vtnet_start(struct ifnet *); static void vtnet_tick(void *); static void vtnet_tx_intr_task(void *, int); -static int vtnet_tx_vq_intr(void *); +static void vtnet_tx_vq_intr(void *); static void vtnet_stop(struct vtnet_softc *); static int vtnet_reinit(struct vtnet_softc *); static void vtnet_init_locked(struct vtnet_softc *); static void vtnet_init(void *); static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, struct sglist *, int, int); static void vtnet_rx_filter(struct vtnet_softc *sc); static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); static int vtnet_set_promisc(struct vtnet_softc *, int); static int vtnet_set_allmulti(struct vtnet_softc *, int); static void vtnet_rx_filter_mac(struct vtnet_softc *); static int vtnet_exec_vlan_filter(struct 
vtnet_softc *, int, uint16_t); static void vtnet_rx_filter_vlan(struct vtnet_softc *); static void vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); static int vtnet_ifmedia_upd(struct ifnet *); static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void vtnet_add_statistics(struct vtnet_softc *); static int vtnet_enable_rx_intr(struct vtnet_softc *); static int vtnet_enable_tx_intr(struct vtnet_softc *); static void vtnet_disable_rx_intr(struct vtnet_softc *); static void vtnet_disable_tx_intr(struct vtnet_softc *); /* Tunables. */ static int vtnet_csum_disable = 0; TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); static int vtnet_tso_disable = 0; TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); static int vtnet_lro_disable = 0; TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); /* * Reducing the number of transmit completed interrupts can * improve performance. To do so, the define below keeps the * Tx vq interrupt disabled and adds calls to vtnet_txeof() * in the start and watchdog paths. The price to pay for this * is the m_free'ing of transmitted mbufs may be delayed until * the watchdog fires. */ #define VTNET_TX_INTR_MODERATION static uma_zone_t vtnet_tx_header_zone; static struct virtio_feature_desc vtnet_feature_desc[] = { { VIRTIO_NET_F_CSUM, "TxChecksum" }, { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, { VIRTIO_NET_F_MAC, "MacAddress" }, { VIRTIO_NET_F_GSO, "TxAllGSO" }, { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, { VIRTIO_NET_F_STATUS, "Status" }, { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, { VIRTIO_NET_F_CTRL_RX, "RxMode" }, { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, { 0, NULL } }; static device_method_t vtnet_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtnet_probe), DEVMETHOD(device_attach, vtnet_attach), DEVMETHOD(device_detach, vtnet_detach), DEVMETHOD(device_suspend, vtnet_suspend), DEVMETHOD(device_resume, vtnet_resume), DEVMETHOD(device_shutdown, vtnet_shutdown), /* VirtIO methods. 
*/ DEVMETHOD(virtio_config_change, vtnet_config_change), DEVMETHOD_END }; static driver_t vtnet_driver = { "vtnet", vtnet_methods, sizeof(struct vtnet_softc) }; static devclass_t vtnet_devclass; DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); MODULE_VERSION(vtnet, 1); MODULE_DEPEND(vtnet, virtio, 1, 1, 1); static int vtnet_modevent(module_t mod, int type, void *unused) { int error; error = 0; switch (type) { case MOD_LOAD: vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr", sizeof(struct vtnet_tx_header), NULL, NULL, NULL, NULL, 0, 0); break; case MOD_QUIESCE: case MOD_UNLOAD: if (uma_zone_get_cur(vtnet_tx_header_zone) > 0) error = EBUSY; else if (type == MOD_UNLOAD) { uma_zdestroy(vtnet_tx_header_zone); vtnet_tx_header_zone = NULL; } break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static int vtnet_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) return (ENXIO); device_set_desc(dev, "VirtIO Networking Adapter"); return (BUS_PROBE_DEFAULT); } static int vtnet_attach(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; int tx_size, error; sc = device_get_softc(dev); sc->vtnet_dev = dev; VTNET_LOCK_INIT(sc); callout_init_mtx(&sc->vtnet_tick_ch, VTNET_MTX(sc), 0); ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, vtnet_ifmedia_sts); ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); vtnet_add_statistics(sc); virtio_set_feature_desc(dev, vtnet_feature_desc); vtnet_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); sc->vtnet_rx_mbuf_size = MCLBYTES; sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) { sc->vtnet_mac_filter = malloc( sizeof(struct vtnet_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_mac_filter == NULL) { device_printf(dev, "cannot allocate mac filter table\n"); error = ENOMEM; goto fail; } sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; } if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; } vtnet_get_hwaddr(sc); error = vtnet_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = vtnet_init; ifp->if_start = vtnet_start; ifp->if_ioctl = vtnet_ioctl; sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq); sc->vtnet_rx_process_limit = sc->vtnet_rx_size; tx_size = virtqueue_size(sc->vtnet_tx_vq); sc->vtnet_tx_size = tx_size; IFQ_SET_MAXLEN(&ifp->if_snd, tx_size - 1); ifp->if_snd.ifq_drv_maxlen = tx_size - 1; IFQ_SET_READY(&ifp->if_snd); ether_ifattach(ifp, sc->vtnet_hwaddr); if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) ifp->if_capabilities |= IFCAP_LINKSTATE; /* Tell the upper layer(s) we support long frames. 
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { ifp->if_capabilities |= IFCAP_TXCSUM; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) ifp->if_capabilities |= IFCAP_TSO4; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) ifp->if_capabilities |= IFCAP_TSO6; if (ifp->if_capabilities & IFCAP_TSO) ifp->if_capabilities |= IFCAP_VLAN_HWTSO; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { ifp->if_capabilities |= IFCAP_RXCSUM; if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) ifp->if_capabilities |= IFCAP_LRO; } if (ifp->if_capabilities & IFCAP_HWCSUM) { /* * VirtIO does not support VLAN tagging, but we can fake * it by inserting and removing the 802.1Q header during * transmit and receive. We are then able to do checksum * offloading of VLAN frames. */ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; } ifp->if_capenable = ifp->if_capabilities; /* * Capabilities after here are not enabled by default. */ if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); } #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif TASK_INIT(&sc->vtnet_rx_intr_task, 0, vtnet_rx_intr_task, sc); TASK_INIT(&sc->vtnet_tx_intr_task, 0, vtnet_tx_intr_task, sc); TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc); sc->vtnet_tq = taskqueue_create_fast("vtnet_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->vtnet_tq); if (sc->vtnet_tq == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate taskqueue\n"); ether_ifdetach(ifp); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_NET); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); ether_ifdetach(ifp); goto fail; } taskqueue_start_threads(&sc->vtnet_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); /* * Device defaults to promiscuous mode for backwards * compatibility. Turn it off if possible. 
*/ if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { VTNET_LOCK(sc); if (vtnet_set_promisc(sc, 0) != 0) { ifp->if_flags |= IFF_PROMISC; device_printf(dev, "cannot disable promiscuous mode\n"); } VTNET_UNLOCK(sc); } else ifp->if_flags |= IFF_PROMISC; fail: if (error) vtnet_detach(dev); return (error); } static int vtnet_detach(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; KASSERT(mtx_initialized(VTNET_MTX(sc)), ("vtnet mutex not initialized")); #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { VTNET_LOCK(sc); vtnet_stop(sc); VTNET_UNLOCK(sc); callout_drain(&sc->vtnet_tick_ch); taskqueue_drain(taskqueue_fast, &sc->vtnet_cfgchg_task); ether_ifdetach(ifp); } if (sc->vtnet_tq != NULL) { taskqueue_drain(sc->vtnet_tq, &sc->vtnet_rx_intr_task); taskqueue_drain(sc->vtnet_tq, &sc->vtnet_tx_intr_task); taskqueue_free(sc->vtnet_tq); sc->vtnet_tq = NULL; } if (sc->vtnet_vlan_attach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); sc->vtnet_vlan_attach = NULL; } if (sc->vtnet_vlan_detach != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); sc->vtnet_vlan_detach = NULL; } if (sc->vtnet_mac_filter != NULL) { free(sc->vtnet_mac_filter, M_DEVBUF); sc->vtnet_mac_filter = NULL; } if (ifp != NULL) { if_free(ifp); sc->vtnet_ifp = NULL; } if (sc->vtnet_rx_vq != NULL) vtnet_free_rx_mbufs(sc); if (sc->vtnet_tx_vq != NULL) vtnet_free_tx_mbufs(sc); if (sc->vtnet_ctrl_vq != NULL) vtnet_free_ctrl_vq(sc); ifmedia_removeall(&sc->vtnet_media); VTNET_LOCK_DESTROY(sc); return (0); } static int vtnet_suspend(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_LOCK(sc); vtnet_stop(sc); sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; VTNET_UNLOCK(sc); return (0); } static int vtnet_resume(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; VTNET_LOCK(sc); if (ifp->if_flags & IFF_UP) vtnet_init_locked(sc); sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; VTNET_UNLOCK(sc); return (0); } static int vtnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. */ return (vtnet_suspend(dev)); } static int vtnet_config_change(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue_fast(taskqueue_fast, &sc->vtnet_cfgchg_task); return (1); } static void vtnet_negotiate_features(struct vtnet_softc *sc) { device_t dev; uint64_t mask, features; dev = sc->vtnet_dev; mask = 0; if (vtnet_csum_disable) mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; /* * TSO and LRO are only available when their corresponding * checksum offload feature is also negotiated. */ if (vtnet_csum_disable || vtnet_tso_disable) mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN; if (vtnet_csum_disable || vtnet_lro_disable) mask |= VTNET_LRO_FEATURES; features = VTNET_FEATURES & ~mask; #ifdef VTNET_TX_INTR_MODERATION features |= VIRTIO_F_NOTIFY_ON_EMPTY; #endif sc->vtnet_features = virtio_negotiate_features(dev, features); if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0 && virtio_with_feature(dev, VTNET_LRO_FEATURES)) { /* * LRO without mergeable buffers requires special care. This * is not ideal because every receive buffer must be large * enough to hold the maximum TCP packet, the Ethernet header, * and the vtnet_rx_header. 
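 * (Roughly: a 65535 byte TCP packet plus headers, split across
 * 2048 byte clusters, is about 33 data segments plus one for the
 * receive header.)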
This requires up to 34 descriptors * when using MCLBYTES clusters. If we do not have indirect * descriptors, LRO is disabled since the virtqueue will not * be able to contain very many receive buffers. */ if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) { device_printf(dev, "LRO disabled due to lack of both mergeable " "buffers and indirect descriptors\n"); sc->vtnet_features = virtio_negotiate_features(dev, features & ~VTNET_LRO_FEATURES); } else sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; } } static int vtnet_alloc_virtqueues(struct vtnet_softc *sc) { device_t dev; struct vq_alloc_info vq_info[3]; int nvqs, rxsegs; dev = sc->vtnet_dev; nvqs = 2; /* * Indirect descriptors are not needed for the Rx * virtqueue when mergeable buffers are negotiated. * The header is placed inline with the data, not * in a separate descriptor, and mbuf clusters are * always physically contiguous. */ if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ? VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS; } else rxsegs = 0; VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs, vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq, "%s receive", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS, vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq, "%s transmit", device_get_nameunit(dev)); if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { nvqs++; VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL, &sc->vtnet_ctrl_vq, "%s control", device_get_nameunit(dev)); } return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); } static void vtnet_get_hwaddr(struct vtnet_softc *sc) { device_t dev; dev = sc->vtnet_dev; if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { virtio_read_device_config(dev, offsetof(struct virtio_net_config, mac), sc->vtnet_hwaddr, ETHER_ADDR_LEN); } else { /* Generate random locally administered unicast address. 
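 * The 0xB2 first octet below has the locally administered bit
 * (0x02) set and the multicast bit (0x01) clear, making the
 * generated address a valid unicast MAC.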
*/ sc->vtnet_hwaddr[0] = 0xB2; arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); vtnet_set_hwaddr(sc); } } static void vtnet_set_hwaddr(struct vtnet_softc *sc) { device_t dev; dev = sc->vtnet_dev; virtio_write_device_config(dev, offsetof(struct virtio_net_config, mac), sc->vtnet_hwaddr, ETHER_ADDR_LEN); } static int vtnet_is_link_up(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint16_t status; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_LOCK_ASSERT(sc); if ((ifp->if_capenable & IFCAP_LINKSTATE) == 0) return (1); status = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, status)); return ((status & VIRTIO_NET_S_LINK_UP) != 0); } static void vtnet_update_link_status(struct vtnet_softc *sc) { struct ifnet *ifp; int link; ifp = sc->vtnet_ifp; link = vtnet_is_link_up(sc); if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) { sc->vtnet_flags |= VTNET_FLAG_LINK; if_link_state_change(ifp, LINK_STATE_UP); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vtnet_start_locked(ifp); } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) { sc->vtnet_flags &= ~VTNET_FLAG_LINK; if_link_state_change(ifp, LINK_STATE_DOWN); } } static void vtnet_watchdog(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; #ifdef VTNET_TX_INTR_MODERATION vtnet_txeof(sc); #endif if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer) return; if_printf(ifp, "watchdog timeout -- resetting\n"); #ifdef VTNET_DEBUG virtqueue_dump(sc->vtnet_tx_vq); #endif ifp->if_oerrors++; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } static void vtnet_config_change_task(void *arg, int pending) { struct vtnet_softc *sc; sc = arg; VTNET_LOCK(sc); vtnet_update_link_status(sc); VTNET_UNLOCK(sc); } static int vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct vtnet_softc *sc; struct ifreq *ifr; int reinit, mask, error; sc = ifp->if_softc; ifr = (struct ifreq *) data; reinit = 0; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { VTNET_LOCK(sc); error = vtnet_change_mtu(sc, ifr->ifr_mtu); VTNET_UNLOCK(sc); } break; case SIOCSIFFLAGS: VTNET_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vtnet_stop(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->vtnet_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) vtnet_rx_filter(sc); else error = ENOTSUP; } } else vtnet_init_locked(sc); if (error == 0) sc->vtnet_if_flags = ifp->if_flags; VTNET_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: VTNET_LOCK(sc); if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) vtnet_rx_filter_mac(sc); VTNET_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(vtnet_poll, ifp); if (error) break; VTNET_LOCK(sc); vtnet_disable_rx_intr(sc); vtnet_disable_tx_intr(sc); ifp->if_capenable |= IFCAP_POLLING; VTNET_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts even in error case. 
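* Polling is no longer active once the deregister has been attempted, so * the queues need interrupt notifications regardless of the result.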
*/ VTNET_LOCK(sc); vtnet_enable_tx_intr(sc); vtnet_enable_rx_intr(sc); ifp->if_capenable &= ~IFCAP_POLLING; VTNET_UNLOCK(sc); } } #endif VTNET_LOCK(sc); if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; else ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if (mask & IFCAP_RXCSUM) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit = 1; } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } VLAN_CAPABILITIES(ifp); VTNET_UNLOCK(sc); break; default: error = ether_ioctl(ifp, cmd, data); break; } VTNET_LOCK_ASSERT_NOTOWNED(sc); return (error); } static int vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) { struct ifnet *ifp; int new_frame_size, clsize; ifp = sc->vtnet_ifp; if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { new_frame_size = sizeof(struct vtnet_rx_header) + sizeof(struct ether_vlan_header) + new_mtu; if (new_frame_size > MJUM9BYTES) return (EINVAL); if (new_frame_size <= MCLBYTES) clsize = MCLBYTES; else clsize = MJUM9BYTES; } else { new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) + sizeof(struct ether_vlan_header) + new_mtu; if (new_frame_size <= MCLBYTES) clsize = MCLBYTES; else clsize = MJUMPAGESIZE; } sc->vtnet_rx_mbuf_size = clsize; sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS, ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count)); ifp->if_mtu = new_mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } return (0); } static int vtnet_init_rx_vq(struct vtnet_softc *sc) { struct virtqueue *vq; int nbufs, error; vq = sc->vtnet_rx_vq; nbufs = 0; error = ENOSPC; while (!virtqueue_full(vq)) { if ((error = vtnet_newbuf(sc)) != 0) break; nbufs++; } if (nbufs > 0) { virtqueue_notify(vq); /* * EMSGSIZE signifies the virtqueue did not have enough * entries available to hold the last mbuf. This is not * an error. We should not get ENOSPC since we check if * the virtqueue is full before attempting to add a * buffer. */ if (error == EMSGSIZE) error = 0; } return (error); } static void vtnet_free_rx_mbufs(struct vtnet_softc *sc) { struct virtqueue *vq; struct mbuf *m; int last; vq = sc->vtnet_rx_vq; last = 0; while ((m = virtqueue_drain(vq, &last)) != NULL) m_freem(m); KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq")); } static void vtnet_free_tx_mbufs(struct vtnet_softc *sc) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; int last; vq = sc->vtnet_tx_vq; last = 0; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq")); } static void vtnet_free_ctrl_vq(struct vtnet_softc *sc) { /* * The control virtqueue is only polled, therefore * it should already be empty. 
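* vtnet_exec_ctrl_cmd() waits for each command with virtqueue_poll() * before returning, so nothing remains in flight here.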
*/ KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), ("Ctrl Vq not empty")); } #ifdef DEVICE_POLLING static int vtnet_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vtnet_softc *sc; int rx_done; sc = ifp->if_softc; rx_done = 0; VTNET_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (cmd == POLL_AND_CHECK_STATUS) vtnet_update_link_status(sc); if (virtqueue_nused(sc->vtnet_rx_vq) > 0) vtnet_rxeof(sc, count, &rx_done); vtnet_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vtnet_start_locked(ifp); } VTNET_UNLOCK(sc); return (rx_done); } #endif /* DEVICE_POLLING */ static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) { struct mbuf *m_head, *m_tail, *m; int i, clsize; clsize = sc->vtnet_rx_mbuf_size; m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); if (m_head == NULL) goto fail; m_head->m_len = clsize; m_tail = m_head; if (nbufs > 1) { KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, ("chained Rx mbuf requested without LRO_NOMRG")); for (i = 1; i < nbufs; i++) { m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); if (m == NULL) goto fail; m->m_len = clsize; m_tail->m_next = m; m_tail = m; } } if (m_tailp != NULL) *m_tailp = m_tail; return (m_head); fail: sc->vtnet_stats.mbuf_alloc_failed++; m_freem(m_head); return (NULL); } static int vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0) { struct mbuf *m, *m_prev; struct mbuf *m_new, *m_tail; int len, clsize, nreplace, error; m = m0; m_prev = NULL; len = len0; m_tail = NULL; clsize = sc->vtnet_rx_mbuf_size; nreplace = 0; KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("chained Rx mbuf without LRO_NOMRG")); /* * Since LRO_NOMRG mbuf chains are so large, we want to avoid * allocating an entire chain for each received frame. When * the received frame's length is less than that of the chain, * the unused mbufs are reassigned to the new chain. */ while (len > 0) { /* * Something is seriously wrong if we received * a frame larger than the mbuf chain. Drop it. */ if (m == NULL) { sc->vtnet_stats.rx_frame_too_large++; return (EMSGSIZE); } KASSERT(m->m_len == clsize, ("mbuf length not expected cluster size: %d", m->m_len)); m->m_len = MIN(m->m_len, len); len -= m->m_len; m_prev = m; m = m->m_next; nreplace++; } KASSERT(m_prev != NULL, ("m_prev == NULL")); KASSERT(nreplace <= sc->vtnet_rx_mbuf_count, ("too many replacement mbufs: %d/%d", nreplace, sc->vtnet_rx_mbuf_count)); m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail); if (m_new == NULL) { m_prev->m_len = clsize; return (ENOBUFS); } /* * Move unused mbufs, if any, from the original chain * onto the end of the new chain. */ if (m_prev->m_next != NULL) { m_tail->m_next = m_prev->m_next; m_prev->m_next = NULL; } error = vtnet_enqueue_rxbuf(sc, m_new); if (error) { /* * BAD! We could not enqueue the replacement mbuf chain. We * must restore the m0 chain to the original state if it was * modified so we can subsequently discard it. * * NOTE: The replacement is supposed to be an identical copy * of the one just dequeued so this is an unexpected error.
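* Undoing the damage means reclaiming any mbufs that were moved onto the * new chain and restoring m_prev to the full cluster size.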
*/ sc->vtnet_stats.rx_enq_replacement_failed++; if (m_tail->m_next != NULL) { m_prev->m_next = m_tail->m_next; m_tail->m_next = NULL; } m_prev->m_len = clsize; m_freem(m_new); } return (error); } static int vtnet_newbuf(struct vtnet_softc *sc) { struct mbuf *m; int error; m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL); if (m == NULL) return (ENOBUFS); error = vtnet_enqueue_rxbuf(sc, m); if (error) m_freem(m); return (error); } static void vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs) { struct virtqueue *vq; struct mbuf *m; vq = sc->vtnet_rx_vq; while (--nbufs > 0) { if ((m = virtqueue_dequeue(vq, NULL)) == NULL) break; vtnet_discard_rxbuf(sc, m); } } static void vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m) { int error; /* * Requeue the discarded mbuf. This should always be * successful since it was just dequeued. */ error = vtnet_enqueue_rxbuf(sc, m); KASSERT(error == 0, ("cannot requeue discarded mbuf")); } static int vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m) { struct sglist sg; struct sglist_seg segs[VTNET_MAX_RX_SEGS]; struct vtnet_rx_header *rxhdr; struct virtio_net_hdr *hdr; uint8_t *mdata; int offset, error; VTNET_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("chained Rx mbuf without LRO_NOMRG")); sglist_init(&sg, VTNET_MAX_RX_SEGS, segs); mdata = mtod(m, uint8_t *); offset = 0; if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { rxhdr = (struct vtnet_rx_header *) mdata; hdr = &rxhdr->vrh_hdr; offset += sizeof(struct vtnet_rx_header); error = sglist_append(&sg, hdr, sc->vtnet_hdr_size); KASSERT(error == 0, ("cannot add header to sglist")); } error = sglist_append(&sg, mdata + offset, m->m_len - offset); if (error) return (error); if (m->m_next != NULL) { error = sglist_append_mbuf(&sg, m->m_next); if (error) return (error); } return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg)); } static void vtnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evl; evl = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } #ifdef notyet static int vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; struct ip *ip; struct ip6_hdr *ip6; struct udphdr *udp; int ip_offset, csum_start, csum_offset, hlen; uint16_t eth_type; uint8_t ip_proto; /* * Convert the VirtIO checksum interface to FreeBSD's interface. * The host only provides us with the offset at which to start * checksumming, and the offset from that to place the completed * checksum. While this maps well with how Linux does checksums, * for FreeBSD, we must parse the received packet in order to set * the appropriate CSUM_* flags. */ /* * Every mbuf added to the receive virtqueue is always at least * MCLBYTES big, so assume something is amiss if the first mbuf * does not contain both the Ethernet and protocol headers. 
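* An MCLBYTES (2048 byte) cluster easily covers the 14 byte Ethernet * header plus a maximal 60 byte IP header.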
*/ ip_offset = sizeof(struct ether_header); if (m->m_len < ip_offset) return (1); eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { ip_offset = sizeof(struct ether_vlan_header); if (m->m_len < ip_offset) return (1); evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); } switch (eth_type) { case ETHERTYPE_IP: if (m->m_len < ip_offset + sizeof(struct ip)) return (1); ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset); /* Sanity check the IP header. */ if (ip->ip_v != IPVERSION) return (1); hlen = ip->ip_hl << 2; if (hlen < sizeof(struct ip)) return (1); if (ntohs(ip->ip_len) < hlen) return (1); if (ntohs(ip->ip_len) != (m->m_pkthdr.len - ip_offset)) return (1); ip_proto = ip->ip_p; csum_start = ip_offset + hlen; break; case ETHERTYPE_IPV6: if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) return (1); /* * XXX FreeBSD does not handle any IPv6 checksum offloading * at the moment. */ ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset); /* XXX Assume no extension headers are present. */ ip_proto = ip6->ip6_nxt; csum_start = ip_offset + sizeof(struct ip6_hdr); break; default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } /* Assume checksum begins right after the IP header. */ if (hdr->csum_start != csum_start) { sc->vtnet_stats.rx_csum_bad_start++; return (1); } switch (ip_proto) { case IPPROTO_TCP: csum_offset = offsetof(struct tcphdr, th_sum); break; case IPPROTO_UDP: csum_offset = offsetof(struct udphdr, uh_sum); break; case IPPROTO_SCTP: csum_offset = offsetof(struct sctphdr, checksum); break; default: sc->vtnet_stats.rx_csum_bad_ipproto++; return (1); } if (hdr->csum_offset != csum_offset) { sc->vtnet_stats.rx_csum_bad_offset++; return (1); } /* * The IP header checksum is almost certainly valid but I'm * uncertain if that is guaranteed. * * m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; */ switch (ip_proto) { case IPPROTO_UDP: if (m->m_len < csum_start + sizeof(struct udphdr)) return (1); udp = (struct udphdr *)(mtod(m, uint8_t *) + csum_start); if (udp->uh_sum == 0) return (0); /* FALLTHROUGH */ case IPPROTO_TCP: m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_SCTP: m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; } sc->vtnet_stats.rx_csum_offloaded++; return (0); } #endif /* * Alternative method of doing receive checksum offloading. Rather * than parsing the received frame down to the IP header, use the * csum_offset to determine which CSUM_* flags are appropriate. We * can get by with doing this only because the checksum offsets are * unique for the things we care about. */ static int vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; struct udphdr *udp; int csum_len; uint16_t eth_type; csum_len = hdr->csum_start + hdr->csum_offset; if (csum_len < sizeof(struct ether_header) + sizeof(struct ip)) return (1); if (m->m_len < csum_len) return (1); eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); } if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) { sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } /* Use the offset to determine the appropriate CSUM_* flags. 
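* The offsets cannot collide: uh_sum sits at byte 6 of the UDP header, * th_sum at byte 16 of the TCP header, and the SCTP checksum at byte 8.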
*/ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): if (m->m_len < hdr->csum_start + sizeof(struct udphdr)) return (1); udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start); if (udp->uh_sum == 0) return (0); /* FALLTHROUGH */ case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case offsetof(struct sctphdr, checksum): m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: sc->vtnet_stats.rx_csum_bad_offset++; return (1); } sc->vtnet_stats.rx_csum_offloaded++; return (0); } static int vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs) { struct ifnet *ifp; struct virtqueue *vq; struct mbuf *m, *m_tail; int len; ifp = sc->vtnet_ifp; vq = sc->vtnet_rx_vq; m_tail = m_head; while (--nbufs > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) { ifp->if_ierrors++; goto fail; } if (vtnet_newbuf(sc) != 0) { ifp->if_iqdrops++; vtnet_discard_rxbuf(sc, m); if (nbufs > 1) vtnet_discard_merged_rxbuf(sc, nbufs); goto fail; } if (m->m_len < len) len = m->m_len; m->m_len = len; m->m_flags &= ~M_PKTHDR; m_head->m_pkthdr.len += len; m_tail->m_next = m; m_tail = m; } return (0); fail: sc->vtnet_stats.rx_mergeable_failed++; m_freem(m_head); return (1); } static int vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp) { struct virtio_net_hdr lhdr; struct ifnet *ifp; struct virtqueue *vq; struct mbuf *m; struct ether_header *eh; struct virtio_net_hdr *hdr; struct virtio_net_hdr_mrg_rxbuf *mhdr; int len, deq, nbufs, adjsz, rx_npkts; ifp = sc->vtnet_ifp; vq = sc->vtnet_rx_vq; hdr = &lhdr; deq = 0; rx_npkts = 0; VTNET_LOCK_ASSERT(sc); while (--count >= 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) break; deq++; if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { ifp->if_ierrors++; vtnet_discard_rxbuf(sc, m); continue; } if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { nbufs = 1; adjsz = sizeof(struct vtnet_rx_header); /* * Account for our pad between the header and * the actual start of the frame. */ len += VTNET_RX_HEADER_PAD; } else { mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); nbufs = mhdr->num_buffers; adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); } if (vtnet_replace_rxbuf(sc, m, len) != 0) { ifp->if_iqdrops++; vtnet_discard_rxbuf(sc, m); if (nbufs > 1) vtnet_discard_merged_rxbuf(sc, nbufs); continue; } m->m_pkthdr.len = len; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_flags = 0; if (nbufs > 1) { if (vtnet_rxeof_merged(sc, m, nbufs) != 0) continue; } ifp->if_ipackets++; /* * Save copy of header before we strip it. For both mergeable * and non-mergeable, the VirtIO header is placed first in the * mbuf's data. We no longer need num_buffers, so always use a * virtio_net_hdr. */ memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); m_adj(m, adjsz); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { vtnet_vlan_tag_remove(m); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; } } if (ifp->if_capenable & IFCAP_RXCSUM && hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (vtnet_rx_csum(sc, m, hdr) != 0) sc->vtnet_stats.rx_csum_failed++; } VTNET_UNLOCK(sc); rx_npkts++; (*ifp->if_input)(ifp, m); VTNET_LOCK(sc); /* * The interface may have been stopped while we were * passing the packet up the network stack. 
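* vtnet_stop() may run in the window where VTNET_LOCK is dropped around * the call to if_input() above.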
*/ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (deq > 0) virtqueue_notify(vq); if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; return (count > 0 ? 0 : EAGAIN); } static void vtnet_rx_intr_task(void *arg, int pending) { struct vtnet_softc *sc; struct ifnet *ifp; int more; sc = arg; ifp = sc->vtnet_ifp; VTNET_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { VTNET_UNLOCK(sc); return; } #endif if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { vtnet_enable_rx_intr(sc); VTNET_UNLOCK(sc); return; } more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL); if (!more && vtnet_enable_rx_intr(sc) != 0) { vtnet_disable_rx_intr(sc); more = 1; } VTNET_UNLOCK(sc); if (more) { sc->vtnet_stats.rx_task_rescheduled++; taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_rx_intr_task); } } -static int +static void vtnet_rx_vq_intr(void *xsc) { struct vtnet_softc *sc; sc = xsc; vtnet_disable_rx_intr(sc); taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_rx_intr_task); - - return (1); } static void vtnet_txeof(struct vtnet_softc *sc) { struct virtqueue *vq; struct ifnet *ifp; struct vtnet_tx_header *txhdr; int deq; vq = sc->vtnet_tx_vq; ifp = sc->vtnet_ifp; deq = 0; VTNET_LOCK_ASSERT(sc); while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { deq++; ifp->if_opackets++; m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } if (deq > 0) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (virtqueue_empty(vq)) sc->vtnet_watchdog_timer = 0; } } static struct mbuf * vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m, struct virtio_net_hdr *hdr) { struct ifnet *ifp; struct ether_header *eh; struct ether_vlan_header *evh; struct ip *ip; struct ip6_hdr *ip6; struct tcphdr *tcp; int ip_offset; uint16_t eth_type, csum_start; uint8_t ip_proto, gso_type; ifp = sc->vtnet_ifp; M_ASSERTPKTHDR(m); ip_offset = sizeof(struct ether_header); if (m->m_len < ip_offset) { if ((m = m_pullup(m, ip_offset)) == NULL) return (NULL); } eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { ip_offset = sizeof(struct ether_vlan_header); if (m->m_len < ip_offset) { if ((m = m_pullup(m, ip_offset)) == NULL) return (NULL); } evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); } switch (eth_type) { case ETHERTYPE_IP: if (m->m_len < ip_offset + sizeof(struct ip)) { m = m_pullup(m, ip_offset + sizeof(struct ip)); if (m == NULL) return (NULL); } ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset); ip_proto = ip->ip_p; csum_start = ip_offset + (ip->ip_hl << 2); gso_type = VIRTIO_NET_HDR_GSO_TCPV4; break; case ETHERTYPE_IPV6: if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) { m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr)); if (m == NULL) return (NULL); } ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset); /* * XXX Assume no extension headers are present. Presently, * this will always be true in the case of TSO, and FreeBSD * does not perform checksum offloading of IPv6 yet. 
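* For TSO the payload is plain TCP, so the transport header directly * follows the fixed 40 byte IPv6 header.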
*/ ip_proto = ip6->ip6_nxt; csum_start = ip_offset + sizeof(struct ip6_hdr); gso_type = VIRTIO_NET_HDR_GSO_TCPV6; break; default: return (m); } if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) { hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; sc->vtnet_stats.tx_csum_offloaded++; } if (m->m_pkthdr.csum_flags & CSUM_TSO) { if (ip_proto != IPPROTO_TCP) return (m); if (m->m_len < csum_start + sizeof(struct tcphdr)) { m = m_pullup(m, csum_start + sizeof(struct tcphdr)); if (m == NULL) return (NULL); } tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start); hdr->gso_type = gso_type; hdr->hdr_len = csum_start + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; if (tcp->th_flags & TH_CWR) { /* * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN. * ECN support is only configurable globally with the * net.inet.tcp.ecn.enable sysctl knob. */ if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { if_printf(ifp, "TSO with ECN not supported " "by host\n"); m_freem(m); return (NULL); } hdr->flags |= VIRTIO_NET_HDR_GSO_ECN; } sc->vtnet_stats.tx_tso_offloaded++; } return (m); } static int vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head, struct vtnet_tx_header *txhdr) { struct sglist sg; struct sglist_seg segs[VTNET_MAX_TX_SEGS]; struct virtqueue *vq; struct mbuf *m; int collapsed, error; vq = sc->vtnet_tx_vq; m = *m_head; collapsed = 0; sglist_init(&sg, VTNET_MAX_TX_SEGS, segs); error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); KASSERT(error == 0 && sg.sg_nseg == 1, ("cannot add header to sglist")); again: error = sglist_append_mbuf(&sg, m); if (error) { if (collapsed) goto fail; m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1); if (m == NULL) goto fail; *m_head = m; collapsed = 1; goto again; } txhdr->vth_mbuf = m; return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0)); fail: m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } static int vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head) { struct vtnet_tx_header *txhdr; struct virtio_net_hdr *hdr; struct mbuf *m; int error; m = *m_head; txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO); if (txhdr == NULL) { *m_head = NULL; m_freem(m); return (ENOMEM); } /* * Always use the non-mergeable header to simplify things. When * the mergeable feature is negotiated, the num_buffers field * must be set to zero. We use vtnet_hdr_size later to enqueue * the correct header size to the host. 
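* The header was allocated with M_ZERO above, so num_buffers is already * zero without an explicit store.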
*/ hdr = &txhdr->vth_uhdr.hdr; if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } m->m_flags &= ~M_VLANTAG; } if (m->m_pkthdr.csum_flags != 0) { m = vtnet_tx_offload(sc, m, hdr); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } } error = vtnet_enqueue_txbuf(sc, m_head, txhdr); fail: if (error) uma_zfree(vtnet_tx_header_zone, txhdr); return (error); } static void vtnet_start(struct ifnet *ifp) { struct vtnet_softc *sc; sc = ifp->if_softc; VTNET_LOCK(sc); vtnet_start_locked(ifp); VTNET_UNLOCK(sc); } static void vtnet_start_locked(struct ifnet *ifp) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m0; int enq; sc = ifp->if_softc; vq = sc->vtnet_tx_vq; enq = 0; VTNET_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) return; #ifdef VTNET_TX_INTR_MODERATION if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) vtnet_txeof(sc); #endif while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (virtqueue_full(vq)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (vtnet_encap(sc, &m0) != 0) { if (m0 == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; ETHER_BPF_MTAP(ifp, m0); } if (enq > 0) { virtqueue_notify(vq); sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; } } static void vtnet_tick(void *xsc) { struct vtnet_softc *sc; sc = xsc; VTNET_LOCK_ASSERT(sc); #ifdef VTNET_DEBUG virtqueue_dump(sc->vtnet_rx_vq); virtqueue_dump(sc->vtnet_tx_vq); #endif vtnet_watchdog(sc); callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); } static void vtnet_tx_intr_task(void *arg, int pending) { struct vtnet_softc *sc; struct ifnet *ifp; sc = arg; ifp = sc->vtnet_ifp; VTNET_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { VTNET_UNLOCK(sc); return; } #endif if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { vtnet_enable_tx_intr(sc); VTNET_UNLOCK(sc); return; } vtnet_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vtnet_start_locked(ifp); if (vtnet_enable_tx_intr(sc) != 0) { vtnet_disable_tx_intr(sc); sc->vtnet_stats.tx_task_rescheduled++; VTNET_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task); return; } VTNET_UNLOCK(sc); } -static int +static void vtnet_tx_vq_intr(void *xsc) { struct vtnet_softc *sc; sc = xsc; vtnet_disable_tx_intr(sc); taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task); - - return (1); } static void vtnet_stop(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_LOCK_ASSERT(sc); sc->vtnet_watchdog_timer = 0; callout_stop(&sc->vtnet_tick_ch); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); vtnet_disable_rx_intr(sc); vtnet_disable_tx_intr(sc); /* * Stop the host VirtIO adapter. Note this will reset the host * adapter's state back to the pre-initialized state, so in * order to make the device usable again, we must drive it * through virtio_reinit() and virtio_reinit_complete(). */ virtio_stop(dev); sc->vtnet_flags &= ~VTNET_FLAG_LINK; vtnet_free_rx_mbufs(sc); vtnet_free_tx_mbufs(sc); } static int vtnet_reinit(struct vtnet_softc *sc) { struct ifnet *ifp; uint64_t features; ifp = sc->vtnet_ifp; features = sc->vtnet_features; /* * Re-negotiate with the host, removing any disabled receive * features. Transmit features are disabled only on our side * via if_capenable and if_hwassist. 
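* The host must be told not to use receive offloads, whereas transmit * offloads are simply never requested on our outgoing packets.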
*/ if (ifp->if_capabilities & IFCAP_RXCSUM) { if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) features &= ~VIRTIO_NET_F_GUEST_CSUM; } if (ifp->if_capabilities & IFCAP_LRO) { if ((ifp->if_capenable & IFCAP_LRO) == 0) features &= ~VTNET_LRO_FEATURES; } if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) features &= ~VIRTIO_NET_F_CTRL_VLAN; } return (virtio_reinit(sc->vtnet_dev, features)); } static void vtnet_init_locked(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; int error; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_LOCK_ASSERT(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; /* Stop host's adapter, cancel any pending I/O. */ vtnet_stop(sc); /* Reinitialize the host device. */ error = vtnet_reinit(sc); if (error) { device_printf(dev, "reinitialization failed, stopping device...\n"); vtnet_stop(sc); return; } /* Update host with assigned MAC address. */ bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); vtnet_set_hwaddr(sc); ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_TSO; error = vtnet_init_rx_vq(sc); if (error) { device_printf(dev, "cannot allocate mbufs for Rx virtqueue\n"); vtnet_stop(sc); return; } if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { /* Restore promiscuous and all-multicast modes. */ vtnet_rx_filter(sc); /* Restore filtered MAC addresses. */ vtnet_rx_filter_mac(sc); } /* Restore VLAN filters. */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) vtnet_rx_filter_vlan(sc); } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { vtnet_disable_rx_intr(sc); vtnet_disable_tx_intr(sc); } else #endif { vtnet_enable_rx_intr(sc); vtnet_enable_tx_intr(sc); } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; virtio_reinit_complete(dev); vtnet_update_link_status(sc); callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); } static void vtnet_init(void *xsc) { struct vtnet_softc *sc; sc = xsc; VTNET_LOCK(sc); vtnet_init_locked(sc); VTNET_UNLOCK(sc); } static void vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, struct sglist *sg, int readable, int writable) { struct virtqueue *vq; void *c; vq = sc->vtnet_ctrl_vq; VTNET_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, ("no control virtqueue")); KASSERT(virtqueue_empty(vq), ("control command already enqueued")); if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) return; virtqueue_notify(vq); /* * Poll until the command is complete. Previously, we would * sleep until the control virtqueue interrupt handler woke * us up, but dropping the VTNET_MTX leads to serialization * difficulties. * * Furthermore, it appears QEMU/KVM only allocates three MSIX * vectors. Two of those vectors are needed for the Rx and Tx * virtqueues. We do not support sharing both a Vq and config * changed notification on the same MSIX vector. */ c = virtqueue_poll(vq, NULL); KASSERT(c == cookie, ("unexpected control command response")); } static void vtnet_rx_filter(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("CTRL_RX feature not negotiated")); if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) device_printf(dev, "cannot %s promiscuous mode\n", ifp->if_flags & IFF_PROMISC ? 
"enable" : "disable"); if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) device_printf(dev, "cannot %s all-multicast mode\n", ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); } static int vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) { struct virtio_net_ctrl_hdr hdr; struct sglist_seg segs[3]; struct sglist sg; uint8_t onoff, ack; int error; if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) return (ENOTSUP); error = 0; hdr.class = VIRTIO_NET_CTRL_RX; hdr.cmd = cmd; onoff = !!on; ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &onoff, sizeof(uint8_t)); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("error adding Rx filter message to sglist")); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); return (ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_set_promisc(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); } static int vtnet_set_allmulti(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); } static void vtnet_rx_filter_mac(struct vtnet_softc *sc) { struct virtio_net_ctrl_hdr hdr; struct vtnet_mac_filter *filter; struct sglist_seg segs[4]; struct sglist sg; struct ifnet *ifp; struct ifaddr *ifa; struct ifmultiaddr *ifma; int ucnt, mcnt, promisc, allmulti, error; uint8_t ack; ifp = sc->vtnet_ifp; filter = sc->vtnet_mac_filter; ucnt = 0; mcnt = 0; promisc = 0; allmulti = 0; error = 0; VTNET_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("CTRL_RX feature not negotiated")); /* Unicast MAC addresses: */ if_addr_rlock(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_LINK) continue; else if (ucnt == VTNET_MAX_MAC_ENTRIES) break; bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); ucnt++; } if_addr_runlock(ifp); if (ucnt >= VTNET_MAX_MAC_ENTRIES) { promisc = 1; filter->vmf_unicast.nentries = 0; if_printf(ifp, "more than %d MAC addresses assigned, " "falling back to promiscuous mode\n", VTNET_MAX_MAC_ENTRIES); } else filter->vmf_unicast.nentries = ucnt; /* Multicast MAC addresses: */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; else if (mcnt == VTNET_MAX_MAC_ENTRIES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); mcnt++; } if_maddr_runlock(ifp); if (mcnt >= VTNET_MAX_MAC_ENTRIES) { allmulti = 1; filter->vmf_multicast.nentries = 0; if_printf(ifp, "more than %d multicast MAC addresses " "assigned, falling back to all-multicast mode\n", VTNET_MAX_MAC_ENTRIES); } else filter->vmf_multicast.nentries = mcnt; if (promisc && allmulti) goto out; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 4, segs); error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &filter->vmf_unicast, sizeof(struct vtnet_mac_table)); error |= sglist_append(&sg, &filter->vmf_multicast, sizeof(struct vtnet_mac_table)); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 4, ("error adding MAC filtering message to sglist")); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); if (ack != VIRTIO_NET_OK) if_printf(ifp, "error setting host MAC filter table\n"); out: if 
(promisc) if (vtnet_set_promisc(sc, 1) != 0) if_printf(ifp, "cannot enable promiscuous mode\n"); if (allmulti) if (vtnet_set_allmulti(sc, 1) != 0) if_printf(ifp, "cannot enable all-multicast mode\n"); } static int vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct virtio_net_ctrl_hdr hdr; struct sglist_seg segs[3]; struct sglist sg; uint8_t ack; int error; hdr.class = VIRTIO_NET_CTRL_VLAN; hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; ack = VIRTIO_NET_ERR; error = 0; sglist_init(&sg, 3, segs); error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &tag, sizeof(uint16_t)); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("error adding VLAN control message to sglist")); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); return (ack == VIRTIO_NET_OK ? 0 : EIO); } static void vtnet_rx_filter_vlan(struct vtnet_softc *sc) { device_t dev; uint32_t w, mask; uint16_t tag; int i, nvlans, error; VTNET_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, ("VLAN_FILTER feature not negotiated")); dev = sc->vtnet_dev; nvlans = sc->vtnet_nvlans; error = 0; /* Enable filtering for each configured VLAN. */ for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) { w = sc->vtnet_vlan_shadow[i]; for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) { if ((w & mask) != 0) { w &= ~mask; nvlans--; if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) error++; } } } KASSERT(nvlans == 0, ("VLAN count incorrect")); if (error) device_printf(dev, "cannot restore VLAN filter table\n"); } static void vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct ifnet *ifp; int idx, bit; KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, ("VLAN_FILTER feature not negotiated")); if ((tag == 0) || (tag > 4095)) return; ifp = sc->vtnet_ifp; idx = (tag >> 5) & 0x7F; bit = tag & 0x1F; VTNET_LOCK(sc); /* Update shadow VLAN table. */ if (add) { sc->vtnet_nvlans++; sc->vtnet_vlan_shadow[idx] |= (1 << bit); } else { sc->vtnet_nvlans--; sc->vtnet_vlan_shadow[idx] &= ~(1 << bit); } if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { if (vtnet_exec_vlan_filter(sc, add, tag) != 0) { device_printf(sc->vtnet_dev, "cannot %s VLAN %d %s the host filter table\n", add ? "add" : "remove", tag, add ? 
"to" : "from"); } } VTNET_UNLOCK(sc); } static void vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc != arg) return; vtnet_set_vlan_filter(arg, 1, tag); } static void vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc != arg) return; vtnet_set_vlan_filter(arg, 0, tag); } static int vtnet_ifmedia_upd(struct ifnet *ifp) { struct vtnet_softc *sc; struct ifmedia *ifm; sc = ifp->if_softc; ifm = &sc->vtnet_media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } static void vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vtnet_softc *sc; sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; VTNET_LOCK(sc); if (vtnet_is_link_up(sc) != 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= VTNET_MEDIATYPE; } else ifmr->ifm_active |= IFM_NONE; VTNET_UNLOCK(sc); } static void vtnet_add_statistics(struct vtnet_softc *sc) { device_t dev; struct vtnet_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtnet_dev; stats = &sc->vtnet_stats; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed", CTLFLAG_RD, &stats->mbuf_alloc_failed, "Mbuf cluster allocation failures"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large", CTLFLAG_RD, &stats->rx_frame_too_large, "Received frame larger than the mbuf chain"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed", CTLFLAG_RD, &stats->rx_enq_replacement_failed, "Enqueuing the replacement receive mbuf failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed", CTLFLAG_RD, &stats->rx_mergeable_failed, "Mergeable buffers receive failures"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", CTLFLAG_RD, &stats->rx_csum_bad_ethtype, "Received checksum offloaded buffer with unsupported " "Ethernet type"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start", CTLFLAG_RD, &stats->rx_csum_bad_start, "Received checksum offloaded buffer with incorrect start offset"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", CTLFLAG_RD, &stats->rx_csum_bad_ipproto, "Received checksum offloaded buffer with incorrect IP protocol"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset", CTLFLAG_RD, &stats->rx_csum_bad_offset, "Received checksum offloaded buffer with incorrect offset"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed", CTLFLAG_RD, &stats->rx_csum_failed, "Received buffer checksum offload failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded", CTLFLAG_RD, &stats->rx_csum_offloaded, "Received buffer checksum offload succeeded"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled", CTLFLAG_RD, &stats->rx_task_rescheduled, "Times the receive interrupt task rescheduled itself"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded", CTLFLAG_RD, &stats->tx_csum_offloaded, "Offloaded checksum of transmitted buffer"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded", CTLFLAG_RD, &stats->tx_tso_offloaded, "Segmentation offload of transmitted buffer"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", CTLFLAG_RD, &stats->tx_csum_bad_ethtype, "Aborted transmit of checksum offloaded buffer with unknown " "Ethernet type"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", CTLFLAG_RD, &stats->tx_tso_bad_ethtype, "Aborted transmit of TSO 
buffer with unknown Ethernet type"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled", CTLFLAG_RD, &stats->tx_task_rescheduled, "Times the transmit interrupt task rescheduled itself"); } static int vtnet_enable_rx_intr(struct vtnet_softc *sc) { return (virtqueue_enable_intr(sc->vtnet_rx_vq)); } static void vtnet_disable_rx_intr(struct vtnet_softc *sc) { virtqueue_disable_intr(sc->vtnet_rx_vq); } static int vtnet_enable_tx_intr(struct vtnet_softc *sc) { #ifdef VTNET_TX_INTR_MODERATION return (0); #else return (virtqueue_enable_intr(sc->vtnet_tx_vq)); #endif } static void vtnet_disable_tx_intr(struct vtnet_softc *sc) { virtqueue_disable_intr(sc->vtnet_tx_vq); } Index: projects/virtio/sys/dev/virtio/scsi/virtio_scsi.c =================================================================== --- projects/virtio/sys/dev/virtio/scsi/virtio_scsi.c (revision 245709) +++ projects/virtio/sys/dev/virtio/scsi/virtio_scsi.c (revision 245710) @@ -1,2363 +1,2357 @@ /*- * Copyright (c) 2012, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO SCSI devices. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" static int vtscsi_modevent(module_t, int, void *); static int vtscsi_probe(device_t); static int vtscsi_attach(device_t); static int vtscsi_detach(device_t); static int vtscsi_suspend(device_t); static int vtscsi_resume(device_t); static void vtscsi_negotiate_features(struct vtscsi_softc *); static int vtscsi_maximum_segments(struct vtscsi_softc *, int); static int vtscsi_alloc_virtqueues(struct vtscsi_softc *); static void vtscsi_write_device_config(struct vtscsi_softc *); static int vtscsi_reinit(struct vtscsi_softc *); static int vtscsi_alloc_cam(struct vtscsi_softc *); static int vtscsi_register_cam(struct vtscsi_softc *); static void vtscsi_free_cam(struct vtscsi_softc *); static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *); static int vtscsi_register_async(struct vtscsi_softc *); static void vtscsi_deregister_async(struct vtscsi_softc *); static void vtscsi_cam_action(struct cam_sim *, union ccb *); static void vtscsi_cam_poll(struct cam_sim *); static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *, union ccb *); static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *); static void vtscsi_cam_path_inquiry(struct vtscsi_softc *, struct cam_sim *, union ccb *); static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *, struct sglist *, struct ccb_scsiio *); static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *, struct vtscsi_request *, int *, int *); static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *); static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_timedout_scsi_cmd(void *); static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *); static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *, struct ccb_scsiio *, struct virtio_scsi_cmd_resp *); static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_poll_ctrl_req(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_execute_ctrl_req(struct vtscsi_softc *, struct vtscsi_request *, struct sglist *, int, int, int); static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c, struct vtscsi_request *); static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *); static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []); static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *, struct virtio_scsi_cmd_req *); static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *); static void vtscsi_freeze_simq(struct vtscsi_softc *, int); static int 
vtscsi_thaw_simq(struct vtscsi_softc *, int); static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t, lun_id_t); static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t, lun_id_t); static void vtscsi_execute_rescan_bus(struct vtscsi_softc *); static void vtscsi_handle_event(struct vtscsi_softc *, struct virtio_scsi_event *); static int vtscsi_enqueue_event_buf(struct vtscsi_softc *, struct virtio_scsi_event *); static int vtscsi_init_event_vq(struct vtscsi_softc *); static void vtscsi_reinit_event_vq(struct vtscsi_softc *); static void vtscsi_drain_event_vq(struct vtscsi_softc *); static void vtscsi_complete_vqs_locked(struct vtscsi_softc *); static void vtscsi_complete_vqs(struct vtscsi_softc *); static void vtscsi_drain_vqs(struct vtscsi_softc *); static void vtscsi_cancel_request(struct vtscsi_softc *, struct vtscsi_request *); static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *); static void vtscsi_stop(struct vtscsi_softc *); static int vtscsi_reset_bus(struct vtscsi_softc *); static void vtscsi_init_request(struct vtscsi_softc *, struct vtscsi_request *); static int vtscsi_alloc_requests(struct vtscsi_softc *); static void vtscsi_free_requests(struct vtscsi_softc *); static void vtscsi_enqueue_request(struct vtscsi_softc *, struct vtscsi_request *); static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *); static void vtscsi_complete_request(struct vtscsi_request *); static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *); static void vtscsi_control_vq_task(void *, int); static void vtscsi_event_vq_task(void *, int); static void vtscsi_request_vq_task(void *, int); -static int vtscsi_control_vq_intr(void *); -static int vtscsi_event_vq_intr(void *); -static int vtscsi_request_vq_intr(void *); +static void vtscsi_control_vq_intr(void *); +static void vtscsi_event_vq_intr(void *); +static void vtscsi_request_vq_intr(void *); static void vtscsi_disable_vqs_intr(struct vtscsi_softc *); static void vtscsi_enable_vqs_intr(struct vtscsi_softc *); static void vtscsi_get_tunables(struct vtscsi_softc *); static void vtscsi_add_sysctl(struct vtscsi_softc *); static void vtscsi_printf_req(struct vtscsi_request *, const char *, const char *, ...); /* Global tunables. */ /* * The current QEMU VirtIO SCSI implementation does not cancel in-flight * IO during virtio_stop(). So in-flight requests still complete after the * device reset. We would have to wait for all the in-flight IO to complete, * which defeats the typical purpose of a bus reset. We could simulate the * bus reset with either I_T_NEXUS_RESET of all the targets, or with * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the * control virtqueue). But this isn't very useful if things really go off * the rails, so default to disabled for now. */ static int vtscsi_bus_reset_disable = 1; TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable); static struct virtio_feature_desc vtscsi_feature_desc[] = { { VIRTIO_SCSI_F_INOUT, "InOut" }, { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" }, { 0, NULL } }; static device_method_t vtscsi_methods[] = { /* Device methods. 
*/ DEVMETHOD(device_probe, vtscsi_probe), DEVMETHOD(device_attach, vtscsi_attach), DEVMETHOD(device_detach, vtscsi_detach), DEVMETHOD(device_suspend, vtscsi_suspend), DEVMETHOD(device_resume, vtscsi_resume), DEVMETHOD_END }; static driver_t vtscsi_driver = { "vtscsi", vtscsi_methods, sizeof(struct vtscsi_softc) }; static devclass_t vtscsi_devclass; DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass, vtscsi_modevent, 0); MODULE_VERSION(virtio_scsi, 1); MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1); MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1); static int vtscsi_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int vtscsi_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI) return (ENXIO); device_set_desc(dev, "VirtIO SCSI Adapter"); return (BUS_PROBE_DEFAULT); } static int vtscsi_attach(device_t dev) { struct vtscsi_softc *sc; struct virtio_scsi_config scsicfg; int error; sc = device_get_softc(dev); sc->vtscsi_dev = dev; VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev)); TAILQ_INIT(&sc->vtscsi_req_free); vtscsi_get_tunables(sc); vtscsi_add_sysctl(sc); virtio_set_feature_desc(dev, vtscsi_feature_desc); vtscsi_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT)) sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL; if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG)) sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG; virtio_read_device_config(dev, 0, &scsicfg, sizeof(struct virtio_scsi_config)); sc->vtscsi_max_channel = scsicfg.max_channel; sc->vtscsi_max_target = scsicfg.max_target; sc->vtscsi_max_lun = scsicfg.max_lun; sc->vtscsi_event_buf_size = scsicfg.event_info_size; vtscsi_write_device_config(sc); sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max); sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT); if (sc->vtscsi_sglist == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate sglist\n"); goto fail; } error = vtscsi_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = vtscsi_init_event_vq(sc); if (error) { device_printf(dev, "cannot populate the eventvq\n"); goto fail; } error = vtscsi_alloc_requests(sc); if (error) { device_printf(dev, "cannot allocate requests\n"); goto fail; } error = vtscsi_alloc_cam(sc); if (error) { device_printf(dev, "cannot allocate CAM structures\n"); goto fail; } TASK_INIT(&sc->vtscsi_control_intr_task, 0, vtscsi_control_vq_task, sc); TASK_INIT(&sc->vtscsi_event_intr_task, 0, vtscsi_event_vq_task, sc); TASK_INIT(&sc->vtscsi_request_intr_task, 0, vtscsi_request_vq_task, sc); sc->vtscsi_tq = taskqueue_create_fast("vtscsi_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->vtscsi_tq); if (sc->vtscsi_tq == NULL) { error = ENOMEM; device_printf(dev, "cannot allocate taskqueue\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_CAM); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } taskqueue_start_threads(&sc->vtscsi_tq, 1, PI_DISK, "%s taskq", device_get_nameunit(dev)); vtscsi_enable_vqs_intr(sc); /* * Register with CAM after interrupts are enabled so we will get * notified of the probe responses. 
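* The initial bus scan issues its probe commands through the request * virtqueue, so that interrupt path must already be live.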
*/ error = vtscsi_register_cam(sc); if (error) { device_printf(dev, "cannot register with CAM\n"); goto fail; } fail: if (error) vtscsi_detach(dev); return (error); } static int vtscsi_detach(device_t dev) { struct vtscsi_softc *sc; sc = device_get_softc(dev); VTSCSI_LOCK(sc); sc->vtscsi_flags |= VTSCSI_FLAG_DETACH; if (device_is_attached(dev)) vtscsi_stop(sc); VTSCSI_UNLOCK(sc); if (sc->vtscsi_tq != NULL) { taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_event_intr_task); taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); taskqueue_free(sc->vtscsi_tq); sc->vtscsi_tq = NULL; } vtscsi_complete_vqs(sc); vtscsi_drain_vqs(sc); vtscsi_free_cam(sc); vtscsi_free_requests(sc); if (sc->vtscsi_sglist != NULL) { sglist_free(sc->vtscsi_sglist); sc->vtscsi_sglist = NULL; } VTSCSI_LOCK_DESTROY(sc); return (0); } static int vtscsi_suspend(device_t dev) { return (0); } static int vtscsi_resume(device_t dev) { return (0); } static void vtscsi_negotiate_features(struct vtscsi_softc *sc) { device_t dev; uint64_t features; dev = sc->vtscsi_dev; features = virtio_negotiate_features(dev, VTSCSI_FEATURES); sc->vtscsi_features = features; } static int vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max) { int nsegs; nsegs = VTSCSI_MIN_SEGMENTS; if (seg_max > 0) { nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); } else nsegs += 1; return (nsegs); } static int vtscsi_alloc_virtqueues(struct vtscsi_softc *sc) { device_t dev; struct vq_alloc_info vq_info[3]; int nvqs; dev = sc->vtscsi_dev; nvqs = 3; VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc, &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc, &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs, vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq, "%s request", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); } static void vtscsi_write_device_config(struct vtscsi_softc *sc) { virtio_write_dev_config_4(sc->vtscsi_dev, offsetof(struct virtio_scsi_config, sense_size), VIRTIO_SCSI_SENSE_SIZE); /* * This is the size in the virtio_scsi_cmd_req structure. Note * this value (32) is larger than the maximum CAM CDB size (16). 
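* Every CAM CDB therefore fits in a request without truncation.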
*/ virtio_write_dev_config_4(sc->vtscsi_dev, offsetof(struct virtio_scsi_config, cdb_size), VIRTIO_SCSI_CDB_SIZE); } static int vtscsi_reinit(struct vtscsi_softc *sc) { device_t dev; int error; dev = sc->vtscsi_dev; error = virtio_reinit(dev, sc->vtscsi_features); if (error == 0) { vtscsi_write_device_config(sc); vtscsi_reinit_event_vq(sc); virtio_reinit_complete(dev); vtscsi_enable_vqs_intr(sc); } vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error); return (error); } static int vtscsi_alloc_cam(struct vtscsi_softc *sc) { device_t dev; struct cam_devq *devq; int openings; dev = sc->vtscsi_dev; openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS; devq = cam_simq_alloc(openings); if (devq == NULL) { device_printf(dev, "cannot allocate SIM queue\n"); return (ENOMEM); } sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll, "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1, openings, devq); if (sc->vtscsi_sim == NULL) { cam_simq_free(devq); device_printf(dev, "cannot allocate SIM\n"); return (ENOMEM); } return (0); } static int vtscsi_register_cam(struct vtscsi_softc *sc) { device_t dev; int registered, error; dev = sc->vtscsi_dev; registered = 0; VTSCSI_LOCK(sc); if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) { error = ENOMEM; device_printf(dev, "cannot register XPT bus\n"); goto fail; } registered = 1; if (xpt_create_path(&sc->vtscsi_path, NULL, cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { error = ENOMEM; device_printf(dev, "cannot create bus path\n"); goto fail; } VTSCSI_UNLOCK(sc); /* * The async register apparently needs to be done without * the lock held, otherwise it can recurse on the lock. */ if (vtscsi_register_async(sc) != CAM_REQ_CMP) { error = EIO; device_printf(dev, "cannot register async callback\n"); VTSCSI_LOCK(sc); goto fail; } return (0); fail: if (sc->vtscsi_path != NULL) { xpt_free_path(sc->vtscsi_path); sc->vtscsi_path = NULL; } if (registered != 0) xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); VTSCSI_UNLOCK(sc); return (error); } static void vtscsi_free_cam(struct vtscsi_softc *sc) { VTSCSI_LOCK(sc); if (sc->vtscsi_path != NULL) { vtscsi_deregister_async(sc); xpt_free_path(sc->vtscsi_path); sc->vtscsi_path = NULL; xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); } if (sc->vtscsi_sim != NULL) { cam_sim_free(sc->vtscsi_sim, 1); sc->vtscsi_sim = NULL; } VTSCSI_UNLOCK(sc); } static void vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; struct vtscsi_softc *sc; sim = cb_arg; sc = cam_sim_softc(sim); vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code); /* * TODO Once QEMU supports event reporting, we should * (un)subscribe to events here. 
*/ switch (code) { case AC_FOUND_DEVICE: break; case AC_LOST_DEVICE: break; } } static int vtscsi_register_async(struct vtscsi_softc *sc) { struct ccb_setasync csa; VTSCSI_LOCK_NOTOWNED(sc); xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE; csa.callback = vtscsi_cam_async; csa.callback_arg = sc->vtscsi_sim; xpt_action((union ccb *) &csa); return (csa.ccb_h.status); } static void vtscsi_deregister_async(struct vtscsi_softc *sc) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = vtscsi_cam_async; csa.callback_arg = sc->vtscsi_sim; xpt_action((union ccb *) &csa); } static void vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct vtscsi_softc *sc; struct ccb_hdr *ccbh; sc = cam_sim_softc(sim); ccbh = &ccb->ccb_h; VTSCSI_LOCK_OWNED(sc); if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) { /* * The VTSCSI_MTX is briefly dropped between setting * VTSCSI_FLAG_DETACH and deregistering with CAM, so * drop any CCBs that come in during that window. */ ccbh->status = CAM_NO_HBA; xpt_done(ccb); return; } switch (ccbh->func_code) { case XPT_SCSI_IO: vtscsi_cam_scsi_io(sc, sim, ccb); break; case XPT_SET_TRAN_SETTINGS: ccbh->status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: vtscsi_cam_get_tran_settings(sc, ccb); break; case XPT_RESET_BUS: vtscsi_cam_reset_bus(sc, ccb); break; case XPT_RESET_DEV: vtscsi_cam_reset_dev(sc, ccb); break; case XPT_ABORT: vtscsi_cam_abort(sc, ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: vtscsi_cam_path_inquiry(sc, sim, ccb); break; default: vtscsi_dprintf(sc, VTSCSI_ERROR, "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code); ccbh->status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void vtscsi_cam_poll(struct cam_sim *sim) { struct vtscsi_softc *sc; sc = cam_sim_softc(sim); vtscsi_complete_vqs_locked(sc); } static void vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim, union ccb *ccb) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; int error; ccbh = &ccb->ccb_h; csio = &ccb->csio; if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) { error = EINVAL; ccbh->status = CAM_REQ_INVALID; goto done; } if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH && (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) { error = EINVAL; ccbh->status = CAM_REQ_INVALID; goto done; } error = vtscsi_start_scsi_cmd(sc, ccb); done: if (error) { vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status); xpt_done(ccb); } } static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb) { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_SAS; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static void vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb) { int error; error = vtscsi_reset_bus(sc); if (error == 0) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_REQ_CMP_ERR; vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n", error, ccb, ccb->ccb_h.status); xpt_done(ccb); } static void vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb) { struct ccb_hdr *ccbh; struct 
vtscsi_request *req; int error; ccbh = &ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = EAGAIN; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); goto fail; } req->vsr_ccb = ccb; error = vtscsi_execute_reset_dev_cmd(sc, req); if (error == 0) return; vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", error, req, ccb); if (error == EAGAIN) ccbh->status = CAM_RESRC_UNAVAIL; else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); } static void vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb) { struct vtscsi_request *req; struct ccb_hdr *ccbh; int error; ccbh = &ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = EAGAIN; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); goto fail; } req->vsr_ccb = ccb; error = vtscsi_execute_abort_task_cmd(sc, req); if (error == 0) return; vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", error, req, ccb); if (error == EAGAIN) ccbh->status = CAM_RESRC_UNAVAIL; else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); } static void vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim, union ccb *ccb) { device_t dev; struct ccb_pathinq *cpi; dev = sc->vtscsi_dev; cpi = &ccb->cpi; vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN; if (vtscsi_bus_reset_disable != 0) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = sc->vtscsi_max_target; cpi->max_lun = sc->vtscsi_max_lun; cpi->initiator_id = VTSCSI_INITIATOR_ID; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 300000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) * PAGE_SIZE; cpi->hba_vendor = virtio_get_vendor(dev); cpi->hba_device = virtio_get_device(dev); cpi->hba_subvendor = virtio_get_subvendor(dev); cpi->hba_subdevice = virtio_get_subdevice(dev); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); } static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg, struct ccb_scsiio *csio) { struct ccb_hdr *ccbh; struct bus_dma_segment *dseg; int i, error; ccbh = &csio->ccb_h; error = 0; if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { if ((ccbh->flags & CAM_DATA_PHYS) == 0) error = sglist_append(sg, csio->data_ptr, csio->dxfer_len); else error = sglist_append_phys(sg, (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len); } else { for (i = 0; i < csio->sglist_cnt && error == 0; i++) { dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) error = sglist_append(sg, (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len); else error = sglist_append_phys(sg, (vm_paddr_t) dseg->ds_addr, dseg->ds_len); } } return (error); } static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req, int *readable, int *writable) { struct sglist *sg; struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct virtio_scsi_cmd_req *cmd_req; struct virtio_scsi_cmd_resp *cmd_resp; int error; sg = sc->vtscsi_sglist; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_req = &req->vsr_cmd_req; cmd_resp = &req->vsr_cmd_resp; sglist_reset(sg); sglist_append(sg, 
cmd_req, sizeof(struct virtio_scsi_cmd_req)); if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { error = vtscsi_sg_append_scsi_buf(sc, sg, csio); /* At least one segment must be left for the response. */ if (error || sg->sg_nseg == sg->sg_maxseg) goto fail; } *readable = sg->sg_nseg; sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp)); if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) { error = vtscsi_sg_append_scsi_buf(sc, sg, csio); if (error) goto fail; } *writable = sg->sg_nseg - *readable; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d " "writable=%d\n", req, ccbh, *readable, *writable); return (0); fail: /* * This should never happen unless maxio was incorrectly set. */ vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0); vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p " "nseg=%d maxseg=%d\n", error, req, ccbh, sg->sg_nseg, sg->sg_maxseg); return (EFBIG); } static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct virtqueue *vq; struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct virtio_scsi_cmd_req *cmd_req; struct virtio_scsi_cmd_resp *cmd_resp; int readable, writable, error; sg = sc->vtscsi_sglist; vq = sc->vtscsi_request_vq; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_req = &req->vsr_cmd_req; cmd_resp = &req->vsr_cmd_resp; vtscsi_init_scsi_cmd_req(csio, cmd_req); error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable); if (error) return (error); req->vsr_complete = vtscsi_complete_scsi_cmd; cmd_resp->response = -1; error = virtqueue_enqueue(vq, req, sg, readable, writable); if (error) { vtscsi_dprintf(sc, VTSCSI_ERROR, "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh); ccbh->status = CAM_REQUEUE_REQ; vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ); return (error); } ccbh->status |= CAM_SIM_QUEUED; ccbh->ccbh_vtscsi_req = req; virtqueue_notify(vq); if (ccbh->timeout != CAM_TIME_INFINITY) { req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET; callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000, vtscsi_timedout_scsi_cmd, req); } vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n", req, ccbh); return (0); } static int vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb) { struct vtscsi_request *req; int error; req = vtscsi_dequeue_request(sc); if (req == NULL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; vtscsi_freeze_simq(sc, VTSCSI_REQUEST); return (ENOBUFS); } req->vsr_ccb = ccb; error = vtscsi_execute_scsi_cmd(sc, req); if (error) vtscsi_enqueue_request(sc, req); return (error); } static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct virtio_scsi_ctrl_tmf_resp *tmf_resp; struct vtscsi_request *to_req; uint8_t response; tmf_resp = &req->vsr_tmf_resp; response = tmf_resp->response; to_req = req->vsr_timedout_req; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n", req, to_req, response); vtscsi_enqueue_request(sc, req); /* * The timedout request could have completed between when the * abort task was sent and when the host processed it. */ if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT) return; /* The timedout request was successfully aborted. */ if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) return; /* Don't bother if the device is going away. */ if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) return; /* The timedout request will be aborted by the reset. 
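* VTSCSI_FLAG_RESET means a bus reset is already under way, and * vtscsi_reset_bus() cancels every outstanding request.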
*/ if (sc->vtscsi_flags & VTSCSI_FLAG_RESET) return; vtscsi_reset_bus(sc); } static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *to_req) { struct sglist *sg; struct ccb_hdr *to_ccbh; struct vtscsi_request *req; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; int error; sg = sc->vtscsi_sglist; to_ccbh = &to_req->vsr_ccb->ccb_h; req = vtscsi_dequeue_request(sc); if (req == NULL) { error = ENOBUFS; goto fail; } tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, (uintptr_t) to_ccbh, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_timedout_req = to_req; req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); if (error == 0) return (0); vtscsi_enqueue_request(sc, req); fail: vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p " "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh); return (error); } static void vtscsi_timedout_scsi_cmd(void *xreq) { struct vtscsi_softc *sc; struct vtscsi_request *to_req; to_req = xreq; sc = to_req->vsr_softc; vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n", to_req, to_req->vsr_ccb, to_req->vsr_state); /* Don't bother if the device is going away. */ if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) return; /* * Bail if the request is not in use. We likely raced when * stopping the callout handler or it has already been aborted. */ if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE || (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0) return; /* * Complete the request queue in case the timedout request is * actually just pending. 
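* If it has completed, its state is back to VTSCSI_REQ_STATE_FREE and * the timeout can be ignored below.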
*/ vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE) return; sc->vtscsi_stats.scsi_cmd_timeouts++; to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT; if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0) return; vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n"); vtscsi_reset_bus(sc); } static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp) { cam_status status; switch (cmd_resp->response) { case VIRTIO_SCSI_S_OK: status = CAM_REQ_CMP; break; case VIRTIO_SCSI_S_OVERRUN: status = CAM_DATA_RUN_ERR; break; case VIRTIO_SCSI_S_ABORTED: status = CAM_REQ_ABORTED; break; case VIRTIO_SCSI_S_BAD_TARGET: status = CAM_TID_INVALID; break; case VIRTIO_SCSI_S_RESET: status = CAM_SCSI_BUS_RESET; break; case VIRTIO_SCSI_S_BUSY: status = CAM_SCSI_BUSY; break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: case VIRTIO_SCSI_S_TARGET_FAILURE: case VIRTIO_SCSI_S_NEXUS_FAILURE: status = CAM_SCSI_IT_NEXUS_LOST; break; default: /* VIRTIO_SCSI_S_FAILURE */ status = CAM_REQ_CMP_ERR; break; } return (status); } static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc, struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp) { cam_status status; csio->scsi_status = cmd_resp->status; csio->resid = cmd_resp->resid; if (csio->scsi_status == SCSI_STATUS_OK) status = CAM_REQ_CMP; else status = CAM_SCSI_STATUS_ERROR; if (cmd_resp->sense_len > 0) { status |= CAM_AUTOSNS_VALID; if (cmd_resp->sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - cmd_resp->sense_len; else csio->sense_resid = 0; bzero(&csio->sense_data, sizeof(csio->sense_data)); memcpy(&csio->sense_data, cmd_resp->sense, csio->sense_len - csio->sense_resid); } vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR, "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n", csio, csio->scsi_status, csio->resid, csio->sense_resid); return (status); } static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct ccb_hdr *ccbh; struct ccb_scsiio *csio; struct virtio_scsi_cmd_resp *cmd_resp; cam_status status; csio = &req->vsr_ccb->csio; ccbh = &csio->ccb_h; cmd_resp = &req->vsr_cmd_resp; KASSERT(ccbh->ccbh_vtscsi_req == req, ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req)); if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) callout_stop(&req->vsr_callout); status = vtscsi_scsi_cmd_cam_status(cmd_resp); if (status == CAM_REQ_ABORTED) { if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT) status = CAM_CMD_TIMEOUT; } else if (status == CAM_REQ_CMP) status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccbh->path, 1); } if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) status |= CAM_RELEASE_SIMQ; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n", req, ccbh, status); ccbh->status = status; xpt_done(req->vsr_ccb); vtscsi_enqueue_request(sc, req); } static void vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req) { /* XXX We probably shouldn't poll forever. 
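* An unresponsive host leaves this loop spinning with the softc * mutex held.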
*/ req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED; do vtscsi_complete_vq(sc, sc->vtscsi_control_vq); while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0); req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED; } static int vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req, struct sglist *sg, int readable, int writable, int flag) { struct virtqueue *vq; int error; vq = sc->vtscsi_control_vq; MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL); error = virtqueue_enqueue(vq, req, sg, readable, writable); if (error) { /* * Return EAGAIN when the virtqueue does not have enough * descriptors available. */ if (error == ENOSPC || error == EMSGSIZE) error = EAGAIN; return (error); } virtqueue_notify(vq); if (flag == VTSCSI_EXECUTE_POLL) vtscsi_poll_ctrl_req(sc, req); return (0); } static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; ccb = req->vsr_ccb; ccbh = &ccb->ccb_h; tmf_resp = &req->vsr_tmf_resp; switch (tmf_resp->response) { case VIRTIO_SCSI_S_FUNCTION_COMPLETE: ccbh->status = CAM_REQ_CMP; break; case VIRTIO_SCSI_S_FUNCTION_REJECTED: ccbh->status = CAM_UA_ABORT; break; default: ccbh->status = CAM_REQ_CMP_ERR; break; } xpt_done(ccb); vtscsi_enqueue_request(sc, req); } static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct ccb_abort *cab; struct ccb_hdr *ccbh; struct ccb_hdr *abort_ccbh; struct vtscsi_request *abort_req; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; int error; sg = sc->vtscsi_sglist; cab = &req->vsr_ccb->cab; ccbh = &cab->ccb_h; tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; /* CCB header and request that's to be aborted. */ abort_ccbh = &cab->abort_ccb->ccb_h; abort_req = abort_ccbh->ccbh_vtscsi_req; if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) { error = EINVAL; goto fail; } /* Only attempt to abort requests that could be in-flight. 
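* A request no longer in the INUSE state has either never been started * or has already completed, so there is nothing for the host to abort.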
*/ if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) { error = EALREADY; goto fail; } abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED; if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) callout_stop(&abort_req->vsr_callout); vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, (uintptr_t) abort_ccbh, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_complete = vtscsi_complete_abort_task_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); fail: vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p " "abort_req=%p\n", error, req, abort_ccbh, abort_req); return (error); } static void vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; ccb = req->vsr_ccb; ccbh = &ccb->ccb_h; tmf_resp = &req->vsr_tmf_resp; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n", req, ccb, tmf_resp->response); if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) { ccbh->status = CAM_REQ_CMP; vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id, ccbh->target_lun); } else ccbh->status = CAM_REQ_CMP_ERR; xpt_done(ccb); vtscsi_enqueue_request(sc, req); } static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) { struct sglist *sg; struct ccb_resetdev *crd; struct ccb_hdr *ccbh; struct virtio_scsi_ctrl_tmf_req *tmf_req; struct virtio_scsi_ctrl_tmf_resp *tmf_resp; uint32_t subtype; int error; sg = sc->vtscsi_sglist; crd = &req->vsr_ccb->crd; ccbh = &crd->ccb_h; tmf_req = &req->vsr_tmf_req; tmf_resp = &req->vsr_tmf_resp; if (ccbh->target_lun == CAM_LUN_WILDCARD) subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET; else subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET; vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req); sglist_reset(sg); sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); req->vsr_complete = vtscsi_complete_reset_dev_cmd; tmf_resp->response = -1; error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, VTSCSI_EXECUTE_ASYNC); vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n", error, req, ccbh); return (error); } static void vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id) { *target_id = lun[1]; *lun_id = (lun[2] << 8) | lun[3]; } static void vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[]) { lun[0] = 1; lun[1] = ccbh->target_id; lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F); lun[3] = ccbh->target_lun & 0xFF; } static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio, struct virtio_scsi_cmd_req *cmd_req) { uint8_t attr; switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: attr = VIRTIO_SCSI_S_HEAD; break; case MSG_ORDERED_Q_TAG: attr = VIRTIO_SCSI_S_ORDERED; break; case MSG_ACA_TASK: attr = VIRTIO_SCSI_S_ACA; break; default: /* MSG_SIMPLE_Q_TAG */ attr = VIRTIO_SCSI_S_SIMPLE; break; } vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun); cmd_req->tag = (uintptr_t) csio; cmd_req->task_attr = attr; memcpy(cmd_req->cdb, csio->ccb_h.flags & CAM_CDB_POINTER ? 
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes, csio->cdb_len); } static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req) { vtscsi_set_request_lun(ccbh, tmf_req->lun); tmf_req->type = VIRTIO_SCSI_T_TMF; tmf_req->subtype = subtype; tmf_req->tag = tag; } static void vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason) { int frozen; frozen = sc->vtscsi_frozen; if (reason & VTSCSI_REQUEST && (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0) sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS; if (reason & VTSCSI_REQUEST_VQ && (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0) sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL; /* Freeze the SIMQ if transitioned to frozen. */ if (frozen == 0 && sc->vtscsi_frozen != 0) { vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n"); xpt_freeze_simq(sc->vtscsi_sim, 1); } } static int vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason) { int thawed; if (sc->vtscsi_frozen == 0 || reason == 0) return (0); if (reason & VTSCSI_REQUEST && sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS; if (reason & VTSCSI_REQUEST_VQ && sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL; thawed = sc->vtscsi_frozen == 0; if (thawed != 0) vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n"); return (thawed); } static void vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; /* Use the wildcard path from our softc for bus announcements. */ if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) { xpt_async(ac_code, sc->vtscsi_path, NULL); return; } if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim), target_id, lun_id) != CAM_REQ_CMP) { vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id, lun_id_t lun_id) { union ccb *ccb; cam_status status; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n"); return; } status = xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->vtscsi_sim), target_id, lun_id); if (status != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void vtscsi_execute_rescan_bus(struct vtscsi_softc *sc) { vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); } static void vtscsi_transport_reset_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { target_id_t target_id; lun_id_t lun_id; vtscsi_get_request_lun(event->lun, &target_id, &lun_id); switch (event->reason) { case VIRTIO_SCSI_EVT_RESET_RESCAN: case VIRTIO_SCSI_EVT_RESET_REMOVED: vtscsi_execute_rescan(sc, target_id, lun_id); break; default: device_printf(sc->vtscsi_dev, "unhandled transport event reason: %d\n", event->reason); break; } } static void vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { int error; if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) { switch (event->event) { case VIRTIO_SCSI_T_TRANSPORT_RESET: vtscsi_transport_reset_event(sc, event); break; default: device_printf(sc->vtscsi_dev, "unhandled event: %d\n", event->event); break; } } else vtscsi_execute_rescan_bus(sc); /* * This should always be successful since the buffer * was just dequeued. 
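* The descriptor it occupied was returned to the free list by the * dequeue, so the single-buffer enqueue cannot fail for lack of * descriptors.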
*/ error = vtscsi_enqueue_event_buf(sc, event); KASSERT(error == 0, ("cannot requeue event buffer: %d", error)); } static int vtscsi_enqueue_event_buf(struct vtscsi_softc *sc, struct virtio_scsi_event *event) { struct sglist *sg; struct virtqueue *vq; int size, error; sg = sc->vtscsi_sglist; vq = sc->vtscsi_event_vq; size = sc->vtscsi_event_buf_size; bzero(event, size); sglist_reset(sg); error = sglist_append(sg, event, size); if (error) return (error); error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg); if (error) return (error); virtqueue_notify(vq); return (0); } static int vtscsi_init_event_vq(struct vtscsi_softc *sc) { struct virtio_scsi_event *event; int i, size, error; /* * The first release of QEMU with VirtIO SCSI support would crash * when attempting to notify the event virtqueue. This was fixed * when hotplug support was added. */ if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) size = sc->vtscsi_event_buf_size; else size = 0; if (size < sizeof(struct virtio_scsi_event)) return (0); for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { event = &sc->vtscsi_event_bufs[i]; error = vtscsi_enqueue_event_buf(sc, event); if (error) break; } /* * Even just one buffer is enough. Missed events are * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag. */ if (i > 0) error = 0; return (error); } static void vtscsi_reinit_event_vq(struct vtscsi_softc *sc) { struct virtio_scsi_event *event; int i, error; if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 || sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event)) return; for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { event = &sc->vtscsi_event_bufs[i]; error = vtscsi_enqueue_event_buf(sc, event); if (error) break; } KASSERT(i > 0, ("cannot reinit event vq: %d", error)); } static void vtscsi_drain_event_vq(struct vtscsi_softc *sc) { struct virtqueue *vq; int last; vq = sc->vtscsi_event_vq; last = 0; while (virtqueue_drain(vq, &last) != NULL) ; KASSERT(virtqueue_empty(vq), ("eventvq not empty")); } static void vtscsi_complete_vqs_locked(struct vtscsi_softc *sc) { VTSCSI_LOCK_OWNED(sc); if (sc->vtscsi_request_vq != NULL) vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (sc->vtscsi_control_vq != NULL) vtscsi_complete_vq(sc, sc->vtscsi_control_vq); } static void vtscsi_complete_vqs(struct vtscsi_softc *sc) { VTSCSI_LOCK(sc); vtscsi_complete_vqs_locked(sc); VTSCSI_UNLOCK(sc); } static void vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { union ccb *ccb; int detach; ccb = req->vsr_ccb; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb); /* * The callout must be drained when detaching since the request is * about to be freed. The VTSCSI_MTX must not be held for this in * case the callout is pending because there is a deadlock potential. * Otherwise, the virtqueue is being drained because of a bus reset * so we only need to attempt to stop the callouts. 
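* (The callouts are initialized with callout_init_mtx() on VTSCSI_MTX, * so a running handler owns the lock callout_drain() would sleep * behind.)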
*/ detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0; if (detach != 0) VTSCSI_LOCK_NOTOWNED(sc); else VTSCSI_LOCK_OWNED(sc); if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) { if (detach != 0) callout_drain(&req->vsr_callout); else callout_stop(&req->vsr_callout); } if (ccb != NULL) { if (detach != 0) { VTSCSI_LOCK(sc); ccb->ccb_h.status = CAM_NO_HBA; } else ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); if (detach != 0) VTSCSI_UNLOCK(sc); } vtscsi_enqueue_request(sc, req); } static void vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq) { struct vtscsi_request *req; int last; last = 0; vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq); while ((req = virtqueue_drain(vq, &last)) != NULL) vtscsi_cancel_request(sc, req); KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); } static void vtscsi_drain_vqs(struct vtscsi_softc *sc) { if (sc->vtscsi_control_vq != NULL) vtscsi_drain_vq(sc, sc->vtscsi_control_vq); if (sc->vtscsi_request_vq != NULL) vtscsi_drain_vq(sc, sc->vtscsi_request_vq); if (sc->vtscsi_event_vq != NULL) vtscsi_drain_event_vq(sc); } static void vtscsi_stop(struct vtscsi_softc *sc) { vtscsi_disable_vqs_intr(sc); virtio_stop(sc->vtscsi_dev); } static int vtscsi_reset_bus(struct vtscsi_softc *sc) { int error; VTSCSI_LOCK_OWNED(sc); if (vtscsi_bus_reset_disable != 0) { device_printf(sc->vtscsi_dev, "bus reset disabled\n"); return (0); } sc->vtscsi_flags |= VTSCSI_FLAG_RESET; /* * vtscsi_stop() will cause the in-flight requests to be canceled. * Those requests are then completed here so CAM will retry them * after the reset is complete. */ vtscsi_stop(sc); vtscsi_complete_vqs_locked(sc); /* Rid the virtqueues of any remaining requests. */ vtscsi_drain_vqs(sc); /* * Any resource shortage that froze the SIMQ cannot persist across * a bus reset so ensure it gets thawed here. */ if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) xpt_release_simq(sc->vtscsi_sim, 0); error = vtscsi_reinit(sc); if (error) { device_printf(sc->vtscsi_dev, "reinitialization failed, stopping device...\n"); vtscsi_stop(sc); } else vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET; return (error); } static void vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { #ifdef INVARIANTS int req_nsegs, resp_nsegs; req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq)); resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp)); KASSERT(req_nsegs == 1, ("request crossed page boundary")); KASSERT(resp_nsegs == 1, ("response crossed page boundary")); #endif req->vsr_softc = sc; callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0); } static int vtscsi_alloc_requests(struct vtscsi_softc *sc) { struct vtscsi_request *req; int i, nreqs; /* * Commands destined for either the request or control queues come * from the same SIM queue. Use the size of the request virtqueue * as it (should) be much more frequently used. Some additional * requests are allocated for internal (TMF) use. 
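* Without indirect descriptors, each request consumes at least * VTSCSI_MIN_SEGMENTS descriptors, so proportionally fewer requests * can be outstanding at once.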
nreqs = virtqueue_size(sc->vtscsi_request_vq); if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) nreqs /= VTSCSI_MIN_SEGMENTS; nreqs += VTSCSI_RESERVED_REQUESTS; for (i = 0; i < nreqs; i++) { req = malloc(sizeof(struct vtscsi_request), M_DEVBUF, M_NOWAIT); if (req == NULL) return (ENOMEM); vtscsi_init_request(sc, req); sc->vtscsi_nrequests++; vtscsi_enqueue_request(sc, req); } return (0); } static void vtscsi_free_requests(struct vtscsi_softc *sc) { struct vtscsi_request *req; while ((req = vtscsi_dequeue_request(sc)) != NULL) { KASSERT(callout_active(&req->vsr_callout) == 0, ("request callout still active")); sc->vtscsi_nrequests--; free(req, M_DEVBUF); } KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d", sc->vtscsi_nrequests)); } static void vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req) { KASSERT(req->vsr_softc == sc, ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc)); vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); /* A request is available so the SIMQ could be released. */ if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0) xpt_release_simq(sc->vtscsi_sim, 1); req->vsr_ccb = NULL; req->vsr_complete = NULL; req->vsr_ptr0 = NULL; req->vsr_state = VTSCSI_REQ_STATE_FREE; req->vsr_flags = 0; bzero(&req->vsr_ureq, sizeof(req->vsr_ureq)); bzero(&req->vsr_uresp, sizeof(req->vsr_uresp)); /* * We insert at the tail of the queue in order to make it * very unlikely a request will be reused if we race with * stopping its callout handler. */ TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link); } static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *sc) { struct vtscsi_request *req; req = TAILQ_FIRST(&sc->vtscsi_req_free); if (req != NULL) { req->vsr_state = VTSCSI_REQ_STATE_INUSE; TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link); } else sc->vtscsi_stats.dequeue_no_requests++; vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); return (req); } static void vtscsi_complete_request(struct vtscsi_request *req) { if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED) req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE; if (req->vsr_complete != NULL) req->vsr_complete(req->vsr_softc, req); } static void vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq) { struct vtscsi_request *req; VTSCSI_LOCK_OWNED(sc); while ((req = virtqueue_dequeue(vq, NULL)) != NULL) vtscsi_complete_request(req); } static void vtscsi_control_vq_task(void *arg, int pending) { struct vtscsi_softc *sc; struct virtqueue *vq; sc = arg; vq = sc->vtscsi_control_vq; VTSCSI_LOCK(sc); vtscsi_complete_vq(sc, sc->vtscsi_control_vq); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); return; } VTSCSI_UNLOCK(sc); } static void vtscsi_event_vq_task(void *arg, int pending) { struct vtscsi_softc *sc; struct virtqueue *vq; struct virtio_scsi_event *event; sc = arg; vq = sc->vtscsi_event_vq; VTSCSI_LOCK(sc); while ((event = virtqueue_dequeue(vq, NULL)) != NULL) vtscsi_handle_event(sc, event); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_event_intr_task); return; } VTSCSI_UNLOCK(sc); } static void vtscsi_request_vq_task(void *arg, int pending) { struct vtscsi_softc *sc; struct virtqueue *vq; sc = arg; vq = sc->vtscsi_request_vq; VTSCSI_LOCK(sc); vtscsi_complete_vq(sc, sc->vtscsi_request_vq); if (virtqueue_enable_intr(vq) != 0) { virtqueue_disable_intr(vq); VTSCSI_UNLOCK(sc); 
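/* More requests completed while re-enabling the interrupt; reschedule. */ 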
taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); return; } VTSCSI_UNLOCK(sc); } -static int +static void vtscsi_control_vq_intr(void *xsc) { struct vtscsi_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtscsi_control_vq); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); - - return (1); } -static int +static void vtscsi_event_vq_intr(void *xsc) { struct vtscsi_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtscsi_event_vq); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_event_intr_task); - - return (1); } -static int +static void vtscsi_request_vq_intr(void *xsc) { struct vtscsi_softc *sc; sc = xsc; virtqueue_disable_intr(sc->vtscsi_request_vq); taskqueue_enqueue_fast(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); - - return (1); } static void vtscsi_disable_vqs_intr(struct vtscsi_softc *sc) { virtqueue_disable_intr(sc->vtscsi_control_vq); virtqueue_disable_intr(sc->vtscsi_event_vq); virtqueue_disable_intr(sc->vtscsi_request_vq); } static void vtscsi_enable_vqs_intr(struct vtscsi_softc *sc) { virtqueue_enable_intr(sc->vtscsi_control_vq); virtqueue_enable_intr(sc->vtscsi_event_vq); virtqueue_enable_intr(sc->vtscsi_request_vq); } static void vtscsi_get_tunables(struct vtscsi_softc *sc) { char tmpstr[64]; TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug); snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level", device_get_unit(sc->vtscsi_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug); } static void vtscsi_add_sysctl(struct vtscsi_softc *sc) { device_t dev; struct vtscsi_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtscsi_dev; stats = &sc->vtscsi_stats; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level", CTLFLAG_RW, &sc->vtscsi_debug, 0, "Debug level"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts", CTLFLAG_RD, &stats->scsi_cmd_timeouts, "SCSI command timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests", CTLFLAG_RD, &stats->dequeue_no_requests, "No available requests to dequeue"); } static void vtscsi_printf_req(struct vtscsi_request *req, const char *func, const char *fmt, ...) { struct vtscsi_softc *sc; union ccb *ccb; struct sbuf sb; va_list ap; char str[192]; char path_str[64]; if (req == NULL) return; sc = req->vsr_softc; ccb = req->vsr_ccb; va_start(ap, fmt); sbuf_new(&sb, str, sizeof(str), 0); if (ccb == NULL) { sbuf_printf(&sb, "(noperiph:%s%d:%u): ", cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim), cam_sim_bus(sc->vtscsi_sim)); } else { xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&ccb->csio, &sb); sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len); } } sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func, sbuf_data(&sb)); } Index: projects/virtio/sys/dev/virtio/virtqueue.h =================================================================== --- projects/virtio/sys/dev/virtio/virtqueue.h (revision 245709) +++ projects/virtio/sys/dev/virtio/virtqueue.h (revision 245710) @@ -1,94 +1,94 @@ /*- * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _VIRTIO_VIRTQUEUE_H #define _VIRTIO_VIRTQUEUE_H struct virtqueue; struct sglist; /* Support for indirect buffer descriptors. */ #define VIRTIO_RING_F_INDIRECT_DESC (1 << 28) /* Support to suppress interrupt until specific index is reached. */ #define VIRTIO_RING_F_EVENT_IDX (1 << 29) /* Device callback for a virtqueue interrupt. */ -typedef int virtqueue_intr_t(void *); +typedef void virtqueue_intr_t(void *); #define VIRTQUEUE_MAX_NAME_SZ 32 /* One for each virtqueue the device wishes to allocate. */ struct vq_alloc_info { char vqai_name[VIRTQUEUE_MAX_NAME_SZ]; int vqai_maxindirsz; virtqueue_intr_t *vqai_intr; void *vqai_intr_arg; struct virtqueue **vqai_vq; }; #define VQ_ALLOC_INFO_INIT(_i,_nsegs,_intr,_arg,_vqp,_str,...) do { \ snprintf((_i)->vqai_name, VIRTQUEUE_MAX_NAME_SZ, _str, \ ##__VA_ARGS__); \ (_i)->vqai_maxindirsz = (_nsegs); \ (_i)->vqai_intr = (_intr); \ (_i)->vqai_intr_arg = (_arg); \ (_i)->vqai_vq = (_vqp); \ } while (0) uint64_t virtqueue_filter_features(uint64_t features); int virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align, vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp); void *virtqueue_drain(struct virtqueue *vq, int *last); void virtqueue_free(struct virtqueue *vq); int virtqueue_reinit(struct virtqueue *vq, uint16_t size); int virtqueue_intr_filter(struct virtqueue *vq); void virtqueue_intr(struct virtqueue *vq); int virtqueue_enable_intr(struct virtqueue *vq); int virtqueue_postpone_intr(struct virtqueue *vq); void virtqueue_disable_intr(struct virtqueue *vq); /* Get physical address of the virtqueue ring. */ vm_paddr_t virtqueue_paddr(struct virtqueue *vq); int virtqueue_full(struct virtqueue *vq); int virtqueue_empty(struct virtqueue *vq); int virtqueue_size(struct virtqueue *vq); int virtqueue_nused(struct virtqueue *vq); void virtqueue_notify(struct virtqueue *vq); void virtqueue_dump(struct virtqueue *vq); int virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg, int readable, int writable); void *virtqueue_dequeue(struct virtqueue *vq, uint32_t *len); void *virtqueue_poll(struct virtqueue *vq, uint32_t *len); #endif /* _VIRTIO_VIRTQUEUE_H */
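/*
 * Illustrative sketch of the usual pattern for wiring a virtqueue to a
 * device with the declarations above, following the same shape as
 * vtscsi_alloc_virtqueues() earlier in this diff. "my_vq_intr", "sc",
 * "dev", and "vq" are placeholder names; virtio_alloc_virtqueues() is
 * the bus helper that consumes the vq_alloc_info array.
 *
 *	static void my_vq_intr(void *xsc);	(a virtqueue_intr_t)
 *
 *	struct vq_alloc_info vq_info[1];
 *	struct virtqueue *vq;
 *	int error;
 *
 *	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, my_vq_intr, sc, &vq,
 *	    "%s myqueue", device_get_nameunit(dev));
 *	error = virtio_alloc_virtqueues(dev, 0, 1, vq_info);
 */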