diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c
index 36c6a255c385..6059ef76eceb 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.c
+++ b/sys/dev/virtio/mmio/virtio_mmio.c
@@ -1,1027 +1,1027 @@
/*-
* Copyright (c) 2014 Ruslan Bukin
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by Andrew Turner
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* VirtIO MMIO interface.
* This driver is heavily based on VirtIO PCI interface driver.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
/*
 * Per-virtqueue bookkeeping: the allocated virtqueue and whether the
 * child driver supplied an interrupt callback for it.
 */
struct vtmmio_virtqueue {
	struct virtqueue *vtv_vq;	/* host-side ring state */
	int		  vtv_no_intr;	/* set when vqai_intr was NULL */
};
static int vtmmio_detach(device_t);
static int vtmmio_suspend(device_t);
static int vtmmio_resume(device_t);
static int vtmmio_shutdown(device_t);
static void vtmmio_driver_added(device_t, driver_t *);
static void vtmmio_child_detached(device_t, device_t);
static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtmmio_negotiate_features(device_t, uint64_t);
static int vtmmio_finalize_features(device_t);
-static int vtmmio_with_feature(device_t, uint64_t);
+static bool vtmmio_with_feature(device_t, uint64_t);
static void vtmmio_set_virtqueue(struct vtmmio_softc *sc,
struct virtqueue *vq, uint32_t size);
static int vtmmio_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
static int vtmmio_config_generation(device_t);
static uint8_t vtmmio_get_status(device_t);
static void vtmmio_set_status(device_t, uint8_t);
static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static uint64_t vtmmio_read_dev_config_8(struct vtmmio_softc *, bus_size_t);
static void vtmmio_write_dev_config(device_t, bus_size_t, const void *, int);
static void vtmmio_describe_features(struct vtmmio_softc *, const char *,
uint64_t);
static void vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_free_interrupts(struct vtmmio_softc *);
static void vtmmio_free_virtqueues(struct vtmmio_softc *);
static void vtmmio_release_child_resources(struct vtmmio_softc *);
static void vtmmio_reset(struct vtmmio_softc *);
static void vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_vq_intr(void *);
/*
 * I/O port read/write wrappers.
 *
 * Writes are bracketed by optional platform hooks: VIRTIO_MMIO_PREWRITE
 * runs before the register write and VIRTIO_MMIO_NOTE after it, letting a
 * platform backend observe every device register write.  Reads go straight
 * to the register window (res[0]).
 */
#define vtmmio_write_config_1(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_1((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define vtmmio_write_config_2(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_2((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define vtmmio_write_config_4(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_4((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)

#define vtmmio_read_config_1(sc, o) \
	bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
	bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
	bus_read_4((sc)->res[0], (o))
/*
 * newbus/kobj dispatch tables: device methods, bus methods for the single
 * generic child, and the virtio bus interface consumed by device drivers.
 * Note device_probe is not listed; vtmmio_probe() is public and presumably
 * invoked by bus-specific front-ends — confirm against the FDT/ACPI glue.
 */
static device_method_t vtmmio_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_attach,	vtmmio_attach),
	DEVMETHOD(device_detach,	vtmmio_detach),
	DEVMETHOD(device_suspend,	vtmmio_suspend),
	DEVMETHOD(device_resume,	vtmmio_resume),
	DEVMETHOD(device_shutdown,	vtmmio_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added,	vtmmio_driver_added),
	DEVMETHOD(bus_child_detached,	vtmmio_child_detached),
	DEVMETHOD(bus_child_pnpinfo,	virtio_child_pnpinfo),
	DEVMETHOD(bus_read_ivar,	vtmmio_read_ivar),
	DEVMETHOD(bus_write_ivar,	vtmmio_write_ivar),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features,	vtmmio_negotiate_features),
	DEVMETHOD(virtio_bus_finalize_features,		vtmmio_finalize_features),
	DEVMETHOD(virtio_bus_with_feature,		vtmmio_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues,		vtmmio_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr,		vtmmio_setup_intr),
	DEVMETHOD(virtio_bus_stop,			vtmmio_stop),
	DEVMETHOD(virtio_bus_poll,			vtmmio_poll),
	DEVMETHOD(virtio_bus_reinit,			vtmmio_reinit),
	DEVMETHOD(virtio_bus_reinit_complete,		vtmmio_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq,			vtmmio_notify_virtqueue),
	DEVMETHOD(virtio_bus_config_generation,		vtmmio_config_generation),
	DEVMETHOD(virtio_bus_read_device_config,	vtmmio_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config,	vtmmio_write_dev_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));
MODULE_VERSION(virtio_mmio, 1);
/*
 * Probe: temporarily map the register window and validate that a virtio
 * MMIO device is actually behind it (magic value, supported version,
 * non-zero device ID).  The window is always released before returning;
 * attach maps it again.
 *
 * Returns BUS_PROBE_DEFAULT on success, ENXIO otherwise.
 */
int
vtmmio_probe(device_t dev)
{
	struct vtmmio_softc *sc;
	int error, rid;
	uint32_t magic, version;

	sc = device_get_softc(dev);

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	/*
	 * Single exit path: every check below falls through to the resource
	 * release instead of duplicating it (the original repeated the
	 * bus_release_resource() call on each failure branch).
	 */
	error = ENXIO;

	magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != VIRTIO_MMIO_MAGIC_VIRT) {
		device_printf(dev, "Bad magic value %#x\n", magic);
		goto out;
	}

	/* Version 1 is legacy, version 2 is modern; nothing else is known. */
	version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
	if (version < 1 || version > 2) {
		device_printf(dev, "Unsupported version: %#x\n", version);
		goto out;
	}

	/* Per the virtio spec, a device ID of 0 means no device present. */
	if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0)
		goto out;

	device_set_desc(dev, "VirtIO MMIO adapter");
	error = BUS_PROBE_DEFAULT;
out:
	bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
	return (error);
}
/*
 * Install the interrupt handler for all virtqueues.  A platform backend
 * gets first shot at providing interrupt delivery; otherwise allocate and
 * wire up the device's IRQ resource.
 *
 * Returns 0 on success, ENXIO on allocation or setup failure.
 */
static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
	struct vtmmio_softc *sc;
	int rid;
	int err;

	sc = device_get_softc(dev);

	if (sc->platform != NULL) {
		err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
		    vtmmio_vq_intr, sc);
		if (err == 0) {
			/* Okay we have backend-specific interrupts */
			return (0);
		}
	}

	rid = 0;
	sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->res[1] == NULL) {
		device_printf(dev, "Can't allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
	    NULL, vtmmio_vq_intr, sc, &sc->ih)) {
		device_printf(dev, "Can't setup the interrupt\n");
		/*
		 * Release the IRQ resource we just allocated; the original
		 * left it allocated and recorded in sc->res[1] on failure.
		 */
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->res[1]);
		sc->res[1] = NULL;
		return (ENXIO);
	}

	return (0);
}
/*
 * Attach: map the register window, remember the device version, reset the
 * device, acknowledge it, and add the single generic child device that a
 * virtio device driver will later claim.
 */
int
vtmmio_attach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);

	vtmmio_reset(sc);

	/* Tell the host we've noticed this device. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
		device_printf(dev, "Cannot create child device.\n");
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		/* Detach releases the register window mapped above. */
		vtmmio_detach(dev);
		return (ENOMEM);
	}

	sc->vtmmio_child_dev = child;

	vtmmio_probe_and_attach_child(sc);

	return (0);
}
/*
 * Detach: delete the child (propagating its refusal, if any), reset the
 * device, and release the register window.
 */
static int
vtmmio_detach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int error;

	sc = device_get_softc(dev);

	if ((child = sc->vtmmio_child_dev) != NULL) {
		error = device_delete_child(dev, child);
		if (error)
			return (error);
		sc->vtmmio_child_dev = NULL;
	}

	vtmmio_reset(sc);

	if (sc->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
	}

	return (0);
}
/* Suspend/resume simply pass through to the generic bus handlers. */
static int
vtmmio_suspend(device_t dev)
{
	return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{
	return (bus_generic_resume(dev));
}

/* Shutdown the children, then quiesce the device itself. */
static int
vtmmio_shutdown(device_t dev)
{
	(void) bus_generic_shutdown(dev);

	/* Forcibly stop the host device. */
	vtmmio_stop(dev);

	return (0);
}
/* A new driver was registered: retry probing the still-unclaimed child. */
static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_probe_and_attach_child(sc);
}

/* The child went away: reset the device and free its resources. */
static void
vtmmio_child_detached(device_t dev, device_t child)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_reset(sc);
	vtmmio_release_child_resources(sc);
}
/*
 * Report instance variables to the child driver.  Device type and vendor
 * come straight from device registers; fields that have no MMIO
 * equivalent report 0.
 */
static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	/* Only our own child may query us. */
	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
	case VIRTIO_IVAR_SUBDEVICE:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
	case VIRTIO_IVAR_DEVICE:
		/*
		 * Dummy value for fields not present in this bus.  Used by
		 * bus-agnostic virtio_child_pnpinfo.
		 */
		*result = 0;
		break;
	case VIRTIO_IVAR_MODERN:
		/*
		 * There are several modern (aka MMIO v2) spec compliance
		 * issues with this driver, but keep the status quo.
		 */
		*result = sc->vtmmio_version > 1;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}
/*
 * Accept instance-variable writes from the child driver.  The feature
 * description table is the only writable ivar.
 */
static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct vtmmio_softc *sc = device_get_softc(dev);

	/* Only our own child may set instance variables. */
	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	if (index != VIRTIO_IVAR_FEATURE_DESC)
		return (ENOENT);

	sc->vtmmio_child_feat_desc = (void *)value;
	return (0);
}
/*
 * Negotiate the feature set: read the 64-bit host feature word (as two
 * 32-bit halves via the SEL register), intersect it with what the child
 * driver supports, filter transport features, and write the accepted set
 * back to the device.  Returns the negotiated features.
 */
static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtmmio_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		/* Modern (v2) devices require VIRTIO_F_VERSION_1. */
		child_features |= VIRTIO_F_VERSION_1;
	}

	/* High half first (SEL=1), then low half (SEL=0). */
	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
	host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
	host_features <<= 32;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
	host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

	vtmmio_describe_features(sc, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtio_filter_transport_features(features);
	sc->vtmmio_features = features;

	vtmmio_describe_features(sc, "negotiated", features);

	/* Write the accepted set back, again split in two halves. */
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

	return (features);
}
/*
 * Conclude feature negotiation.  For modern devices, set FEATURES_OK and
 * read the status back to verify the device accepted the feature set.
 * Legacy devices have no such handshake.  Returns 0 or ENOTSUP.
 */
static int
vtmmio_finalize_features(device_t dev)
{
	struct vtmmio_softc *sc;
	uint8_t status;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		/*
		 * Must re-read the status after setting it to verify the
		 * negotiated features were accepted by the device.
		 */
		vtmmio_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);

		status = vtmmio_get_status(dev);
		if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
			device_printf(dev, "desired features were not accepted\n");
			return (ENOTSUP);
		}
	}

	return (0);
}
-static int
+static bool
vtmmio_with_feature(device_t dev, uint64_t feature)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	/* True iff 'feature' was accepted during negotiation. */
	return ((sc->vtmmio_features & feature) != 0);
}
/*
 * Program the currently-selected queue into the device.  Legacy (v1)
 * devices take a single page frame number for the contiguous ring;
 * modern (v2) devices take 64-bit desc/avail/used addresses, each split
 * across LOW/HIGH registers, and are then marked READY.
 */
static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
	vm_paddr_t paddr;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
		    VIRTIO_MMIO_VRING_ALIGN);
		paddr = virtqueue_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
		    paddr >> PAGE_SHIFT);
	} else {
		paddr = virtqueue_desc_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_avail_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_used_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
		    ((uint64_t)paddr) >> 32);

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
	}
}
/*
 * Allocate 'nvqs' virtqueues described by 'vq_info' and program each into
 * the device.  If any allocation fails, everything allocated so far is
 * freed.  'flags' is not used by this transport.
 */
static int
vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtmmio_virtqueue *vqx;
	struct vq_alloc_info *info;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t size;
	int idx, error;

	sc = device_get_softc(dev);

	if (sc->vtmmio_nvqs != 0)
		return (EALREADY);
	if (nvqs <= 0)
		return (EINVAL);

	sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtmmio_vqs == NULL)
		return (ENOMEM);

	if (sc->vtmmio_version == 1) {
		/* Legacy devices need the guest page size for PFN math. */
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];
		info = &vq_info[idx];

		vtmmio_select_virtqueue(sc, idx);
		/* The device advertises the largest queue size it supports. */
		size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

		error = virtqueue_alloc(dev, idx, size,
		    VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN,
		    ~(vm_paddr_t)0, info, &vq);
		if (error) {
			device_printf(dev,
			    "cannot allocate virtqueue %d: %d\n",
			    idx, error);
			break;
		}

		vtmmio_set_virtqueue(sc, vq, size);

		vqx->vtv_vq = *info->vqai_vq = vq;
		vqx->vtv_no_intr = info->vqai_intr == NULL;

		sc->vtmmio_nvqs++;
	}

	if (error)
		vtmmio_free_virtqueues(sc);

	return (error);
}
/* Stop the device by resetting it; queues and config are invalidated. */
static void
vtmmio_stop(device_t dev)
{
	vtmmio_reset(device_get_softc(dev));
}

/* Give the platform backend a chance to poll; no-op without one. */
static void
vtmmio_poll(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->platform != NULL)
		VIRTIO_MMIO_POLL(sc->platform);
}
/*
 * Re-initialize the device after a reset (e.g. across suspend/resume):
 * walk the status machine back up, re-negotiate 'features', and re-program
 * every previously allocated virtqueue.  The device is not usable again
 * until vtmmio_reinit_complete() sets DRIVER_OK.
 */
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
	struct vtmmio_softc *sc;
	int idx, error;

	sc = device_get_softc(dev);

	if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtmmio_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtmmio_reinit_complete().
	 */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	/*
	 * TODO: Check that features are not added as to what was
	 * originally negotiated.
	 */
	vtmmio_negotiate_features(dev, features);
	error = vtmmio_finalize_features(dev);
	if (error) {
		device_printf(dev, "cannot finalize features during reinit\n");
		return (error);
	}

	if (sc->vtmmio_version == 1) {
		/* Legacy devices need the guest page size for PFN math. */
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		error = vtmmio_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	return (0);
}
/* Mark the re-initialized device fully operational. */
static void
vtmmio_reinit_complete(device_t dev)
{
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

/* Notify the device that 'queue' has new buffers available. */
static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	/* MMIO has a single, fixed notification register. */
	MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY);
	vtmmio_write_config_4(sc, offset, queue);
}
/*
 * Return the device's config-space generation counter, used to detect
 * torn multi-word config reads.  Legacy (v1) devices have no such
 * register, so report a constant 0 for them.
 */
static int
vtmmio_config_generation(device_t dev)
{
	struct vtmmio_softc *sc = device_get_softc(dev);

	if (sc->vtmmio_version <= 1)
		return (0);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_CONFIG_GENERATION));
}
/* Read the device status byte (the register is 32 bits; low byte used). */
static uint8_t
vtmmio_get_status(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

/*
 * Set status bits.  Bits accumulate — the new value is OR-ed with the
 * current status — except for RESET (0), which clears everything.
 */
static void
vtmmio_set_status(device_t dev, uint8_t status)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtmmio_get_status(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}
/*
 * Copy 'length' bytes of device-specific config space at 'offset' into
 * 'dst'.  Modern devices require naturally-sized, aligned, little-endian
 * accesses (1/2/4/8 bytes); legacy devices are read byte-by-byte in
 * guest-native endianness (or wider, with ALLOW_WORD_ALIGNED_ACCESS).
 */
static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds the following restriction:
	 *
	 *   4.2.2.2: For the device-specific configuration space, the driver
	 *   MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
	 *   and aligned accesses for 16 bit wide fields and 32 bit wide and
	 *   aligned accesses for 32 and 64 bit wide fields.
	 *
	 * The endianness also varies between non-legacy and legacy:
	 *
	 *   2.4: Note: The device configuration space uses the little-endian
	 *   format for multi-byte fields.
	 *
	 *   2.4.3: Note that for legacy interfaces, device configuration space
	 *   is generally the guest's native endian, rather than PCI's
	 *   little-endian. The correct endian-ness is documented for each
	 *   device.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			*(uint8_t *)dst = vtmmio_read_config_1(sc, off);
			break;
		case 2:
			*(uint16_t *)dst =
			    le16toh(vtmmio_read_config_2(sc, off));
			break;
		case 4:
			*(uint32_t *)dst =
			    le32toh(vtmmio_read_config_4(sc, off));
			break;
		case 8:
			/* 64-bit reads are guarded by the generation count. */
			*(uint64_t *)dst = vtmmio_read_dev_config_8(sc, off);
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtmmio_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtmmio_read_config_2(sc, off);
		} else
#endif
		{
			size = 1;
			*d = vtmmio_read_config_1(sc, off);
		}
	}
}
/*
 * Read a 64-bit config field as two 32-bit reads, retrying until the
 * generation counter is unchanged across the pair so a torn (mid-update)
 * value is never returned.
 */
static uint64_t
vtmmio_read_dev_config_8(struct vtmmio_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->dev;

	do {
		gen = vtmmio_config_generation(dev);
		val0 = le32toh(vtmmio_read_config_4(sc, off));
		val1 = le32toh(vtmmio_read_config_4(sc, off + 4));
	} while (gen != vtmmio_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}
/*
 * Copy 'length' bytes from 'src' into device-specific config space at
 * 'offset'.  Mirror of vtmmio_read_dev_config: modern devices get sized,
 * little-endian accesses (64-bit fields as two 32-bit writes, low word
 * first); legacy devices are written byte-by-byte.
 */
static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    const void *src, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	const uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds size and alignment
	 * restrictions. It also changes the endianness from native-endian to
	 * little-endian. See vtmmio_read_dev_config.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			vtmmio_write_config_1(sc, off, *(const uint8_t *)src);
			break;
		case 2:
			vtmmio_write_config_2(sc, off,
			    htole16(*(const uint16_t *)src));
			break;
		case 4:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint32_t *)src));
			break;
		case 8:
			/* Low 32 bits first, then the high 32 bits. */
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint64_t *)src));
			vtmmio_write_config_4(sc, off + 4,
			    htole32((*(const uint64_t *)src) >> 32));
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		/*
		 * NOTE(review): these casts drop the const qualifier on 's';
		 * harmless for reads but worth cleaning up.
		 */
		if (length >= 4) {
			size = 4;
			vtmmio_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtmmio_write_config_2(sc, off, *(uint16_t *)s);
		} else
#endif
		{
			size = 1;
			vtmmio_write_config_1(sc, off, *s);
		}
	}
}
/*
 * Print a feature set using the child's description table.  Only emitted
 * when booting verbosely and before the child has attached.
 */
static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}
/*
 * Attempt to probe and attach the generic child, driving the virtio
 * status machine: DRIVER before attach; DRIVER_OK on success; FAILED,
 * then back to ACK (after cleanup) on failure, so a later driver
 * registration can retry.
 */
static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (child == NULL)
		return;

	/* Nothing to do if a driver already claimed the child. */
	if (device_get_state(child) != DS_NOTPRESENT) {
		return;
	}

	if (device_probe(child) != 0) {
		return;
	}

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (device_attach(child) != 0) {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_reset(sc);
		vtmmio_release_child_resources(sc);

		/* Reset status for future attempt. */
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	} else {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}
/*
 * Re-initialize one previously allocated virtqueue after a device reset:
 * refresh its size from the device, reset the host-side ring state, and
 * re-program the queue registers.
 */
static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
	struct vtmmio_virtqueue *vqx;
	struct virtqueue *vq;
	int error;
	uint16_t size;

	vqx = &sc->vtmmio_vqs[idx];
	vq = vqx->vtv_vq;

	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

	vtmmio_select_virtqueue(sc, idx);
	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	vtmmio_set_virtqueue(sc, vq, size);

	return (0);
}
/*
 * Tear down the interrupt handler and release the IRQ resource, if
 * present.  Clear both pointers afterwards so a second call (or a later
 * setup attempt) cannot tear down or release them twice.
 */
static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{
	if (sc->ih != NULL) {
		bus_teardown_intr(sc->dev, sc->res[1], sc->ih);
		sc->ih = NULL;
	}

	if (sc->res[1] != NULL) {
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
		sc->res[1] = NULL;
	}
}
/*
 * De-program every virtqueue from the device (READY=0 on modern devices,
 * PFN=0 on legacy) and free the host-side ring memory and bookkeeping.
 */
static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
	struct vtmmio_virtqueue *vqx;
	int idx;

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];

		vtmmio_select_virtqueue(sc, idx);
		if (sc->vtmmio_version > 1) {
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);
			/*
			 * Read back, presumably to make sure the disable
			 * reached the device — TODO confirm against spec.
			 */
			vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_READY);
		} else
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);

		virtqueue_free(vqx->vtv_vq);
		vqx->vtv_vq = NULL;
	}

	free(sc->vtmmio_vqs, M_DEVBUF);
	sc->vtmmio_vqs = NULL;
	sc->vtmmio_nvqs = 0;
}
/* Free everything allocated on behalf of the child driver. */
static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{
	vtmmio_free_interrupts(sc);
	vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{
	/*
	 * Setting the status to RESET sets the host device to
	 * the original, uninitialized state.
	 */
	vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

/* Select which queue subsequent QUEUE_* register accesses refer to. */
static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{
	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}
/*
 * Shared interrupt handler for all virtqueues.  Reads and acknowledges
 * the interrupt status, forwards config-change interrupts to the child,
 * and dispatches a vring interrupt to every queue that has a handler.
 */
static void
vtmmio_vq_intr(void *arg)
{
	struct vtmmio_virtqueue *vqx;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t status;
	int idx;

	sc = arg;

	status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

	/* The config changed */
	if (status & VIRTIO_MMIO_INT_CONFIG)
		if (sc->vtmmio_child_dev != NULL)
			VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

	/* Notify all virtqueues. */
	if (status & VIRTIO_MMIO_INT_VRING) {
		for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
			vqx = &sc->vtmmio_vqs[idx];
			if (vqx->vtv_no_intr == 0) {
				vq = vqx->vtv_vq;
				virtqueue_intr(vq);
			}
		}
	}
}
diff --git a/sys/dev/virtio/pci/virtio_pci.c b/sys/dev/virtio/pci/virtio_pci.c
index 4849affae58c..1470dc83949c 100644
--- a/sys/dev/virtio/pci/virtio_pci.c
+++ b/sys/dev/virtio/pci/virtio_pci.c
@@ -1,999 +1,999 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2017, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_pci_if.h"
#include "virtio_if.h"
static void vtpci_describe_features(struct vtpci_common *, const char *,
uint64_t);
static int vtpci_alloc_msix(struct vtpci_common *, int);
static int vtpci_alloc_msi(struct vtpci_common *);
static int vtpci_alloc_intr_msix_pervq(struct vtpci_common *);
static int vtpci_alloc_intr_msix_shared(struct vtpci_common *);
static int vtpci_alloc_intr_msi(struct vtpci_common *);
static int vtpci_alloc_intr_intx(struct vtpci_common *);
static int vtpci_alloc_interrupt(struct vtpci_common *, int, int,
struct vtpci_interrupt *);
static void vtpci_free_interrupt(struct vtpci_common *,
struct vtpci_interrupt *);
static void vtpci_free_interrupts(struct vtpci_common *);
static void vtpci_free_virtqueues(struct vtpci_common *);
static void vtpci_cleanup_setup_intr_attempt(struct vtpci_common *);
static int vtpci_alloc_intr_resources(struct vtpci_common *);
static int vtpci_setup_intx_interrupt(struct vtpci_common *,
enum intr_type);
static int vtpci_setup_pervq_msix_interrupts(struct vtpci_common *,
enum intr_type);
static int vtpci_set_host_msix_vectors(struct vtpci_common *);
static int vtpci_setup_msix_interrupts(struct vtpci_common *,
enum intr_type);
static int vtpci_setup_intrs(struct vtpci_common *, enum intr_type);
static int vtpci_reinit_virtqueue(struct vtpci_common *, int);
static void vtpci_intx_intr(void *);
static int vtpci_vq_shared_intr_filter(void *);
static void vtpci_vq_shared_intr(void *);
static int vtpci_vq_intr_filter(void *);
static void vtpci_vq_intr(void *);
static void vtpci_config_intr(void *);
static void vtpci_setup_sysctl(struct vtpci_common *);
#define vtpci_setup_msi_interrupt vtpci_setup_intx_interrupt
/*
* This module contains two drivers:
* - virtio_pci_legacy for pre-V1 support
* - virtio_pci_modern for V1 support
*/
MODULE_VERSION(virtio_pci, 1);
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
int vtpci_disable_msix = 0;
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
/*
 * Thin wrappers around the transport-specific kobj methods, implemented
 * by the legacy and modern PCI front-ends.
 */
static uint8_t
vtpci_read_isr(struct vtpci_common *cn)
{
	return (VIRTIO_PCI_READ_ISR(cn->vtpci_dev));
}

static uint16_t
vtpci_get_vq_size(struct vtpci_common *cn, int idx)
{
	return (VIRTIO_PCI_GET_VQ_SIZE(cn->vtpci_dev, idx));
}

static bus_size_t
vtpci_get_vq_notify_off(struct vtpci_common *cn, int idx)
{
	return (VIRTIO_PCI_GET_VQ_NOTIFY_OFF(cn->vtpci_dev, idx));
}

static void
vtpci_set_vq(struct vtpci_common *cn, struct virtqueue *vq)
{
	VIRTIO_PCI_SET_VQ(cn->vtpci_dev, vq);
}

static void
vtpci_disable_vq(struct vtpci_common *cn, int idx)
{
	VIRTIO_PCI_DISABLE_VQ(cn->vtpci_dev, idx);
}

static int
vtpci_register_cfg_msix(struct vtpci_common *cn, struct vtpci_interrupt *intr)
{
	return (VIRTIO_PCI_REGISTER_CFG_MSIX(cn->vtpci_dev, intr));
}

static int
vtpci_register_vq_msix(struct vtpci_common *cn, int idx,
    struct vtpci_interrupt *intr)
{
	return (VIRTIO_PCI_REGISTER_VQ_MSIX(cn->vtpci_dev, idx, intr));
}
/*
 * One-time transport setup: record the device, enable bus mastering,
 * note modern vs. legacy operation and which message-interrupt
 * capabilities the device lacks, then create the sysctl nodes.
 */
void
vtpci_init(struct vtpci_common *cn, device_t dev, bool modern)
{
	cn->vtpci_dev = dev;

	pci_enable_busmaster(dev);

	if (modern)
		cn->vtpci_flags |= VTPCI_FLAG_MODERN;
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSI;
	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0)
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSIX;

	vtpci_setup_sysctl(cn);
}
/* Add the single generic child that a virtio device driver will claim. */
int
vtpci_add_child(struct vtpci_common *cn)
{
	device_t dev, child;

	dev = cn->vtpci_dev;

	child = device_add_child(dev, NULL, -1);
	if (child == NULL) {
		device_printf(dev, "cannot create child device\n");
		return (ENOMEM);
	}

	cn->vtpci_child_dev = child;

	return (0);
}

/* Delete the child, propagating its refusal to detach if it has one. */
int
vtpci_delete_child(struct vtpci_common *cn)
{
	device_t dev, child;
	int error;

	dev = cn->vtpci_dev;
	child = cn->vtpci_child_dev;

	if (child != NULL) {
		error = device_delete_child(dev, child);
		if (error)
			return (error);
		cn->vtpci_child_dev = NULL;
	}

	return (0);
}

/* The child detached: free its resources and forget its negotiated state. */
void
vtpci_child_detached(struct vtpci_common *cn)
{
	vtpci_release_child_resources(cn);

	cn->vtpci_child_feat_desc = NULL;
	cn->vtpci_host_features = 0;
	cn->vtpci_features = 0;
}
/*
 * Restore state after a device reset: re-program every virtqueue and,
 * when MSI-X is in use, re-register the vectors with the device.
 */
int
vtpci_reinit(struct vtpci_common *cn)
{
	int idx, error;

	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
		error = vtpci_reinit_virtqueue(cn, idx);
		if (error)
			return (error);
	}

	if (vtpci_is_msix_enabled(cn)) {
		error = vtpci_set_host_msix_vectors(cn);
		if (error)
			return (error);
	}

	return (0);
}
/*
 * Print a feature set using the child's description table.  Only emitted
 * when booting verbosely and before the child has attached.
 */
static void
vtpci_describe_features(struct vtpci_common *cn, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = cn->vtpci_dev;
	child = cn->vtpci_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, cn->vtpci_child_feat_desc);
}
/*
 * Intersect the host's feature set with the child driver's, filter
 * transport-level features, and record the result.  Writing the accepted
 * set to the device is not done here; presumably the transport front-end
 * handles that — confirm against the legacy/modern callers.
 */
uint64_t
vtpci_negotiate_features(struct vtpci_common *cn,
    uint64_t child_features, uint64_t host_features)
{
	uint64_t features;

	cn->vtpci_host_features = host_features;
	vtpci_describe_features(cn, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtio_filter_transport_features(features);
	cn->vtpci_features = features;

	vtpci_describe_features(cn, "negotiated", features);

	return (features);
}
-int
+bool
vtpci_with_feature(struct vtpci_common *cn, uint64_t feature)
{
	/* True iff 'feature' was accepted during negotiation. */
	return ((cn->vtpci_features & feature) != 0);
}
/*
 * Report instance variables to the child driver, sourced from PCI
 * config-space identifiers.
 */
int
vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result)
{
	device_t dev;
	int error;

	dev = cn->vtpci_dev;
	error = 0;

	switch (index) {
	case VIRTIO_IVAR_SUBDEVICE:
		*result = pci_get_subdevice(dev);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = pci_get_vendor(dev);
		break;
	case VIRTIO_IVAR_DEVICE:
		*result = pci_get_device(dev);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
		*result = pci_get_subvendor(dev);
		break;
	case VIRTIO_IVAR_MODERN:
		*result = vtpci_is_modern(cn);
		break;
	default:
		error = ENOENT;
	}

	return (error);
}
/*
 * Accept instance-variable writes from the child driver.  The feature
 * description table is the only writable ivar.
 */
int
vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value)
{
	if (index != VIRTIO_IVAR_FEATURE_DESC)
		return (ENOENT);

	cn->vtpci_child_feat_desc = (void *)value;
	return (0);
}
/*
 * Allocate 'nvqs' virtqueues described by 'vq_info' and hand each to the
 * transport front-end.  On any failure all queues allocated so far are
 * freed.  'flags' is not used here.
 */
int
vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	device_t dev;
	int idx, align, error;

	dev = cn->vtpci_dev;

	/*
	 * This is VIRTIO_PCI_VRING_ALIGN from legacy VirtIO. In modern VirtIO,
	 * the tables do not have to be allocated contiguously, but we do so
	 * anyways.
	 */
	align = 4096;

	if (cn->vtpci_nvqs != 0)
		return (EALREADY);
	if (nvqs <= 0)
		return (EINVAL);

	cn->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cn->vtpci_vqs == NULL)
		return (ENOMEM);

	for (idx = 0; idx < nvqs; idx++) {
		struct vtpci_virtqueue *vqx;
		struct vq_alloc_info *info;
		struct virtqueue *vq;
		bus_size_t notify_offset;
		uint16_t size;

		vqx = &cn->vtpci_vqs[idx];
		info = &vq_info[idx];

		/* Queue size and notify offset come from the front-end. */
		size = vtpci_get_vq_size(cn, idx);
		notify_offset = vtpci_get_vq_notify_off(cn, idx);

		error = virtqueue_alloc(dev, idx, size, notify_offset, align,
		    ~(vm_paddr_t)0, info, &vq);
		if (error) {
			device_printf(dev,
			    "cannot allocate virtqueue %d: %d\n", idx, error);
			break;
		}

		vtpci_set_vq(cn, vq);

		vqx->vtv_vq = *info->vqai_vq = vq;
		vqx->vtv_no_intr = info->vqai_intr == NULL;

		cn->vtpci_nvqs++;
	}

	if (error)
		vtpci_free_virtqueues(cn);

	return (error);
}
static int
vtpci_alloc_msix(struct vtpci_common *cn, int nvectors)
{
device_t dev;
int nmsix, cnt, required;
dev = cn->vtpci_dev;
/* Allocate an additional vector for the config changes. */
required = nvectors + 1;
nmsix = pci_msix_count(dev);
if (nmsix < required)
return (1);
cnt = required;
if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
cn->vtpci_nmsix_resources = required;
return (0);
}
pci_release_msi(dev);
return (1);
}
static int
vtpci_alloc_msi(struct vtpci_common *cn)
{
device_t dev;
int nmsi, cnt, required;
dev = cn->vtpci_dev;
required = 1;
nmsi = pci_msi_count(dev);
if (nmsi < required)
return (1);
cnt = required;
if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required)
return (0);
pci_release_msi(dev);
return (1);
}
static int
vtpci_alloc_intr_msix_pervq(struct vtpci_common *cn)
{
int i, nvectors, error;
if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
return (ENOTSUP);
for (nvectors = 0, i = 0; i < cn->vtpci_nvqs; i++) {
if (cn->vtpci_vqs[i].vtv_no_intr == 0)
nvectors++;
}
error = vtpci_alloc_msix(cn, nvectors);
if (error)
return (error);
cn->vtpci_flags |= VTPCI_FLAG_MSIX;
return (0);
}
static int
vtpci_alloc_intr_msix_shared(struct vtpci_common *cn)
{
int error;
if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
return (ENOTSUP);
error = vtpci_alloc_msix(cn, 1);
if (error)
return (error);
cn->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
return (0);
}
static int
vtpci_alloc_intr_msi(struct vtpci_common *cn)
{
int error;
/* Only BHyVe supports MSI. */
if (cn->vtpci_flags & VTPCI_FLAG_NO_MSI)
return (ENOTSUP);
error = vtpci_alloc_msi(cn);
if (error)
return (error);
cn->vtpci_flags |= VTPCI_FLAG_MSI;
return (0);
}
static int
vtpci_alloc_intr_intx(struct vtpci_common *cn)
{
cn->vtpci_flags |= VTPCI_FLAG_INTX;
return (0);
}
static int
vtpci_alloc_interrupt(struct vtpci_common *cn, int rid, int flags,
struct vtpci_interrupt *intr)
{
struct resource *irq;
irq = bus_alloc_resource_any(cn->vtpci_dev, SYS_RES_IRQ, &rid, flags);
if (irq == NULL)
return (ENXIO);
intr->vti_irq = irq;
intr->vti_rid = rid;
return (0);
}
static void
vtpci_free_interrupt(struct vtpci_common *cn, struct vtpci_interrupt *intr)
{
device_t dev;
dev = cn->vtpci_dev;
if (intr->vti_handler != NULL) {
bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
intr->vti_handler = NULL;
}
if (intr->vti_irq != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
intr->vti_irq);
intr->vti_irq = NULL;
intr->vti_rid = -1;
}
}
static void
vtpci_free_interrupts(struct vtpci_common *cn)
{
struct vtpci_interrupt *intr;
int i, nvq_intrs;
vtpci_free_interrupt(cn, &cn->vtpci_device_interrupt);
if (cn->vtpci_nmsix_resources != 0) {
nvq_intrs = cn->vtpci_nmsix_resources - 1;
cn->vtpci_nmsix_resources = 0;
if ((intr = cn->vtpci_msix_vq_interrupts) != NULL) {
for (i = 0; i < nvq_intrs; i++, intr++)
vtpci_free_interrupt(cn, intr);
free(cn->vtpci_msix_vq_interrupts, M_DEVBUF);
cn->vtpci_msix_vq_interrupts = NULL;
}
}
if (cn->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
pci_release_msi(cn->vtpci_dev);
cn->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
}
static void
vtpci_free_virtqueues(struct vtpci_common *cn)
{
struct vtpci_virtqueue *vqx;
int idx;
for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
vtpci_disable_vq(cn, idx);
vqx = &cn->vtpci_vqs[idx];
virtqueue_free(vqx->vtv_vq);
vqx->vtv_vq = NULL;
}
free(cn->vtpci_vqs, M_DEVBUF);
cn->vtpci_vqs = NULL;
cn->vtpci_nvqs = 0;
}
void
vtpci_release_child_resources(struct vtpci_common *cn)
{
vtpci_free_interrupts(cn);
vtpci_free_virtqueues(cn);
}
static void
vtpci_cleanup_setup_intr_attempt(struct vtpci_common *cn)
{
int idx;
if (cn->vtpci_flags & VTPCI_FLAG_MSIX) {
vtpci_register_cfg_msix(cn, NULL);
for (idx = 0; idx < cn->vtpci_nvqs; idx++)
vtpci_register_vq_msix(cn, idx, NULL);
}
vtpci_free_interrupts(cn);
}
static int
vtpci_alloc_intr_resources(struct vtpci_common *cn)
{
struct vtpci_interrupt *intr;
int i, rid, flags, nvq_intrs, error;
flags = RF_ACTIVE;
if (cn->vtpci_flags & VTPCI_FLAG_INTX) {
rid = 0;
flags |= RF_SHAREABLE;
} else
rid = 1;
/*
* When using INTX or MSI interrupts, this resource handles all
* interrupts. When using MSIX, this resource handles just the
* configuration changed interrupt.
*/
intr = &cn->vtpci_device_interrupt;
error = vtpci_alloc_interrupt(cn, rid, flags, intr);
if (error || cn->vtpci_flags & (VTPCI_FLAG_INTX | VTPCI_FLAG_MSI))
return (error);
/*
* Now allocate the interrupts for the virtqueues. This may be one
* for all the virtqueues, or one for each virtqueue. Subtract one
* below for because of the configuration changed interrupt.
*/
nvq_intrs = cn->vtpci_nmsix_resources - 1;
cn->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
if (cn->vtpci_msix_vq_interrupts == NULL)
return (ENOMEM);
intr = cn->vtpci_msix_vq_interrupts;
for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
error = vtpci_alloc_interrupt(cn, rid, flags, intr);
if (error)
return (error);
}
return (0);
}
static int
vtpci_setup_intx_interrupt(struct vtpci_common *cn, enum intr_type type)
{
struct vtpci_interrupt *intr;
int error;
intr = &cn->vtpci_device_interrupt;
error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL,
vtpci_intx_intr, cn, &intr->vti_handler);
return (error);
}
static int
vtpci_setup_pervq_msix_interrupts(struct vtpci_common *cn, enum intr_type type)
{
struct vtpci_virtqueue *vqx;
struct vtpci_interrupt *intr;
int i, error;
intr = cn->vtpci_msix_vq_interrupts;
for (i = 0; i < cn->vtpci_nvqs; i++) {
vqx = &cn->vtpci_vqs[i];
if (vqx->vtv_no_intr)
continue;
error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type,
vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
&intr->vti_handler);
if (error)
return (error);
intr++;
}
return (0);
}
static int
vtpci_set_host_msix_vectors(struct vtpci_common *cn)
{
struct vtpci_interrupt *intr, *tintr;
int idx, error;
intr = &cn->vtpci_device_interrupt;
error = vtpci_register_cfg_msix(cn, intr);
if (error)
return (error);
intr = cn->vtpci_msix_vq_interrupts;
for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
if (cn->vtpci_vqs[idx].vtv_no_intr)
tintr = NULL;
else
tintr = intr;
error = vtpci_register_vq_msix(cn, idx, tintr);
if (error)
break;
/*
* For shared MSIX, all the virtqueues share the first
* interrupt.
*/
if (!cn->vtpci_vqs[idx].vtv_no_intr &&
(cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
intr++;
}
return (error);
}
static int
vtpci_setup_msix_interrupts(struct vtpci_common *cn, enum intr_type type)
{
struct vtpci_interrupt *intr;
int error;
intr = &cn->vtpci_device_interrupt;
error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL,
vtpci_config_intr, cn, &intr->vti_handler);
if (error)
return (error);
if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
intr = &cn->vtpci_msix_vq_interrupts[0];
error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type,
vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, cn,
&intr->vti_handler);
} else
error = vtpci_setup_pervq_msix_interrupts(cn, type);
return (error ? error : vtpci_set_host_msix_vectors(cn));
}
static int
vtpci_setup_intrs(struct vtpci_common *cn, enum intr_type type)
{
int error;
type |= INTR_MPSAFE;
KASSERT(cn->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
("%s: no interrupt type selected %#x", __func__, cn->vtpci_flags));
error = vtpci_alloc_intr_resources(cn);
if (error)
return (error);
if (cn->vtpci_flags & VTPCI_FLAG_INTX)
error = vtpci_setup_intx_interrupt(cn, type);
else if (cn->vtpci_flags & VTPCI_FLAG_MSI)
error = vtpci_setup_msi_interrupt(cn, type);
else
error = vtpci_setup_msix_interrupts(cn, type);
return (error);
}
int
vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type)
{
device_t dev;
int attempt, error;
dev = cn->vtpci_dev;
for (attempt = 0; attempt < 5; attempt++) {
/*
* Start with the most desirable interrupt configuration and
* fallback towards less desirable ones.
*/
switch (attempt) {
case 0:
error = vtpci_alloc_intr_msix_pervq(cn);
break;
case 1:
error = vtpci_alloc_intr_msix_shared(cn);
break;
case 2:
error = vtpci_alloc_intr_msi(cn);
break;
case 3:
error = vtpci_alloc_intr_intx(cn);
break;
default:
device_printf(dev,
"exhausted all interrupt allocation attempts\n");
return (ENXIO);
}
if (error == 0 && vtpci_setup_intrs(cn, type) == 0)
break;
vtpci_cleanup_setup_intr_attempt(cn);
}
if (bootverbose) {
if (cn->vtpci_flags & VTPCI_FLAG_INTX)
device_printf(dev, "using legacy interrupt\n");
else if (cn->vtpci_flags & VTPCI_FLAG_MSI)
device_printf(dev, "using MSI interrupt\n");
else if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
device_printf(dev, "using shared MSIX interrupts\n");
else
device_printf(dev, "using per VQ MSIX interrupts\n");
}
return (0);
}
static int
vtpci_reinit_virtqueue(struct vtpci_common *cn, int idx)
{
struct vtpci_virtqueue *vqx;
struct virtqueue *vq;
int error;
vqx = &cn->vtpci_vqs[idx];
vq = vqx->vtv_vq;
KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
error = virtqueue_reinit(vq, vtpci_get_vq_size(cn, idx));
if (error == 0)
vtpci_set_vq(cn, vq);
return (error);
}
static void
vtpci_intx_intr(void *xcn)
{
struct vtpci_common *cn;
struct vtpci_virtqueue *vqx;
int i;
uint8_t isr;
cn = xcn;
isr = vtpci_read_isr(cn);
if (isr & VIRTIO_PCI_ISR_CONFIG)
vtpci_config_intr(cn);
if (isr & VIRTIO_PCI_ISR_INTR) {
vqx = &cn->vtpci_vqs[0];
for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
if (vqx->vtv_no_intr == 0)
virtqueue_intr(vqx->vtv_vq);
}
}
}
static int
vtpci_vq_shared_intr_filter(void *xcn)
{
struct vtpci_common *cn;
struct vtpci_virtqueue *vqx;
int i, rc;
cn = xcn;
vqx = &cn->vtpci_vqs[0];
rc = 0;
for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
if (vqx->vtv_no_intr == 0)
rc |= virtqueue_intr_filter(vqx->vtv_vq);
}
return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
}
static void
vtpci_vq_shared_intr(void *xcn)
{
struct vtpci_common *cn;
struct vtpci_virtqueue *vqx;
int i;
cn = xcn;
vqx = &cn->vtpci_vqs[0];
for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) {
if (vqx->vtv_no_intr == 0)
virtqueue_intr(vqx->vtv_vq);
}
}
static int
vtpci_vq_intr_filter(void *xvq)
{
struct virtqueue *vq;
int rc;
vq = xvq;
rc = virtqueue_intr_filter(vq);
return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
}
static void
vtpci_vq_intr(void *xvq)
{
struct virtqueue *vq;
vq = xvq;
virtqueue_intr(vq);
}
static void
vtpci_config_intr(void *xcn)
{
struct vtpci_common *cn;
device_t child;
cn = xcn;
child = cn->vtpci_child_dev;
if (child != NULL)
VIRTIO_CONFIG_CHANGE(child);
}
static int
vtpci_feature_sysctl(struct sysctl_req *req, struct vtpci_common *cn,
uint64_t features)
{
struct sbuf *sb;
int error;
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
error = virtio_describe_sbuf(sb, features, cn->vtpci_child_feat_desc);
sbuf_delete(sb);
return (error);
}
static int
vtpci_host_features_sysctl(SYSCTL_HANDLER_ARGS)
{
struct vtpci_common *cn;
cn = arg1;
return (vtpci_feature_sysctl(req, cn, cn->vtpci_host_features));
}
static int
vtpci_negotiated_features_sysctl(SYSCTL_HANDLER_ARGS)
{
struct vtpci_common *cn;
cn = arg1;
return (vtpci_feature_sysctl(req, cn, cn->vtpci_features));
}
static void
vtpci_setup_sysctl(struct vtpci_common *cn)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = cn->vtpci_dev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nvqs",
CTLFLAG_RD, &cn->vtpci_nvqs, 0, "Number of virtqueues");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "host_features",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0,
vtpci_host_features_sysctl, "A", "Features supported by the host");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "negotiated_features",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0,
vtpci_negotiated_features_sysctl, "A", "Features negotiated");
}
diff --git a/sys/dev/virtio/pci/virtio_pci.h b/sys/dev/virtio/pci/virtio_pci.h
index 2d123ebdbb53..401cba25bbf4 100644
--- a/sys/dev/virtio/pci/virtio_pci.h
+++ b/sys/dev/virtio/pci/virtio_pci.h
@@ -1,130 +1,130 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2017, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VIRTIO_PCI_H
#define _VIRTIO_PCI_H
struct vtpci_interrupt {
struct resource *vti_irq;
int vti_rid;
void *vti_handler;
};
struct vtpci_virtqueue {
struct virtqueue *vtv_vq;
int vtv_no_intr;
int vtv_notify_offset;
};
struct vtpci_common {
device_t vtpci_dev;
uint64_t vtpci_host_features;
uint64_t vtpci_features;
struct vtpci_virtqueue *vtpci_vqs;
int vtpci_nvqs;
uint32_t vtpci_flags;
#define VTPCI_FLAG_NO_MSI 0x0001
#define VTPCI_FLAG_NO_MSIX 0x0002
#define VTPCI_FLAG_MODERN 0x0004
#define VTPCI_FLAG_INTX 0x1000
#define VTPCI_FLAG_MSI 0x2000
#define VTPCI_FLAG_MSIX 0x4000
#define VTPCI_FLAG_SHARED_MSIX 0x8000
#define VTPCI_FLAG_ITYPE_MASK 0xF000
/* The VirtIO PCI "bus" will only ever have one child. */
device_t vtpci_child_dev;
struct virtio_feature_desc *vtpci_child_feat_desc;
/*
* Ideally, each virtqueue that the driver provides a callback for will
* receive its own MSIX vector. If there are not sufficient vectors
* available, then attempt to have all the VQs share one vector. For
* MSIX, the configuration changed notifications must be on their own
* vector.
*
* If MSIX is not available, attempt to have the whole device share
* one MSI vector, and then, finally, one intx interrupt.
*/
struct vtpci_interrupt vtpci_device_interrupt;
struct vtpci_interrupt *vtpci_msix_vq_interrupts;
int vtpci_nmsix_resources;
};
extern int vtpci_disable_msix;
static inline device_t
vtpci_child_device(struct vtpci_common *cn)
{
return (cn->vtpci_child_dev);
}
static inline bool
vtpci_is_msix_available(struct vtpci_common *cn)
{
return ((cn->vtpci_flags & VTPCI_FLAG_NO_MSIX) == 0);
}
static inline bool
vtpci_is_msix_enabled(struct vtpci_common *cn)
{
return ((cn->vtpci_flags & VTPCI_FLAG_MSIX) != 0);
}
static inline bool
vtpci_is_modern(struct vtpci_common *cn)
{
return ((cn->vtpci_flags & VTPCI_FLAG_MODERN) != 0);
}
static inline int
vtpci_virtqueue_count(struct vtpci_common *cn)
{
return (cn->vtpci_nvqs);
}
void vtpci_init(struct vtpci_common *cn, device_t dev, bool modern);
int vtpci_add_child(struct vtpci_common *cn);
int vtpci_delete_child(struct vtpci_common *cn);
void vtpci_child_detached(struct vtpci_common *cn);
int vtpci_reinit(struct vtpci_common *cn);
uint64_t vtpci_negotiate_features(struct vtpci_common *cn,
uint64_t child_features, uint64_t host_features);
-int vtpci_with_feature(struct vtpci_common *cn, uint64_t feature);
+bool vtpci_with_feature(struct vtpci_common *cn, uint64_t feature);
int vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result);
int vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value);
int vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
struct vq_alloc_info *vq_info);
int vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type);
void vtpci_release_child_resources(struct vtpci_common *cn);
#endif /* _VIRTIO_PCI_H */
diff --git a/sys/dev/virtio/pci/virtio_pci_legacy.c b/sys/dev/virtio/pci/virtio_pci_legacy.c
index 88a8323bdd6f..72d637fb0f55 100644
--- a/sys/dev/virtio/pci/virtio_pci_legacy.c
+++ b/sys/dev/virtio/pci/virtio_pci_legacy.c
@@ -1,767 +1,767 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for the legacy VirtIO PCI interface. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_bus_if.h"
#include "virtio_pci_if.h"
#include "virtio_if.h"
struct vtpci_legacy_softc {
device_t vtpci_dev;
struct vtpci_common vtpci_common;
int vtpci_res_type;
struct resource *vtpci_res;
struct resource *vtpci_msix_table_res;
struct resource *vtpci_msix_pba_res;
};
static int vtpci_legacy_probe(device_t);
static int vtpci_legacy_attach(device_t);
static int vtpci_legacy_detach(device_t);
static int vtpci_legacy_suspend(device_t);
static int vtpci_legacy_resume(device_t);
static int vtpci_legacy_shutdown(device_t);
static void vtpci_legacy_driver_added(device_t, driver_t *);
static void vtpci_legacy_child_detached(device_t, device_t);
static int vtpci_legacy_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtpci_legacy_write_ivar(device_t, device_t, int, uintptr_t);
static uint8_t vtpci_legacy_read_isr(device_t);
static uint16_t vtpci_legacy_get_vq_size(device_t, int);
static bus_size_t vtpci_legacy_get_vq_notify_off(device_t, int);
static void vtpci_legacy_set_vq(device_t, struct virtqueue *);
static void vtpci_legacy_disable_vq(device_t, int);
static int vtpci_legacy_register_cfg_msix(device_t,
struct vtpci_interrupt *);
static int vtpci_legacy_register_vq_msix(device_t, int idx,
struct vtpci_interrupt *);
static uint64_t vtpci_legacy_negotiate_features(device_t, uint64_t);
-static int vtpci_legacy_with_feature(device_t, uint64_t);
+static bool vtpci_legacy_with_feature(device_t, uint64_t);
static int vtpci_legacy_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtpci_legacy_setup_interrupts(device_t, enum intr_type);
static void vtpci_legacy_stop(device_t);
static int vtpci_legacy_reinit(device_t, uint64_t);
static void vtpci_legacy_reinit_complete(device_t);
static void vtpci_legacy_notify_vq(device_t, uint16_t, bus_size_t);
static void vtpci_legacy_read_dev_config(device_t, bus_size_t, void *, int);
static void vtpci_legacy_write_dev_config(device_t, bus_size_t, const void *, int);
static bool vtpci_legacy_setup_msix(struct vtpci_legacy_softc *sc);
static void vtpci_legacy_teardown_msix(struct vtpci_legacy_softc *sc);
static int vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *);
static void vtpci_legacy_free_resources(struct vtpci_legacy_softc *);
static void vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *);
static uint8_t vtpci_legacy_get_status(struct vtpci_legacy_softc *);
static void vtpci_legacy_set_status(struct vtpci_legacy_softc *, uint8_t);
static void vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *, int);
static void vtpci_legacy_reset(struct vtpci_legacy_softc *);
#define VIRTIO_PCI_LEGACY_CONFIG(_sc) \
VIRTIO_PCI_CONFIG_OFF(vtpci_is_msix_enabled(&(_sc)->vtpci_common))
#define vtpci_legacy_read_config_1(sc, o) \
bus_read_1((sc)->vtpci_res, (o))
#define vtpci_legacy_write_config_1(sc, o, v) \
bus_write_1((sc)->vtpci_res, (o), (v))
/*
* VirtIO specifies that PCI Configuration area is guest endian. However,
* since PCI devices are inherently little-endian, on big-endian systems
* the bus layer transparently converts it to BE. For virtio-legacy, this
* conversion is undesired, so an extra byte swap is required to fix it.
*/
#define vtpci_legacy_read_config_2(sc, o) \
le16toh(bus_read_2((sc)->vtpci_res, (o)))
#define vtpci_legacy_read_config_4(sc, o) \
le32toh(bus_read_4((sc)->vtpci_res, (o)))
#define vtpci_legacy_write_config_2(sc, o, v) \
bus_write_2((sc)->vtpci_res, (o), (htole16(v)))
#define vtpci_legacy_write_config_4(sc, o, v) \
bus_write_4((sc)->vtpci_res, (o), (htole32(v)))
/* PCI Header LE. On BE systems the bus layer takes care of byte swapping. */
#define vtpci_legacy_read_header_2(sc, o) \
bus_read_2((sc)->vtpci_res, (o))
#define vtpci_legacy_read_header_4(sc, o) \
bus_read_4((sc)->vtpci_res, (o))
#define vtpci_legacy_write_header_2(sc, o, v) \
bus_write_2((sc)->vtpci_res, (o), (v))
#define vtpci_legacy_write_header_4(sc, o, v) \
bus_write_4((sc)->vtpci_res, (o), (v))
static device_method_t vtpci_legacy_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtpci_legacy_probe),
DEVMETHOD(device_attach, vtpci_legacy_attach),
DEVMETHOD(device_detach, vtpci_legacy_detach),
DEVMETHOD(device_suspend, vtpci_legacy_suspend),
DEVMETHOD(device_resume, vtpci_legacy_resume),
DEVMETHOD(device_shutdown, vtpci_legacy_shutdown),
/* Bus interface. */
DEVMETHOD(bus_driver_added, vtpci_legacy_driver_added),
DEVMETHOD(bus_child_detached, vtpci_legacy_child_detached),
DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo),
DEVMETHOD(bus_read_ivar, vtpci_legacy_read_ivar),
DEVMETHOD(bus_write_ivar, vtpci_legacy_write_ivar),
/* VirtIO PCI interface. */
DEVMETHOD(virtio_pci_read_isr, vtpci_legacy_read_isr),
DEVMETHOD(virtio_pci_get_vq_size, vtpci_legacy_get_vq_size),
DEVMETHOD(virtio_pci_get_vq_notify_off, vtpci_legacy_get_vq_notify_off),
DEVMETHOD(virtio_pci_set_vq, vtpci_legacy_set_vq),
DEVMETHOD(virtio_pci_disable_vq, vtpci_legacy_disable_vq),
DEVMETHOD(virtio_pci_register_cfg_msix, vtpci_legacy_register_cfg_msix),
DEVMETHOD(virtio_pci_register_vq_msix, vtpci_legacy_register_vq_msix),
/* VirtIO bus interface. */
DEVMETHOD(virtio_bus_negotiate_features, vtpci_legacy_negotiate_features),
DEVMETHOD(virtio_bus_with_feature, vtpci_legacy_with_feature),
DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_legacy_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtpci_legacy_setup_interrupts),
DEVMETHOD(virtio_bus_stop, vtpci_legacy_stop),
DEVMETHOD(virtio_bus_reinit, vtpci_legacy_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtpci_legacy_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtpci_legacy_notify_vq),
DEVMETHOD(virtio_bus_read_device_config, vtpci_legacy_read_dev_config),
DEVMETHOD(virtio_bus_write_device_config, vtpci_legacy_write_dev_config),
DEVMETHOD_END
};
static driver_t vtpci_legacy_driver = {
.name = "virtio_pci",
.methods = vtpci_legacy_methods,
.size = sizeof(struct vtpci_legacy_softc)
};
DRIVER_MODULE(virtio_pci_legacy, pci, vtpci_legacy_driver, 0, 0);
static int
vtpci_legacy_probe(device_t dev)
{
char desc[64];
const char *name;
if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
return (ENXIO);
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
pci_get_device(dev) > VIRTIO_PCI_DEVICEID_LEGACY_MAX)
return (ENXIO);
if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
return (ENXIO);
name = virtio_device_name(pci_get_subdevice(dev));
if (name == NULL)
name = "Unknown";
snprintf(desc, sizeof(desc), "VirtIO PCI (legacy) %s adapter", name);
device_set_desc_copy(dev, desc);
/* Prefer transitional modern VirtIO PCI. */
return (BUS_PROBE_LOW_PRIORITY);
}
static int
vtpci_legacy_attach(device_t dev)
{
struct vtpci_legacy_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtpci_dev = dev;
vtpci_init(&sc->vtpci_common, dev, false);
error = vtpci_legacy_alloc_resources(sc);
if (error) {
device_printf(dev, "cannot map I/O space nor memory space\n");
return (error);
}
if (vtpci_is_msix_available(&sc->vtpci_common) &&
!vtpci_legacy_setup_msix(sc)) {
device_printf(dev, "cannot setup MSI-x resources\n");
error = ENXIO;
goto fail;
}
vtpci_legacy_reset(sc);
/* Tell the host we've noticed this device. */
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
error = vtpci_add_child(&sc->vtpci_common);
if (error)
goto fail;
vtpci_legacy_probe_and_attach_child(sc);
return (0);
fail:
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
vtpci_legacy_detach(dev);
return (error);
}
static int
vtpci_legacy_detach(device_t dev)
{
struct vtpci_legacy_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_delete_child(&sc->vtpci_common);
if (error)
return (error);
vtpci_legacy_reset(sc);
vtpci_legacy_teardown_msix(sc);
vtpci_legacy_free_resources(sc);
return (0);
}
static int
vtpci_legacy_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
vtpci_legacy_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static int
vtpci_legacy_shutdown(device_t dev)
{
(void) bus_generic_shutdown(dev);
/* Forcibly stop the host device. */
vtpci_legacy_stop(dev);
return (0);
}
static void
vtpci_legacy_driver_added(device_t dev, driver_t *driver)
{
vtpci_legacy_probe_and_attach_child(device_get_softc(dev));
}
static void
vtpci_legacy_child_detached(device_t dev, device_t child)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
vtpci_legacy_reset(sc);
vtpci_child_detached(&sc->vtpci_common);
/* After the reset, retell the host we've noticed this device. */
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
}
static int
vtpci_legacy_read_ivar(device_t dev, device_t child, int index,
uintptr_t *result)
{
struct vtpci_legacy_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
if (vtpci_child_device(cn) != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_DEVTYPE:
*result = pci_get_subdevice(dev);
break;
default:
return (vtpci_read_ivar(cn, index, result));
}
return (0);
}
static int
vtpci_legacy_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
struct vtpci_legacy_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
if (vtpci_child_device(cn) != child)
return (ENOENT);
switch (index) {
default:
return (vtpci_write_ivar(cn, index, value));
}
return (0);
}
static uint64_t
vtpci_legacy_negotiate_features(device_t dev, uint64_t child_features)
{
struct vtpci_legacy_softc *sc;
uint64_t host_features, features;
sc = device_get_softc(dev);
host_features = vtpci_legacy_read_header_4(sc, VIRTIO_PCI_HOST_FEATURES);
features = vtpci_negotiate_features(&sc->vtpci_common,
child_features, host_features);
vtpci_legacy_write_header_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
return (features);
}
-static int
+static bool
vtpci_legacy_with_feature(device_t dev, uint64_t feature)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
return (vtpci_with_feature(&sc->vtpci_common, feature));
}
static int
vtpci_legacy_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *vq_info)
{
struct vtpci_legacy_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
}
static int
vtpci_legacy_setup_interrupts(device_t dev, enum intr_type type)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
return (vtpci_setup_interrupts(&sc->vtpci_common, type));
}
static void
vtpci_legacy_stop(device_t dev)
{
vtpci_legacy_reset(device_get_softc(dev));
}
static int
vtpci_legacy_reinit(device_t dev, uint64_t features)
{
struct vtpci_legacy_softc *sc;
struct vtpci_common *cn;
int error;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
/*
* Redrive the device initialization. This is a bit of an abuse of
* the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
* play nice.
*
* We do not allow the host device to change from what was originally
* negotiated beyond what the guest driver changed. MSIX state should
* not change, number of virtqueues and their size remain the same, etc.
* This will need to be rethought when we want to support migration.
*/
if (vtpci_legacy_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
vtpci_legacy_stop(dev);
/*
* Quickly drive the status through ACK and DRIVER. The device does
* not become usable again until DRIVER_OK in reinit complete.
*/
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
vtpci_legacy_negotiate_features(dev, features);
error = vtpci_reinit(cn);
if (error)
return (error);
return (0);
}
static void
vtpci_legacy_reinit_complete(device_t dev)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
static void
vtpci_legacy_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
MPASS(offset == VIRTIO_PCI_QUEUE_NOTIFY);
vtpci_legacy_write_header_2(sc, offset, queue);
}
static uint8_t
vtpci_legacy_get_status(struct vtpci_legacy_softc *sc)
{
return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_STATUS));
}
static void
vtpci_legacy_set_status(struct vtpci_legacy_softc *sc, uint8_t status)
{
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= vtpci_legacy_get_status(sc);
vtpci_legacy_write_config_1(sc, VIRTIO_PCI_STATUS, status);
}
static void
vtpci_legacy_read_dev_config(device_t dev, bus_size_t offset,
void *dst, int length)
{
struct vtpci_legacy_softc *sc;
bus_size_t off;
uint8_t *d;
int i;
sc = device_get_softc(dev);
off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset;
d = dst;
for (i = 0; i < length; i++) {
d[i] = vtpci_legacy_read_config_1(sc, off + i);
}
}
static void
vtpci_legacy_write_dev_config(device_t dev, bus_size_t offset,
const void *src, int length)
{
struct vtpci_legacy_softc *sc;
bus_size_t off;
const uint8_t *s;
int i;
sc = device_get_softc(dev);
off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset;
s = src;
for (i = 0; i < length; i++) {
vtpci_legacy_write_config_1(sc, off + i, s[i]);
}
}
/*
 * Allocate the BAR(s) backing the MSI-X table and PBA, unless they live
 * in BAR(0) which is already held as the main device resource. Returns
 * false on allocation failure; the caller is expected to tear down via
 * vtpci_legacy_teardown_msix().
 */
static bool
vtpci_legacy_setup_msix(struct vtpci_legacy_softc *sc)
{
device_t dev;
int rid, table_rid;
dev = sc->vtpci_dev;
rid = table_rid = pci_msix_table_bar(dev);
if (rid != PCIR_BAR(0)) {
sc->vtpci_msix_table_res = bus_alloc_resource_any(
dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (sc->vtpci_msix_table_res == NULL)
return (false);
}
/* The PBA may share the table's BAR; only allocate a distinct one. */
rid = pci_msix_pba_bar(dev);
if (rid != table_rid && rid != PCIR_BAR(0)) {
sc->vtpci_msix_pba_res = bus_alloc_resource_any(
dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (sc->vtpci_msix_pba_res == NULL)
return (false);
}
return (true);
}
/*
 * Release the MSI-X table/PBA BAR resources allocated by
 * vtpci_legacy_setup_msix(). Safe to call with either resource absent.
 */
static void
vtpci_legacy_teardown_msix(struct vtpci_legacy_softc *sc)
{
device_t dev;
dev = sc->vtpci_dev;
if (sc->vtpci_msix_pba_res != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(sc->vtpci_msix_pba_res),
sc->vtpci_msix_pba_res);
sc->vtpci_msix_pba_res = NULL;
}
if (sc->vtpci_msix_table_res != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(sc->vtpci_msix_table_res),
sc->vtpci_msix_table_res);
sc->vtpci_msix_table_res = NULL;
}
}
/*
 * Allocate the BAR(0) resource holding the legacy virtio header.
 * Try IO port space first, then memory space, recording which type
 * succeeded in vtpci_res_type for the matching release.
 *
 * Returns 0 on success or ENXIO if neither resource type could be
 * allocated.
 */
static int
vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *sc)
{
	const int res_types[] = { SYS_RES_IOPORT, SYS_RES_MEMORY };
	device_t dev;
	int rid, i;

	dev = sc->vtpci_dev;

	/*
	 * Most hypervisors export the common configuration structure in IO
	 * space, but some use memory space; try both.
	 *
	 * Note the bound check: the original loop condition was
	 * "nitems(res_types)" (always true), which walked past the end of
	 * res_types when neither allocation succeeded.
	 */
	for (i = 0; i < nitems(res_types); i++) {
		rid = PCIR_BAR(0);
		sc->vtpci_res_type = res_types[i];
		sc->vtpci_res = bus_alloc_resource_any(dev, res_types[i], &rid,
		    RF_ACTIVE);
		if (sc->vtpci_res != NULL)
			break;
	}

	if (sc->vtpci_res == NULL)
		return (ENXIO);

	return (0);
}
/*
 * Release the BAR(0) resource allocated by vtpci_legacy_alloc_resources(),
 * using the recorded resource type (IO port or memory).
 */
static void
vtpci_legacy_free_resources(struct vtpci_legacy_softc *sc)
{
device_t dev;
dev = sc->vtpci_dev;
if (sc->vtpci_res != NULL) {
bus_release_resource(dev, sc->vtpci_res_type, PCIR_BAR(0),
sc->vtpci_res);
sc->vtpci_res = NULL;
}
}
/*
 * Probe and attach the virtio child device driver, walking the device
 * status through DRIVER -> DRIVER_OK on success or FAILED (followed by
 * a reset via the child-detached path) on attach failure.
 */
static void
vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *sc)
{
device_t dev, child;
dev = sc->vtpci_dev;
child = vtpci_child_device(&sc->vtpci_common);
if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
return;
if (device_probe(child) != 0)
return;
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
if (device_attach(child) != 0) {
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
/* Reset status for future attempt. */
vtpci_legacy_child_detached(dev, child);
} else {
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
VIRTIO_ATTACH_COMPLETED(child);
}
}
/*
 * Program an MSI-X vector register ('offset' selects config vs. queue
 * vector) and read it back to verify the device accepted it; a mismatch
 * (VIRTIO_MSI_NO_VECTOR readback) means the device is out of vectors.
 * A NULL 'intr' unregisters the vector.
 */
static int
vtpci_legacy_register_msix(struct vtpci_legacy_softc *sc, int offset,
struct vtpci_interrupt *intr)
{
uint16_t vector;
if (intr != NULL) {
/* Map from guest rid to host vector. */
vector = intr->vti_rid - 1;
} else
vector = VIRTIO_MSI_NO_VECTOR;
vtpci_legacy_write_header_2(sc, offset, vector);
return (vtpci_legacy_read_header_2(sc, offset) == vector ? 0 : ENODEV);
}
/*
 * Register (or unregister, if 'intr' is NULL) the MSI-X vector used for
 * configuration-change interrupts.
 */
static int
vtpci_legacy_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
{
struct vtpci_legacy_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_CONFIG_VECTOR, intr);
if (error) {
device_printf(dev,
"unable to register config MSIX interrupt\n");
return (error);
}
return (0);
}
/*
 * Register (or unregister, if 'intr' is NULL) the MSI-X vector for
 * virtqueue 'idx'. The queue must be selected before writing its
 * vector register.
 */
static int
vtpci_legacy_register_vq_msix(device_t dev, int idx,
struct vtpci_interrupt *intr)
{
struct vtpci_legacy_softc *sc;
int error;
sc = device_get_softc(dev);
vtpci_legacy_select_virtqueue(sc, idx);
error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_QUEUE_VECTOR, intr);
if (error) {
device_printf(dev,
"unable to register virtqueue MSIX interrupt\n");
return (error);
}
return (0);
}
static void
vtpci_legacy_reset(struct vtpci_legacy_softc *sc)
{
/*
 * Setting the status to RESET sets the host device to the
 * original, uninitialized state.
 */
vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);
/* Read back to flush the reset write before proceeding. */
(void) vtpci_legacy_get_status(sc);
}
/*
 * Select virtqueue 'idx'; subsequent QUEUE_* header accesses apply to
 * the selected queue.
 */
static void
vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *sc, int idx)
{
vtpci_legacy_write_header_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
}
/*
 * Read the ISR status register. NOTE(review): per the legacy virtio
 * spec this read also acknowledges/clears the interrupt — callers must
 * not read it speculatively; confirm against the interrupt handlers.
 */
static uint8_t
vtpci_legacy_read_isr(device_t dev)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_ISR));
}
/* Return the host-reported size (entry count) of virtqueue 'idx'. */
static uint16_t
vtpci_legacy_get_vq_size(device_t dev, int idx)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
vtpci_legacy_select_virtqueue(sc, idx);
return (vtpci_legacy_read_header_2(sc, VIRTIO_PCI_QUEUE_NUM));
}
/*
 * All legacy virtqueues share the single fixed notify register, so the
 * queue index is ignored.
 */
static bus_size_t
vtpci_legacy_get_vq_notify_off(device_t dev, int idx)
{
return (VIRTIO_PCI_QUEUE_NOTIFY);
}
/*
 * Tell the host where the virtqueue's ring lives: write the page frame
 * number (physical address >> VIRTIO_PCI_QUEUE_ADDR_SHIFT) of the
 * selected queue.
 */
static void
vtpci_legacy_set_vq(device_t dev, struct virtqueue *vq)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
vtpci_legacy_select_virtqueue(sc, virtqueue_index(vq));
vtpci_legacy_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN,
virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
}
/* Disable virtqueue 'idx' by writing a zero PFN for it. */
static void
vtpci_legacy_disable_vq(device_t dev, int idx)
{
struct vtpci_legacy_softc *sc;
sc = device_get_softc(dev);
vtpci_legacy_select_virtqueue(sc, idx);
vtpci_legacy_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
}
diff --git a/sys/dev/virtio/pci/virtio_pci_modern.c b/sys/dev/virtio/pci/virtio_pci_modern.c
index 2fa921dd9141..84d9511798e0 100644
--- a/sys/dev/virtio/pci/virtio_pci_modern.c
+++ b/sys/dev/virtio/pci/virtio_pci_modern.c
@@ -1,1445 +1,1445 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2017, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for the modern VirtIO PCI interface. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_bus_if.h"
#include "virtio_pci_if.h"
#include "virtio_if.h"
struct vtpci_modern_resource_map {
struct resource_map vtrm_map;
int vtrm_cap_offset;
int vtrm_bar;
int vtrm_offset;
int vtrm_length;
int vtrm_type; /* SYS_RES_{MEMORY, IOPORT} */
};
struct vtpci_modern_bar_resource {
struct resource *vtbr_res;
int vtbr_type;
};
struct vtpci_modern_softc {
device_t vtpci_dev;
struct vtpci_common vtpci_common;
uint32_t vtpci_notify_offset_multiplier;
uint16_t vtpci_devid;
int vtpci_msix_bar;
struct resource *vtpci_msix_res;
struct vtpci_modern_resource_map vtpci_common_res_map;
struct vtpci_modern_resource_map vtpci_notify_res_map;
struct vtpci_modern_resource_map vtpci_isr_res_map;
struct vtpci_modern_resource_map vtpci_device_res_map;
#define VTPCI_MODERN_MAX_BARS 6
struct vtpci_modern_bar_resource vtpci_bar_res[VTPCI_MODERN_MAX_BARS];
};
static int vtpci_modern_probe(device_t);
static int vtpci_modern_attach(device_t);
static int vtpci_modern_detach(device_t);
static int vtpci_modern_suspend(device_t);
static int vtpci_modern_resume(device_t);
static int vtpci_modern_shutdown(device_t);
static void vtpci_modern_driver_added(device_t, driver_t *);
static void vtpci_modern_child_detached(device_t, device_t);
static int vtpci_modern_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtpci_modern_write_ivar(device_t, device_t, int, uintptr_t);
static uint8_t vtpci_modern_read_isr(device_t);
static uint16_t vtpci_modern_get_vq_size(device_t, int);
static bus_size_t vtpci_modern_get_vq_notify_off(device_t, int);
static void vtpci_modern_set_vq(device_t, struct virtqueue *);
static void vtpci_modern_disable_vq(device_t, int);
static int vtpci_modern_register_msix(struct vtpci_modern_softc *, int,
struct vtpci_interrupt *);
static int vtpci_modern_register_cfg_msix(device_t,
struct vtpci_interrupt *);
static int vtpci_modern_register_vq_msix(device_t, int idx,
struct vtpci_interrupt *);
static uint64_t vtpci_modern_negotiate_features(device_t, uint64_t);
static int vtpci_modern_finalize_features(device_t);
-static int vtpci_modern_with_feature(device_t, uint64_t);
+static bool vtpci_modern_with_feature(device_t, uint64_t);
static int vtpci_modern_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtpci_modern_setup_interrupts(device_t, enum intr_type);
static void vtpci_modern_stop(device_t);
static int vtpci_modern_reinit(device_t, uint64_t);
static void vtpci_modern_reinit_complete(device_t);
static void vtpci_modern_notify_vq(device_t, uint16_t, bus_size_t);
static int vtpci_modern_config_generation(device_t);
static void vtpci_modern_read_dev_config(device_t, bus_size_t, void *, int);
static void vtpci_modern_write_dev_config(device_t, bus_size_t, const void *, int);
static int vtpci_modern_probe_configs(device_t);
static int vtpci_modern_find_cap(device_t, uint8_t, int *);
static int vtpci_modern_map_configs(struct vtpci_modern_softc *);
static void vtpci_modern_unmap_configs(struct vtpci_modern_softc *);
static int vtpci_modern_find_cap_resource(struct vtpci_modern_softc *,
uint8_t, int, int, struct vtpci_modern_resource_map *);
static int vtpci_modern_bar_type(struct vtpci_modern_softc *, int);
static struct resource *vtpci_modern_get_bar_resource(
struct vtpci_modern_softc *, int, int);
static struct resource *vtpci_modern_alloc_bar_resource(
struct vtpci_modern_softc *, int, int);
static void vtpci_modern_free_bar_resources(struct vtpci_modern_softc *);
static int vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *,
struct vtpci_modern_resource_map *);
static void vtpci_modern_free_resource_map(struct vtpci_modern_softc *,
struct vtpci_modern_resource_map *);
static void vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *);
static void vtpci_modern_free_msix_resource(struct vtpci_modern_softc *);
static void vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *);
static uint64_t vtpci_modern_read_features(struct vtpci_modern_softc *);
static void vtpci_modern_write_features(struct vtpci_modern_softc *,
uint64_t);
static void vtpci_modern_select_virtqueue(struct vtpci_modern_softc *, int);
static uint8_t vtpci_modern_get_status(struct vtpci_modern_softc *);
static void vtpci_modern_set_status(struct vtpci_modern_softc *, uint8_t);
static void vtpci_modern_reset(struct vtpci_modern_softc *);
static void vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *);
static uint8_t vtpci_modern_read_common_1(struct vtpci_modern_softc *,
bus_size_t);
static uint16_t vtpci_modern_read_common_2(struct vtpci_modern_softc *,
bus_size_t);
static uint32_t vtpci_modern_read_common_4(struct vtpci_modern_softc *,
bus_size_t);
static void vtpci_modern_write_common_1(struct vtpci_modern_softc *,
bus_size_t, uint8_t);
static void vtpci_modern_write_common_2(struct vtpci_modern_softc *,
bus_size_t, uint16_t);
static void vtpci_modern_write_common_4(struct vtpci_modern_softc *,
bus_size_t, uint32_t);
static void vtpci_modern_write_common_8(struct vtpci_modern_softc *,
bus_size_t, uint64_t);
static void vtpci_modern_write_notify_2(struct vtpci_modern_softc *,
bus_size_t, uint16_t);
static uint8_t vtpci_modern_read_isr_1(struct vtpci_modern_softc *,
bus_size_t);
static uint8_t vtpci_modern_read_device_1(struct vtpci_modern_softc *,
bus_size_t);
static uint16_t vtpci_modern_read_device_2(struct vtpci_modern_softc *,
bus_size_t);
static uint32_t vtpci_modern_read_device_4(struct vtpci_modern_softc *,
bus_size_t);
static uint64_t vtpci_modern_read_device_8(struct vtpci_modern_softc *,
bus_size_t);
static void vtpci_modern_write_device_1(struct vtpci_modern_softc *,
bus_size_t, uint8_t);
static void vtpci_modern_write_device_2(struct vtpci_modern_softc *,
bus_size_t, uint16_t);
static void vtpci_modern_write_device_4(struct vtpci_modern_softc *,
bus_size_t, uint32_t);
static void vtpci_modern_write_device_8(struct vtpci_modern_softc *,
bus_size_t, uint64_t);
/* Tunables. */
static int vtpci_modern_transitional = 0;
TUNABLE_INT("hw.virtio.pci.transitional", &vtpci_modern_transitional);
static device_method_t vtpci_modern_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtpci_modern_probe),
DEVMETHOD(device_attach, vtpci_modern_attach),
DEVMETHOD(device_detach, vtpci_modern_detach),
DEVMETHOD(device_suspend, vtpci_modern_suspend),
DEVMETHOD(device_resume, vtpci_modern_resume),
DEVMETHOD(device_shutdown, vtpci_modern_shutdown),
/* Bus interface. */
DEVMETHOD(bus_driver_added, vtpci_modern_driver_added),
DEVMETHOD(bus_child_detached, vtpci_modern_child_detached),
DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo),
DEVMETHOD(bus_read_ivar, vtpci_modern_read_ivar),
DEVMETHOD(bus_write_ivar, vtpci_modern_write_ivar),
/* VirtIO PCI interface. */
DEVMETHOD(virtio_pci_read_isr, vtpci_modern_read_isr),
DEVMETHOD(virtio_pci_get_vq_size, vtpci_modern_get_vq_size),
DEVMETHOD(virtio_pci_get_vq_notify_off, vtpci_modern_get_vq_notify_off),
DEVMETHOD(virtio_pci_set_vq, vtpci_modern_set_vq),
DEVMETHOD(virtio_pci_disable_vq, vtpci_modern_disable_vq),
DEVMETHOD(virtio_pci_register_cfg_msix, vtpci_modern_register_cfg_msix),
DEVMETHOD(virtio_pci_register_vq_msix, vtpci_modern_register_vq_msix),
/* VirtIO bus interface. */
DEVMETHOD(virtio_bus_negotiate_features, vtpci_modern_negotiate_features),
DEVMETHOD(virtio_bus_finalize_features, vtpci_modern_finalize_features),
DEVMETHOD(virtio_bus_with_feature, vtpci_modern_with_feature),
DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_modern_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtpci_modern_setup_interrupts),
DEVMETHOD(virtio_bus_stop, vtpci_modern_stop),
DEVMETHOD(virtio_bus_reinit, vtpci_modern_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtpci_modern_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtpci_modern_notify_vq),
DEVMETHOD(virtio_bus_config_generation, vtpci_modern_config_generation),
DEVMETHOD(virtio_bus_read_device_config, vtpci_modern_read_dev_config),
DEVMETHOD(virtio_bus_write_device_config, vtpci_modern_write_dev_config),
DEVMETHOD_END
};
static driver_t vtpci_modern_driver = {
.name = "virtio_pci",
.methods = vtpci_modern_methods,
.size = sizeof(struct vtpci_modern_softc)
};
DRIVER_MODULE(virtio_pci_modern, pci, vtpci_modern_driver, 0, 0);
/*
 * Match modern virtio PCI devices. Transitional (legacy-range) device
 * IDs are only claimed when the hw.virtio.pci.transitional tunable is
 * set, in which case the virtio device type comes from the PCI
 * subdevice ID instead of the device ID offset.
 */
static int
vtpci_modern_probe(device_t dev)
{
char desc[64];
const char *name;
uint16_t devid;
if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
return (ENXIO);
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MODERN_MAX)
return (ENXIO);
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN) {
if (!vtpci_modern_transitional)
return (ENXIO);
devid = pci_get_subdevice(dev);
} else
devid = pci_get_device(dev) - VIRTIO_PCI_DEVICEID_MODERN_MIN;
/* Require the mandatory virtio config capabilities to be present. */
if (vtpci_modern_probe_configs(dev) != 0)
return (ENXIO);
name = virtio_device_name(devid);
if (name == NULL)
name = "Unknown";
snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name);
device_set_desc_copy(dev, desc);
return (BUS_PROBE_DEFAULT);
}
/*
 * Map the virtio config structures, reset the device, acknowledge it,
 * and create/attach the virtio child device. On failure the status is
 * set to FAILED and the attach is unwound through detach.
 */
static int
vtpci_modern_attach(device_t dev)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtpci_dev = dev;
vtpci_init(&sc->vtpci_common, dev, true);
/* Same devid derivation as in probe (transitional vs. modern ID). */
if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN)
sc->vtpci_devid = pci_get_subdevice(dev);
else
sc->vtpci_devid = pci_get_device(dev) -
VIRTIO_PCI_DEVICEID_MODERN_MIN;
error = vtpci_modern_map_configs(sc);
if (error) {
device_printf(dev, "cannot map configs\n");
vtpci_modern_unmap_configs(sc);
return (error);
}
vtpci_modern_reset(sc);
/* Tell the host we've noticed this device. */
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
error = vtpci_add_child(&sc->vtpci_common);
if (error)
goto fail;
vtpci_modern_probe_and_attach_child(sc);
return (0);
fail:
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
vtpci_modern_detach(dev);
return (error);
}
/*
 * Detach the virtio child, then reset the device and release all
 * mapped config resources. Fails (without teardown) if the child
 * refuses to detach.
 */
static int
vtpci_modern_detach(device_t dev)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_delete_child(&sc->vtpci_common);
if (error)
return (error);
vtpci_modern_reset(sc);
vtpci_modern_unmap_configs(sc);
return (0);
}
static int
vtpci_modern_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
vtpci_modern_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static int
vtpci_modern_shutdown(device_t dev)
{
(void) bus_generic_shutdown(dev);
/* Forcibly stop the host device. */
vtpci_modern_stop(dev);
return (0);
}
static void
vtpci_modern_driver_added(device_t dev, driver_t *driver)
{
vtpci_modern_probe_and_attach_child(device_get_softc(dev));
}
/*
 * Bus callback when the virtio child detaches: reset the host device,
 * clear the common per-child state, then re-ACK so a future driver can
 * attach again.
 */
static void
vtpci_modern_child_detached(device_t dev, device_t child)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_reset(sc);
vtpci_child_detached(&sc->vtpci_common);
/* After the reset, retell the host we've noticed this device. */
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
}
static int
vtpci_modern_read_ivar(device_t dev, device_t child, int index,
uintptr_t *result)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
if (vtpci_child_device(cn) != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_DEVTYPE:
*result = sc->vtpci_devid;
break;
default:
return (vtpci_read_ivar(cn, index, result));
}
return (0);
}
static int
vtpci_modern_write_ivar(device_t dev, device_t child, int index,
uintptr_t value)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
if (vtpci_child_device(cn) != child)
return (ENOENT);
switch (index) {
default:
return (vtpci_write_ivar(cn, index, value));
}
return (0);
}
/*
 * Negotiate the feature set: read the 64-bit host feature bits,
 * intersect with what the child driver supports (always including
 * VIRTIO_F_VERSION_1 for the modern interface), and write the result
 * back as the guest features. Returns the negotiated set.
 */
static uint64_t
vtpci_modern_negotiate_features(device_t dev, uint64_t child_features)
{
struct vtpci_modern_softc *sc;
uint64_t host_features, features;
sc = device_get_softc(dev);
host_features = vtpci_modern_read_features(sc);
/*
 * Since the driver was added as a child of the modern PCI bus,
 * always add the V1 flag.
 */
child_features |= VIRTIO_F_VERSION_1;
features = vtpci_negotiate_features(&sc->vtpci_common,
child_features, host_features);
vtpci_modern_write_features(sc, features);
return (features);
}
/*
 * Set FEATURES_OK and re-read the status: if the device cleared the
 * bit, it rejected the negotiated feature set and ENOTSUP is returned.
 */
static int
vtpci_modern_finalize_features(device_t dev)
{
struct vtpci_modern_softc *sc;
uint8_t status;
sc = device_get_softc(dev);
/*
 * Must re-read the status after setting it to verify the negotiated
 * features were accepted by the device.
 */
vtpci_modern_set_status(sc, VIRTIO_CONFIG_S_FEATURES_OK);
status = vtpci_modern_get_status(sc);
if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
device_printf(dev, "desired features were not accepted\n");
return (ENOTSUP);
}
return (0);
}
-static int
+static bool
vtpci_modern_with_feature(device_t dev, uint64_t feature)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
return (vtpci_with_feature(&sc->vtpci_common, feature));
}
/*
 * Read the 64-bit device (host) feature bits through the 32-bit
 * select/read register pair: select word 0 then word 1.
 */
static uint64_t
vtpci_modern_read_features(struct vtpci_modern_softc *sc)
{
uint32_t features0, features1;
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 0);
features0 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 1);
features1 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF);
return (((uint64_t) features1 << 32) | features0);
}
/*
 * Write the negotiated 64-bit guest feature bits through the 32-bit
 * select/write register pair: word 0 then word 1.
 */
static void
vtpci_modern_write_features(struct vtpci_modern_softc *sc, uint64_t features)
{
uint32_t features0, features1;
features0 = features;
features1 = features >> 32;
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 0);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features0);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 1);
vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features1);
}
/*
 * Allocate 'nvqs' virtqueues after checking the request against the
 * device's advertised maximum queue count; the actual allocation is
 * delegated to the common vtpci layer.
 */
static int
vtpci_modern_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *vq_info)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
uint16_t max_nvqs;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
max_nvqs = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_NUMQ);
if (nvqs > max_nvqs) {
device_printf(sc->vtpci_dev, "requested virtqueue count %d "
"exceeds max %d\n", nvqs, max_nvqs);
return (E2BIG);
}
return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info));
}
static int
vtpci_modern_setup_interrupts(device_t dev, enum intr_type type)
{
struct vtpci_modern_softc *sc;
int error;
sc = device_get_softc(dev);
error = vtpci_setup_interrupts(&sc->vtpci_common, type);
if (error == 0)
vtpci_modern_enable_virtqueues(sc);
return (error);
}
static void
vtpci_modern_stop(device_t dev)
{
vtpci_modern_reset(device_get_softc(dev));
}
static int
vtpci_modern_reinit(device_t dev, uint64_t features)
{
struct vtpci_modern_softc *sc;
struct vtpci_common *cn;
int error;
sc = device_get_softc(dev);
cn = &sc->vtpci_common;
/*
* Redrive the device initialization. This is a bit of an abuse of
* the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
* play nice.
*
* We do not allow the host device to change from what was originally
* negotiated beyond what the guest driver changed. MSIX state should
* not change, number of virtqueues and their size remain the same, etc.
* This will need to be rethought when we want to support migration.
*/
if (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
vtpci_modern_stop(dev);
/*
* Quickly drive the status through ACK and DRIVER. The device does
* not become usable again until DRIVER_OK in reinit complete.
*/
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);
/*
* TODO: Check that features are not added as to what was
* originally negotiated.
*/
vtpci_modern_negotiate_features(dev, features);
error = vtpci_modern_finalize_features(dev);
if (error) {
device_printf(dev, "cannot finalize features during reinit\n");
return (error);
}
error = vtpci_reinit(cn);
if (error)
return (error);
return (0);
}
static void
vtpci_modern_reinit_complete(device_t dev)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_enable_virtqueues(sc);
vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
static void
vtpci_modern_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
vtpci_modern_write_notify_2(sc, offset, queue);
}
static uint8_t
vtpci_modern_get_status(struct vtpci_modern_softc *sc)
{
return (vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_STATUS));
}
static void
vtpci_modern_set_status(struct vtpci_modern_softc *sc, uint8_t status)
{
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= vtpci_modern_get_status(sc);
vtpci_modern_write_common_1(sc, VIRTIO_PCI_COMMON_STATUS, status);
}
static int
vtpci_modern_config_generation(device_t dev)
{
struct vtpci_modern_softc *sc;
uint8_t gen;
sc = device_get_softc(dev);
gen = vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_CFGGENERATION);
return (gen);
}
static void
vtpci_modern_read_dev_config(device_t dev, bus_size_t offset, void *dst,
int length)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
panic("%s: attempt to read dev config but not present",
__func__);
}
switch (length) {
case 1:
*(uint8_t *) dst = vtpci_modern_read_device_1(sc, offset);
break;
case 2:
*(uint16_t *) dst = virtio_htog16(true,
vtpci_modern_read_device_2(sc, offset));
break;
case 4:
*(uint32_t *) dst = virtio_htog32(true,
vtpci_modern_read_device_4(sc, offset));
break;
case 8:
*(uint64_t *) dst = virtio_htog64(true,
vtpci_modern_read_device_8(sc, offset));
break;
default:
panic("%s: device %s invalid device read length %d offset %d",
__func__, device_get_nameunit(dev), length, (int) offset);
}
}
static void
vtpci_modern_write_dev_config(device_t dev, bus_size_t offset, const void *src,
int length)
{
struct vtpci_modern_softc *sc;
sc = device_get_softc(dev);
if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) {
panic("%s: attempt to write dev config but not present",
__func__);
}
switch (length) {
case 1:
vtpci_modern_write_device_1(sc, offset, *(const uint8_t *) src);
break;
case 2: {
uint16_t val = virtio_gtoh16(true, *(const uint16_t *) src);
vtpci_modern_write_device_2(sc, offset, val);
break;
}
case 4: {
uint32_t val = virtio_gtoh32(true, *(const uint32_t *) src);
vtpci_modern_write_device_4(sc, offset, val);
break;
}
case 8: {
uint64_t val = virtio_gtoh64(true, *(const uint64_t *) src);
vtpci_modern_write_device_8(sc, offset, val);
break;
}
default:
panic("%s: device %s invalid device write length %d offset %d",
__func__, device_get_nameunit(dev), length, (int) offset);
}
}
static int
vtpci_modern_probe_configs(device_t dev)
{
int error;
/*
* These config capabilities must be present. The DEVICE_CFG
* capability is only present if the device requires it.
*/
error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_COMMON_CFG, NULL);
if (error) {
device_printf(dev, "cannot find COMMON_CFG capability\n");
return (error);
}
error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_NOTIFY_CFG, NULL);
if (error) {
device_printf(dev, "cannot find NOTIFY_CFG capability\n");
return (error);
}
error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_ISR_CFG, NULL);
if (error) {
device_printf(dev, "cannot find ISR_CFG capability\n");
return (error);
}
return (0);
}
/*
 * Walk the PCI vendor-specific capability list looking for a virtio
 * capability of 'cfg_type'. On success optionally returns the config
 * space offset of the capability via 'cap_offset'; returns the
 * pci_find_cap/pci_find_next_cap error (non-zero) if not found.
 */
static int
vtpci_modern_find_cap(device_t dev, uint8_t cfg_type, int *cap_offset)
{
uint32_t type, bar;
int capreg, error;
for (error = pci_find_cap(dev, PCIY_VENDOR, &capreg);
error == 0;
error = pci_find_next_cap(dev, PCIY_VENDOR, capreg, &capreg)) {
type = pci_read_config(dev, capreg +
offsetof(struct virtio_pci_cap, cfg_type), 1);
bar = pci_read_config(dev, capreg +
offsetof(struct virtio_pci_cap, bar), 1);
/* Must ignore reserved BARs. */
if (bar >= VTPCI_MODERN_MAX_BARS)
continue;
if (type == cfg_type) {
if (cap_offset != NULL)
*cap_offset = capreg;
break;
}
}
return (error);
}
static int
vtpci_modern_map_common_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_COMMON_CFG,
sizeof(struct virtio_pci_common_cfg), 4, &sc->vtpci_common_res_map);
if (error) {
device_printf(dev, "cannot find cap COMMON_CFG resource\n");
return (error);
}
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_common_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for COMMON_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_notify_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int cap_offset, error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_NOTIFY_CFG,
-1, 2, &sc->vtpci_notify_res_map);
if (error) {
device_printf(dev, "cannot find cap NOTIFY_CFG resource\n");
return (error);
}
cap_offset = sc->vtpci_notify_res_map.vtrm_cap_offset;
sc->vtpci_notify_offset_multiplier = pci_read_config(dev, cap_offset +
offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), 4);
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_notify_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for NOTIFY_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_isr_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_ISR_CFG,
sizeof(uint8_t), 1, &sc->vtpci_isr_res_map);
if (error) {
device_printf(dev, "cannot find cap ISR_CFG resource\n");
return (error);
}
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_isr_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for ISR_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_device_config(struct vtpci_modern_softc *sc)
{
device_t dev;
int error;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_DEVICE_CFG,
-1, 4, &sc->vtpci_device_res_map);
if (error == ENOENT) {
/* Device configuration is optional depending on device. */
return (0);
} else if (error) {
device_printf(dev, "cannot find cap DEVICE_CFG resource\n");
return (error);
}
error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_device_res_map);
if (error) {
device_printf(dev, "cannot alloc resource for DEVICE_CFG\n");
return (error);
}
return (0);
}
static int
vtpci_modern_map_configs(struct vtpci_modern_softc *sc)
{
int error;
error = vtpci_modern_map_common_config(sc);
if (error)
return (error);
error = vtpci_modern_map_notify_config(sc);
if (error)
return (error);
error = vtpci_modern_map_isr_config(sc);
if (error)
return (error);
error = vtpci_modern_map_device_config(sc);
if (error)
return (error);
vtpci_modern_alloc_msix_resource(sc);
return (0);
}
static void
vtpci_modern_unmap_configs(struct vtpci_modern_softc *sc)
{
vtpci_modern_free_resource_map(sc, &sc->vtpci_common_res_map);
vtpci_modern_free_resource_map(sc, &sc->vtpci_notify_res_map);
vtpci_modern_free_resource_map(sc, &sc->vtpci_isr_res_map);
vtpci_modern_free_resource_map(sc, &sc->vtpci_device_res_map);
vtpci_modern_free_bar_resources(sc);
vtpci_modern_free_msix_resource(sc);
sc->vtpci_notify_offset_multiplier = 0;
}
static int
vtpci_modern_find_cap_resource(struct vtpci_modern_softc *sc, uint8_t cfg_type,
int min_size, int alignment, struct vtpci_modern_resource_map *res)
{
device_t dev;
int cap_offset, offset, length, error;
uint8_t bar, cap_length;
dev = sc->vtpci_dev;
error = vtpci_modern_find_cap(dev, cfg_type, &cap_offset);
if (error)
return (error);
cap_length = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, cap_len), 1);
if (cap_length < sizeof(struct virtio_pci_cap)) {
device_printf(dev, "cap %u length %d less than expected\n",
cfg_type, cap_length);
return (ENXIO);
}
bar = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, bar), 1);
offset = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, offset), 4);
length = pci_read_config(dev,
cap_offset + offsetof(struct virtio_pci_cap, length), 4);
if (min_size != -1 && length < min_size) {
device_printf(dev, "cap %u struct length %d less than min %d\n",
cfg_type, length, min_size);
return (ENXIO);
}
if (offset % alignment) {
device_printf(dev, "cap %u struct offset %d not aligned to %d\n",
cfg_type, offset, alignment);
return (ENXIO);
}
/* BMV: TODO Can we determine the size of the BAR here? */
res->vtrm_cap_offset = cap_offset;
res->vtrm_bar = bar;
res->vtrm_offset = offset;
res->vtrm_length = length;
res->vtrm_type = vtpci_modern_bar_type(sc, bar);
return (0);
}
static int
vtpci_modern_bar_type(struct vtpci_modern_softc *sc, int bar)
{
uint32_t val;
/*
 * The BAR described by a config capability may be either an IOPORT or
 * MEM, but we must know the type when calling bus_alloc_resource().
 */
val = pci_read_config(sc->vtpci_dev, PCIR_BAR(bar), 4);
/* Low bit of the BAR register distinguishes IO space from memory. */
if (PCI_BAR_IO(val))
return (SYS_RES_IOPORT);
else
return (SYS_RES_MEMORY);
}
static struct resource *
vtpci_modern_get_bar_resource(struct vtpci_modern_softc *sc, int bar, int type)
{
struct resource *res;
MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
res = sc->vtpci_bar_res[bar].vtbr_res;
MPASS(res == NULL || sc->vtpci_bar_res[bar].vtbr_type == type);
return (res);
}
/*
 * Return the resource for 'bar', allocating it (RF_UNMAPPED — mappings
 * are created per config structure later) on first use. Multiple virtio
 * config structures may share a BAR, so the resource is cached in
 * vtpci_bar_res and its type must be consistent across callers.
 */
static struct resource *
vtpci_modern_alloc_bar_resource(struct vtpci_modern_softc *sc, int bar,
int type)
{
struct resource *res;
int rid;
MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS);
MPASS(type == SYS_RES_MEMORY || type == SYS_RES_IOPORT);
res = sc->vtpci_bar_res[bar].vtbr_res;
if (res != NULL) {
MPASS(sc->vtpci_bar_res[bar].vtbr_type == type);
return (res);
}
rid = PCIR_BAR(bar);
res = bus_alloc_resource_any(sc->vtpci_dev, type, &rid,
RF_ACTIVE | RF_UNMAPPED);
if (res != NULL) {
sc->vtpci_bar_res[bar].vtbr_res = res;
sc->vtpci_bar_res[bar].vtbr_type = type;
}
return (res);
}
static void
vtpci_modern_free_bar_resources(struct vtpci_modern_softc *sc)
{
	device_t dev;
	struct resource *r;
	int bar;

	dev = sc->vtpci_dev;

	/* Release every BAR resource cached by the alloc path. */
	for (bar = 0; bar < VTPCI_MODERN_MAX_BARS; bar++) {
		r = sc->vtpci_bar_res[bar].vtbr_res;
		if (r == NULL)
			continue;
		bus_release_resource(dev, sc->vtpci_bar_res[bar].vtbr_type,
		    PCIR_BAR(bar), r);
		sc->vtpci_bar_res[bar].vtbr_res = NULL;
		sc->vtpci_bar_res[bar].vtbr_type = 0;
	}
}
static int
vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *sc,
    struct vtpci_modern_resource_map *map)
{
	struct resource_map_request req;
	struct resource *r;

	/* Make sure the underlying BAR resource exists. */
	r = vtpci_modern_alloc_bar_resource(sc, map->vtrm_bar,
	    map->vtrm_type);
	if (r == NULL)
		return (ENXIO);

	/* Map only the window of the BAR described by the capability. */
	resource_init_map_request(&req);
	req.offset = map->vtrm_offset;
	req.length = map->vtrm_length;

	return (bus_map_resource(sc->vtpci_dev, map->vtrm_type, r, &req,
	    &map->vtrm_map));
}
static void
vtpci_modern_free_resource_map(struct vtpci_modern_softc *sc,
    struct vtpci_modern_resource_map *map)
{
	struct resource *res;
	int type;

	type = map->vtrm_type;
	res = vtpci_modern_get_bar_resource(sc, map->vtrm_bar, type);

	/*
	 * Only unmap if the BAR resource still exists and this map was
	 * actually established (r_size != 0).  Zeroing the map afterwards
	 * makes repeated calls harmless.
	 */
	if (res != NULL && map->vtrm_map.r_size != 0) {
		bus_unmap_resource(sc->vtpci_dev, type, res, &map->vtrm_map);
		bzero(map, sizeof(struct vtpci_modern_resource_map));
	}
}
static void
vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *sc)
{
	device_t dev;
	int bar;

	dev = sc->vtpci_dev;

	/* Nothing to do if MSI-X is unavailable or has no table BAR. */
	if (!vtpci_is_msix_available(&sc->vtpci_common) ||
	    (bar = pci_msix_table_bar(dev)) == -1)
		return;

	/* TODO: Can this BAR be in the 0-5 range? */
	/*
	 * Record the BAR before the call: bus_alloc_resource_any() may
	 * modify the rid passed by reference.
	 */
	sc->vtpci_msix_bar = bar;
	if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar, RF_ACTIVE)) == NULL)
		device_printf(dev, "Unable to map MSIX table\n");
}
static void
vtpci_modern_free_msix_resource(struct vtpci_modern_softc *sc)
{
	device_t dev;

	dev = sc->vtpci_dev;

	/* Release the MSI-X table resource, if one was allocated. */
	if (sc->vtpci_msix_res == NULL)
		return;

	bus_release_resource(dev, SYS_RES_MEMORY, sc->vtpci_msix_bar,
	    sc->vtpci_msix_res);
	sc->vtpci_msix_bar = 0;
	sc->vtpci_msix_res = NULL;
}
static void
vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *sc)
{
	device_t dev, child;

	dev = sc->vtpci_dev;
	child = vtpci_child_device(&sc->vtpci_common);

	/* Only attach a child that exists and has not been probed yet. */
	if (child == NULL || device_get_state(child) != DS_NOTPRESENT)
		return;
	if (device_probe(child) != 0)
		return;

	/* Announce that a matching driver was found before attaching it. */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER);

	if (device_attach(child) != 0) {
		vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED);
		/* Reset state for later attempt. */
		vtpci_modern_child_detached(dev, child);
	} else {
		vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}
static int
vtpci_modern_register_msix(struct vtpci_modern_softc *sc, int offset,
    struct vtpci_interrupt *intr)
{
	uint16_t vector;

	if (intr != NULL) {
		/* Map from guest rid to host vector. */
		vector = intr->vti_rid - 1;
	} else
		vector = VIRTIO_MSI_NO_VECTOR;

	vtpci_modern_write_common_2(sc, offset, vector);

	/*
	 * Read the register back; if the device did not accept the vector
	 * the readback differs and we report ENODEV.
	 */
	return (vtpci_modern_read_common_2(sc, offset) == vector ? 0 : ENODEV);
}
static int
vtpci_modern_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	/* Bind (or clear) the MSI-X vector for config-change events. */
	error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_MSIX, intr);
	if (error != 0)
		device_printf(dev,
		    "unable to register config MSIX interrupt\n");

	return (error);
}
static int
vtpci_modern_register_vq_msix(device_t dev, int idx,
    struct vtpci_interrupt *intr)
{
	struct vtpci_modern_softc *sc;
	int error;

	sc = device_get_softc(dev);

	/* The queue MSI-X register applies to the selected queue. */
	vtpci_modern_select_virtqueue(sc, idx);
	error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_Q_MSIX, intr);
	if (error != 0)
		device_printf(dev,
		    "unable to register virtqueue MSIX interrupt\n");

	return (error);
}
static void
vtpci_modern_reset(struct vtpci_modern_softc *sc)
{
	/*
	 * Setting the status to RESET sets the host device to the
	 * original, uninitialized state. Must poll the status until
	 * the reset is complete.
	 */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_RESET);
	/* Busy-wait: the device reports RESET once the reset finished. */
	while (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET)
		cpu_spinwait();
}
/*
 * Select the queue that subsequent VIRTIO_PCI_COMMON_Q_* register
 * accesses refer to.
 */
static void
vtpci_modern_select_virtqueue(struct vtpci_modern_softc *sc, int idx)
{
	vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_SELECT, idx);
}
static uint8_t
vtpci_modern_read_isr(device_t dev)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);
	return (vtpci_modern_read_isr_1(sc, 0));
}
static uint16_t
vtpci_modern_get_vq_size(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	/* Q_SIZE reports the size of the currently selected queue. */
	vtpci_modern_select_virtqueue(sc, idx);
	return (vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_SIZE));
}
static bus_size_t
vtpci_modern_get_vq_notify_off(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;
	uint16_t noff;

	sc = device_get_softc(dev);
	vtpci_modern_select_virtqueue(sc, idx);
	noff = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_NOFF);

	/* Scale the queue's notify index by the device's multiplier. */
	return (noff * sc->vtpci_notify_offset_multiplier);
}
static void
vtpci_modern_set_vq(device_t dev, struct virtqueue *vq)
{
	struct vtpci_modern_softc *sc;

	sc = device_get_softc(dev);

	vtpci_modern_select_virtqueue(sc, virtqueue_index(vq));

	/* BMV: Currently we never adjust the device's proposed VQ size. */
	vtpci_modern_write_common_2(sc,
	    VIRTIO_PCI_COMMON_Q_SIZE, virtqueue_size(vq));

	/* Program the physical addresses of the ring's three parts. */
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_DESCLO, virtqueue_desc_paddr(vq));
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_AVAILLO, virtqueue_avail_paddr(vq));
	vtpci_modern_write_common_8(sc,
	    VIRTIO_PCI_COMMON_Q_USEDLO, virtqueue_used_paddr(vq));
}
static void
vtpci_modern_disable_vq(device_t dev, int idx)
{
	struct vtpci_modern_softc *sc;
	const bus_size_t regs[] = {
		VIRTIO_PCI_COMMON_Q_DESCLO,
		VIRTIO_PCI_COMMON_Q_AVAILLO,
		VIRTIO_PCI_COMMON_Q_USEDLO,
	};
	u_int i;

	sc = device_get_softc(dev);
	vtpci_modern_select_virtqueue(sc, idx);

	/* Clear the selected queue's ring addresses. */
	for (i = 0; i < nitems(regs); i++)
		vtpci_modern_write_common_8(sc, regs[i], 0ULL);
}
static void
vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *sc)
{
	int i;

	/* Enable each allocated virtqueue in turn. */
	for (i = 0; i < sc->vtpci_common.vtpci_nvqs; i++) {
		vtpci_modern_select_virtqueue(sc, i);
		vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 1);
	}
}
/*
 * Register accessors for the four mapped config regions (common, notify,
 * ISR, device-specific).  The common config accessors convert values
 * to/from guest order with the modern (virtio V1) byte-order helpers;
 * the notify, ISR, and device-specific accessors perform no conversion
 * at this layer.
 */
static uint8_t
vtpci_modern_read_common_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_common_res_map.vtrm_map, off));
}

static uint16_t
vtpci_modern_read_common_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return virtio_htog16(true,
	    bus_read_2(&sc->vtpci_common_res_map.vtrm_map, off));
}

static uint32_t
vtpci_modern_read_common_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return virtio_htog32(true,
	    bus_read_4(&sc->vtpci_common_res_map.vtrm_map, off));
}

static void
vtpci_modern_write_common_1(struct vtpci_modern_softc *sc, bus_size_t off,
    uint8_t val)
{
	bus_write_1(&sc->vtpci_common_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_common_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_common_res_map.vtrm_map,
	    off, virtio_gtoh16(true, val));
}

static void
vtpci_modern_write_common_4(struct vtpci_modern_softc *sc, bus_size_t off,
    uint32_t val)
{
	bus_write_4(&sc->vtpci_common_res_map.vtrm_map,
	    off, virtio_gtoh32(true, val));
}

static void
vtpci_modern_write_common_8(struct vtpci_modern_softc *sc, bus_size_t off,
    uint64_t val)
{
	uint32_t val0, val1;

	/* 64-bit registers are written as two 32-bit halves, low first. */
	val0 = (uint32_t) val;
	val1 = val >> 32;

	vtpci_modern_write_common_4(sc, off, val0);
	vtpci_modern_write_common_4(sc, off + 4, val1);
}

static void
vtpci_modern_write_notify_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_notify_res_map.vtrm_map, off, val);
}

static uint8_t
vtpci_modern_read_isr_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_isr_res_map.vtrm_map, off));
}

static uint8_t
vtpci_modern_read_device_1(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_1(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint16_t
vtpci_modern_read_device_2(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_2(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint32_t
vtpci_modern_read_device_4(struct vtpci_modern_softc *sc, bus_size_t off)
{
	return (bus_read_4(&sc->vtpci_device_res_map.vtrm_map, off));
}

static uint64_t
vtpci_modern_read_device_8(struct vtpci_modern_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->vtpci_dev;

	/*
	 * Treat the 64-bit field as two 32-bit fields. Use the generation
	 * to ensure a consistent read.
	 */
	do {
		gen = vtpci_modern_config_generation(dev);
		val0 = vtpci_modern_read_device_4(sc, off);
		val1 = vtpci_modern_read_device_4(sc, off + 4);
	} while (gen != vtpci_modern_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}

static void
vtpci_modern_write_device_1(struct vtpci_modern_softc *sc, bus_size_t off,
    uint8_t val)
{
	bus_write_1(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_2(struct vtpci_modern_softc *sc, bus_size_t off,
    uint16_t val)
{
	bus_write_2(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_4(struct vtpci_modern_softc *sc, bus_size_t off,
    uint32_t val)
{
	bus_write_4(&sc->vtpci_device_res_map.vtrm_map, off, val);
}

static void
vtpci_modern_write_device_8(struct vtpci_modern_softc *sc, bus_size_t off,
    uint64_t val)
{
	uint32_t val0, val1;

	/* As with the common config, split the write into two halves. */
	val0 = (uint32_t) val;
	val1 = val >> 32;

	vtpci_modern_write_device_4(sc, off, val0);
	vtpci_modern_write_device_4(sc, off + 4, val1);
}
diff --git a/sys/dev/virtio/virtio.c b/sys/dev/virtio/virtio.c
index 0637e0299333..45a657542e28 100644
--- a/sys/dev/virtio/virtio.c
+++ b/sys/dev/virtio/virtio.c
@@ -1,379 +1,379 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_bus_if.h"
static int virtio_modevent(module_t, int, void *);
static const char *virtio_feature_name(uint64_t, struct virtio_feature_desc *);
/* Table mapping virtio device IDs to human-readable names; NULL-terminated. */
static struct virtio_ident {
	uint16_t	devid;
	const char	*name;
} virtio_ident_table[] = {
	{ VIRTIO_ID_NETWORK,		"Network"			},
	{ VIRTIO_ID_BLOCK,		"Block"				},
	{ VIRTIO_ID_CONSOLE,		"Console"			},
	{ VIRTIO_ID_ENTROPY,		"Entropy"			},
	{ VIRTIO_ID_BALLOON,		"Balloon"			},
	{ VIRTIO_ID_IOMEMORY,		"IOMemory"			},
	{ VIRTIO_ID_RPMSG,		"Remote Processor Messaging"	},
	{ VIRTIO_ID_SCSI,		"SCSI"				},
	{ VIRTIO_ID_9P,			"9P Transport"			},
	{ VIRTIO_ID_RPROC_SERIAL,	"Remote Processor Serial"	},
	{ VIRTIO_ID_CAIF,		"CAIF"				},
	{ VIRTIO_ID_GPU,		"GPU"				},
	{ VIRTIO_ID_INPUT,		"Input"				},
	{ VIRTIO_ID_VSOCK,		"VSOCK Transport"		},
	{ VIRTIO_ID_CRYPTO,		"Crypto"			},

	{ 0, NULL }
};

/* Device independent features. */
/* Used as the fallback name table by virtio_feature_name(); 0-terminated. */
static struct virtio_feature_desc virtio_common_feature_desc[] = {
	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"	},	/* Legacy */
	{ VIRTIO_F_ANY_LAYOUT,		"AnyLayout"	},	/* Legacy */
	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirectDesc"	},
	{ VIRTIO_RING_F_EVENT_IDX,	"RingEventIdx"	},
	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"	},	/* Legacy */
	{ VIRTIO_F_VERSION_1,		"Version1"	},
	{ VIRTIO_F_IOMMU_PLATFORM,	"IOMMUPlatform"	},

	{ 0, NULL }
};
const char *
virtio_device_name(uint16_t devid)
{
	int i;

	/* Linear scan; the table is terminated by a NULL name. */
	for (i = 0; virtio_ident_table[i].name != NULL; i++) {
		if (virtio_ident_table[i].devid == devid)
			return (virtio_ident_table[i].name);
	}

	return (NULL);
}
static const char *
virtio_feature_name(uint64_t val, struct virtio_feature_desc *desc)
{
	struct virtio_feature_desc *d;
	int pass;

	/* Search the device-specific table first, then the common table. */
	for (pass = 0; pass < 2; pass++) {
		d = (pass == 0) ? desc : virtio_common_feature_desc;
		if (d == NULL)
			continue;
		for (; d->vfd_val != 0; d++) {
			if (d->vfd_val == val)
				return (d->vfd_str);
		}
	}

	return (NULL);
}
/*
 * Append a description of the feature mask to the sbuf: the raw mask
 * followed by an angle-bracketed, comma-separated list of the names of
 * the set bits.  Returns the result of sbuf_finish().
 */
int
virtio_describe_sbuf(struct sbuf *sb, uint64_t features,
    struct virtio_feature_desc *desc)
{
	const char *name;
	uint64_t val;
	int n;

	sbuf_printf(sb, "%#jx", (uintmax_t) features);

	/* Walk the bits from highest to lowest. */
	for (n = 0, val = 1ULL << 63; val != 0; val >>= 1) {
		/*
		 * BAD_FEATURE is used to detect broken Linux clients
		 * and therefore is not applicable to FreeBSD.
		 */
		if (((features & val) == 0) || val == VIRTIO_F_BAD_FEATURE)
			continue;

		/* Open the list on the first named bit. */
		if (n++ == 0)
			sbuf_cat(sb, " <");
		else
			sbuf_cat(sb, ",");

		name = virtio_feature_name(val, desc);
		if (name == NULL)
			/* Unknown bits are printed as their raw value. */
			sbuf_printf(sb, "%#jx", (uintmax_t) val);
		else
			sbuf_cat(sb, name);
	}

	if (n > 0)
		sbuf_cat(sb, ">");

	return (sbuf_finish(sb));
}
/*
 * Print "<msg> features: <mask> <names>" to the console for this device.
 * Falls back to printing just the raw mask if the buffer allocation or
 * the sbuf formatting fails.
 */
void
virtio_describe(device_t dev, const char *msg, uint64_t features,
    struct virtio_feature_desc *desc)
{
	struct sbuf sb;
	char *buf;
	int error;

	if ((buf = malloc(1024, M_TEMP, M_NOWAIT)) == NULL) {
		error = ENOMEM;
		goto out;
	}

	sbuf_new(&sb, buf, 1024, SBUF_FIXEDLEN);
	sbuf_printf(&sb, "%s features: ", msg);

	error = virtio_describe_sbuf(&sb, features, desc);
	if (error == 0)
		device_printf(dev, "%s\n", sbuf_data(&sb));

	sbuf_delete(&sb);
	free(buf, M_TEMP);

out:
	if (error != 0) {
		device_printf(dev, "%s features: %#jx\n", msg,
		    (uintmax_t) features);
	}
}
uint64_t
virtio_filter_transport_features(uint64_t features)
{
	uint64_t mask;

	/* Mask covering the transport feature bit range. */
	mask = ((1ULL << (VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START))
	    - 1) << VIRTIO_TRANSPORT_F_START;

	/*
	 * Strip all transport bits except the specific ones the stack
	 * knows how to handle.
	 */
	mask = ~mask | VIRTIO_RING_F_INDIRECT_DESC |
	    VIRTIO_RING_F_EVENT_IDX | VIRTIO_F_VERSION_1;

	return (features & mask);
}
-int
+bool
virtio_bus_is_modern(device_t dev)
{
uintptr_t modern;
virtio_read_ivar(dev, VIRTIO_IVAR_MODERN, &modern);
return (modern != 0);
}
/*
 * Read an array of `count` elements of `size` bytes each from the
 * device config region, retrying the whole read whenever the config
 * generation changes, so the caller sees a consistent snapshot.
 */
void
virtio_read_device_config_array(device_t dev, bus_size_t offset, void *dst,
    int size, int count)
{
	int i, gen;

	do {
		gen = virtio_config_generation(dev);

		for (i = 0; i < count; i++) {
			virtio_read_device_config(dev, offset + i * size,
			    (uint8_t *) dst + i * size, size);
		}
	} while (gen != virtio_config_generation(dev));
}
/*
* VirtIO bus method wrappers.
*/
/* Read an instance variable from the parent bus; *val defaults to -1. */
void
virtio_read_ivar(device_t dev, int ivar, uintptr_t *val)
{

	/*
	 * Preset -1 so callers get a sentinel if the parent does not
	 * handle this ivar.
	 */
	*val = -1;
	BUS_READ_IVAR(device_get_parent(dev), dev, ivar, val);
}

/* Write an instance variable through the parent bus. */
void
virtio_write_ivar(device_t dev, int ivar, uintptr_t val)
{

	BUS_WRITE_IVAR(device_get_parent(dev), dev, ivar, val);
}

/* Negotiate the feature set with the transport; returns accepted bits. */
uint64_t
virtio_negotiate_features(device_t dev, uint64_t child_features)
{

	return (VIRTIO_BUS_NEGOTIATE_FEATURES(device_get_parent(dev),
	    child_features));
}

/* Finalize the negotiated features with the transport. */
int
virtio_finalize_features(device_t dev)
{

	return (VIRTIO_BUS_FINALIZE_FEATURES(device_get_parent(dev)));
}

/* Ask the transport to allocate `nvqs` virtqueues described by `info`. */
int
virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *info)
{

	return (VIRTIO_BUS_ALLOC_VIRTQUEUES(device_get_parent(dev), flags,
	    nvqs, info));
}

/* Set up interrupt handling with the transport. */
int
virtio_setup_intr(device_t dev, enum intr_type type)
{

	return (VIRTIO_BUS_SETUP_INTR(device_get_parent(dev), type));
}
-int
+bool
virtio_with_feature(device_t dev, uint64_t feature)
{
return (VIRTIO_BUS_WITH_FEATURE(device_get_parent(dev), feature));
}
/* Stop the device via the transport. */
void
virtio_stop(device_t dev)
{

	VIRTIO_BUS_STOP(device_get_parent(dev));
}

/* Reinitialize the device with the given feature set. */
int
virtio_reinit(device_t dev, uint64_t features)
{

	return (VIRTIO_BUS_REINIT(device_get_parent(dev), features));
}

/* Signal the transport that reinitialization is complete. */
void
virtio_reinit_complete(device_t dev)
{

	VIRTIO_BUS_REINIT_COMPLETE(device_get_parent(dev));
}

/* Return the device config generation counter from the transport. */
int
virtio_config_generation(device_t dev)
{

	return (VIRTIO_BUS_CONFIG_GENERATION(device_get_parent(dev)));
}

/* Read `len` bytes of device-specific config at `offset` into `dst`. */
void
virtio_read_device_config(device_t dev, bus_size_t offset, void *dst, int len)
{

	VIRTIO_BUS_READ_DEVICE_CONFIG(device_get_parent(dev),
	    offset, dst, len);
}

/* Write `len` bytes of device-specific config at `offset`. */
void
virtio_write_device_config(device_t dev, bus_size_t offset, const void *dst, int len)
{

	VIRTIO_BUS_WRITE_DEVICE_CONFIG(device_get_parent(dev),
	    offset, dst, len);
}
/*
 * Emit PNP match information for a virtio child device into `sb`.
 * Shared by the PCI and MMIO buses; always returns 0.
 */
int
virtio_child_pnpinfo(device_t busdev __unused, device_t child, struct sbuf *sb)
{

	/*
	 * All of these PCI fields will be only 16 bits, but on the vtmmio bus
	 * the corresponding fields (only "vendor" and "device_type") are 32
	 * bits.  Many virtio drivers can attach below either bus.
	 * Gratuitously expand these two fields to 32-bits to allow sharing PNP
	 * match table data between the mostly-similar buses.
	 *
	 * Subdevice and device_type are redundant in both buses, so I don't
	 * see a lot of PNP utility in exposing the same value under a
	 * different name.
	 */
	sbuf_printf(sb, "vendor=0x%08x device=0x%04x subvendor=0x%04x "
	    "device_type=0x%08x", (unsigned)virtio_get_vendor(child),
	    (unsigned)virtio_get_device(child),
	    (unsigned)virtio_get_subvendor(child),
	    (unsigned)virtio_get_device_type(child));
	return (0);
}
static int
virtio_modevent(module_t mod, int type, void *unused)
{

	/* The virtio module keeps no state; all lifecycle events succeed. */
	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}
static moduledata_t virtio_mod = {
"virtio",
virtio_modevent,
0
};
DECLARE_MODULE(virtio, virtio_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(virtio, 1);
diff --git a/sys/dev/virtio/virtio.h b/sys/dev/virtio/virtio.h
index b3ef98cbd167..96ebaf653428 100644
--- a/sys/dev/virtio/virtio.h
+++ b/sys/dev/virtio/virtio.h
@@ -1,191 +1,191 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2014, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VIRTIO_H_
#define _VIRTIO_H_
#include
#include
#include
#ifdef _KERNEL
struct sbuf;
struct vq_alloc_info;
/*
* Each virtqueue indirect descriptor list must be physically contiguous.
* To allow us to malloc(9) each list individually, limit the number
* supported to what will fit in one page. With 4KB pages, this is a limit
* of 256 descriptors. If there is ever a need for more, we can switch to
* contigmalloc(9) for the larger allocations, similar to what
* bus_dmamem_alloc(9) does.
*
* Note the sizeof(struct vring_desc) is 16 bytes.
*/
#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
/*
* VirtIO instance variables indices.
*/
#define VIRTIO_IVAR_DEVTYPE 1
#define VIRTIO_IVAR_FEATURE_DESC 2
#define VIRTIO_IVAR_VENDOR 3
#define VIRTIO_IVAR_DEVICE 4
#define VIRTIO_IVAR_SUBVENDOR 5
#define VIRTIO_IVAR_SUBDEVICE 6
#define VIRTIO_IVAR_MODERN 7
struct virtio_feature_desc {
uint64_t vfd_val;
const char *vfd_str;
};
#define VIRTIO_DRIVER_MODULE(name, driver, evh, arg) \
DRIVER_MODULE(name, virtio_mmio, driver, evh, arg); \
DRIVER_MODULE(name, virtio_pci, driver, evh, arg)
struct virtio_pnp_match {
uint32_t device_type;
const char *description;
};
#define VIRTIO_SIMPLE_PNPINFO(driver, devtype, desc) \
static const struct virtio_pnp_match driver ## _match = { \
.device_type = devtype, \
.description = desc, \
}; \
MODULE_PNP_INFO("U32:device_type;D:#", virtio_mmio, driver, \
&driver ## _match, 1); \
MODULE_PNP_INFO("U32:device_type;D:#", virtio_pci, driver, \
&driver ## _match, 1)
#define VIRTIO_SIMPLE_PROBE(dev, driver) \
(virtio_simple_probe(dev, &driver ## _match))
const char *virtio_device_name(uint16_t devid);
void virtio_describe(device_t dev, const char *msg,
uint64_t features, struct virtio_feature_desc *desc);
int virtio_describe_sbuf(struct sbuf *sb, uint64_t features,
struct virtio_feature_desc *desc);
uint64_t virtio_filter_transport_features(uint64_t features);
-int virtio_bus_is_modern(device_t dev);
+bool virtio_bus_is_modern(device_t dev);
void virtio_read_device_config_array(device_t dev, bus_size_t offset,
void *dst, int size, int count);
/*
* VirtIO Bus Methods.
*/
void virtio_read_ivar(device_t dev, int ivar, uintptr_t *val);
void virtio_write_ivar(device_t dev, int ivar, uintptr_t val);
uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features);
int virtio_finalize_features(device_t dev);
int virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *info);
int virtio_setup_intr(device_t dev, enum intr_type type);
-int virtio_with_feature(device_t dev, uint64_t feature);
+bool virtio_with_feature(device_t dev, uint64_t feature);
void virtio_stop(device_t dev);
int virtio_config_generation(device_t dev);
int virtio_reinit(device_t dev, uint64_t features);
void virtio_reinit_complete(device_t dev);
int virtio_child_pnpinfo(device_t busdev, device_t child, struct sbuf *sb);
/*
* Read/write a variable amount from the device specific (ie, network)
* configuration region. This region is encoded in the same endian as
* the guest.
*/
void virtio_read_device_config(device_t dev, bus_size_t offset,
void *dst, int length);
void virtio_write_device_config(device_t dev, bus_size_t offset,
const void *src, int length);
/* Inlined device specific read/write functions for common lengths. */
#define VIRTIO_RDWR_DEVICE_CONFIG(size, type) \
static inline type \
__CONCAT(virtio_read_dev_config_,size)(device_t dev, \
bus_size_t offset) \
{ \
type val; \
virtio_read_device_config(dev, offset, &val, sizeof(type)); \
return (val); \
} \
\
static inline void \
__CONCAT(virtio_write_dev_config_,size)(device_t dev, \
bus_size_t offset, type val) \
{ \
virtio_write_device_config(dev, offset, &val, sizeof(type)); \
}
VIRTIO_RDWR_DEVICE_CONFIG(1, uint8_t);
VIRTIO_RDWR_DEVICE_CONFIG(2, uint16_t);
VIRTIO_RDWR_DEVICE_CONFIG(4, uint32_t);
#undef VIRTIO_RDWR_DEVICE_CONFIG
#define VIRTIO_READ_IVAR(name, ivar) \
static inline int \
__CONCAT(virtio_get_,name)(device_t dev) \
{ \
uintptr_t val; \
virtio_read_ivar(dev, ivar, &val); \
return ((int) val); \
}
VIRTIO_READ_IVAR(device_type, VIRTIO_IVAR_DEVTYPE);
VIRTIO_READ_IVAR(vendor, VIRTIO_IVAR_VENDOR);
VIRTIO_READ_IVAR(device, VIRTIO_IVAR_DEVICE);
VIRTIO_READ_IVAR(subvendor, VIRTIO_IVAR_SUBVENDOR);
VIRTIO_READ_IVAR(subdevice, VIRTIO_IVAR_SUBDEVICE);
VIRTIO_READ_IVAR(modern, VIRTIO_IVAR_MODERN);
#undef VIRTIO_READ_IVAR
#define VIRTIO_WRITE_IVAR(name, ivar) \
static inline void \
__CONCAT(virtio_set_,name)(device_t dev, void *val) \
{ \
virtio_write_ivar(dev, ivar, (uintptr_t) val); \
}
VIRTIO_WRITE_IVAR(feature_desc, VIRTIO_IVAR_FEATURE_DESC);
#undef VIRTIO_WRITE_IVAR
/*
 * Generic probe routine used via VIRTIO_SIMPLE_PROBE(): accept the
 * device iff its type matches the driver's match entry.
 */
static inline int
virtio_simple_probe(device_t dev, const struct virtio_pnp_match *match)
{

	if (virtio_get_device_type(dev) != match->device_type)
		return (ENXIO);
	/* Matched: install the driver's description before attach. */
	device_set_desc(dev, match->description);
	return (BUS_PROBE_DEFAULT);
}
#endif /* _KERNEL */
#endif /* _VIRTIO_H_ */
diff --git a/sys/dev/virtio/virtio_bus_if.m b/sys/dev/virtio/virtio_bus_if.m
index 8d7b6db7d488..848c6ac05b62 100644
--- a/sys/dev/virtio/virtio_bus_if.m
+++ b/sys/dev/virtio/virtio_bus_if.m
@@ -1,116 +1,116 @@
#-
# Copyright (c) 2011, Bryan Venteicher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#include
#include
INTERFACE virtio_bus;
HEADER {
struct vq_alloc_info;
};
CODE {
static int
virtio_bus_default_finalize_features(device_t dev)
{
return (0);
}
static int
virtio_bus_default_config_generation(device_t dev)
{
return (0);
}
};
METHOD uint64_t negotiate_features {
device_t dev;
uint64_t child_features;
};
METHOD int finalize_features {
device_t dev;
} DEFAULT virtio_bus_default_finalize_features;
-METHOD int with_feature {
+METHOD bool with_feature {
device_t dev;
uint64_t feature;
};
METHOD int alloc_virtqueues {
device_t dev;
int flags;
int nvqs;
struct vq_alloc_info *info;
};
METHOD int setup_intr {
device_t dev;
enum intr_type type;
};
METHOD void stop {
device_t dev;
};
METHOD int reinit {
device_t dev;
uint64_t features;
};
METHOD void reinit_complete {
device_t dev;
};
METHOD void notify_vq {
device_t dev;
uint16_t queue;
bus_size_t offset;
};
METHOD int config_generation {
device_t dev;
} DEFAULT virtio_bus_default_config_generation;
METHOD void read_device_config {
device_t dev;
bus_size_t offset;
void *dst;
int len;
};
METHOD void write_device_config {
device_t dev;
bus_size_t offset;
const void *src;
int len;
};
METHOD void poll {
device_t dev;
};
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index cc7b0a8ed064..53e7de587195 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -1,881 +1,881 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Implements the virtqueue interface as basically described
* in the original VirtIO paper.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_bus_if.h"
/* Per-queue state for a split-ring virtqueue. */
struct virtqueue {
	device_t		 vq_dev;	/* owning transport device */
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;	/* ring size (power of 2) */
	uint32_t		 vq_flags;	/* VIRTQUEUE_FLAG_* below */
#define	VIRTQUEUE_FLAG_MODERN	 0x0001
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0002
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0004
	int			 vq_max_indirect_size;
	bus_size_t		 vq_notify_offset;
	virtqueue_intr_t	*vq_intrhand;	/* per-queue intr callback */
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;	/* free descriptors */
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	void			*vq_ring_mem;	/* ring memory allocation */
	int			 vq_indirect_mem_size;
	int			 vq_alignment;
	int			 vq_ring_size;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];

	/* Per-descriptor bookkeeping; array sized at allocation time. */
	struct vq_desc_extra {
		void		  *cookie;	/* caller's enqueue cookie */
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;	/* descriptors in this chain */
	} vq_descx[0];
};
/*
* The maximum virtqueue size is 2^15. Use that value as the end of
* descriptor chain terminator since it will never be a valid index
* in the descriptor table. This is used to verify we are correctly
* handling vq_free_cnt.
*/
#define VQ_RING_DESC_CHAIN_END 32768
/* Assert with the virtqueue name prepended for easier attribution. */
#define VQASSERT(_vq, _exp, _msg, ...) \
KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name, \
##__VA_ARGS__))
/* A descriptor index must lie inside the descriptor table. */
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, \
"invalid ring index: %d, max: %d", (_idx), \
(_vq)->vq_nentries)
/* When the ring is fully used the free list head must be the sentinel. */
#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
VQ_RING_DESC_CHAIN_END, "full ring terminated " \
"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
/* Internal helpers; see the definitions below for their contracts. */
static int virtqueue_init_indirect(struct virtqueue *vq, int);
static void virtqueue_free_indirect(struct virtqueue *vq);
static void virtqueue_init_indirect_list(struct virtqueue *,
struct vring_desc *);
static void vq_ring_init(struct virtqueue *);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
struct vring_desc *, uint16_t, struct sglist *, int, int);
-static int vq_ring_use_indirect(struct virtqueue *, int);
+static bool vq_ring_use_indirect(struct virtqueue *, int);
static void vq_ring_enqueue_indirect(struct virtqueue *, void *,
struct sglist *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *);
static void vq_ring_notify_host(struct virtqueue *);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
/* DTrace probes around descriptor-chain construction. */
SDT_PROVIDER_DEFINE(virtqueue);
SDT_PROBE_DEFINE6(virtqueue, , enqueue_segments, entry, "struct virtqueue *",
"struct vring_desc *", "uint16_t", "struct sglist *", "int", "int");
SDT_PROBE_DEFINE1(virtqueue, , enqueue_segments, return, "uint16_t");
/*
* Endianness conversion helpers: modern (V1) queues are little-endian,
* legacy queues use guest-native byte order.
*/
#define vq_modern(_vq) (((_vq)->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0)
#define vq_htog16(_vq, _val) virtio_htog16(vq_modern(_vq), _val)
#define vq_htog32(_vq, _val) virtio_htog32(vq_modern(_vq), _val)
#define vq_htog64(_vq, _val) virtio_htog64(vq_modern(_vq), _val)
#define vq_gtoh16(_vq, _val) virtio_gtoh16(vq_modern(_vq), _val)
#define vq_gtoh32(_vq, _val) virtio_gtoh32(vq_modern(_vq), _val)
#define vq_gtoh64(_vq, _val) virtio_gtoh64(vq_modern(_vq), _val)
/*
* Allocate and initialize a virtqueue.
*
* Validates the requested size (non-zero, power of 2) and the indirect
* descriptor limit, allocates the virtqueue structure with one
* vq_desc_extra per ring entry, optionally sets up per-entry indirect
* descriptor tables, and allocates page-aligned contiguous memory for
* the ring itself.  On success *vqp holds the new queue.
*
* Returns 0, or ENODEV/ENXIO/EINVAL for invalid parameters, ENOMEM on
* allocation failure.  Partial allocations are released on failure.
*/
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
bus_size_t notify_offset, int align, vm_paddr_t highaddr,
struct vq_alloc_info *info, struct virtqueue **vqp)
{
struct virtqueue *vq;
int error;
*vqp = NULL;
error = 0;
if (size == 0) {
device_printf(dev,
"virtqueue %d (%s) does not exist (size is zero)\n",
queue, info->vqai_name);
return (ENODEV);
} else if (!powerof2(size)) {
device_printf(dev,
"virtqueue %d (%s) size is not a power of 2: %d\n",
queue, info->vqai_name, size);
return (ENXIO);
} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
device_printf(dev, "virtqueue %d (%s) requested too many "
"indirect descriptors: %d, max %d\n",
queue, info->vqai_name, info->vqai_maxindirsz,
VIRTIO_MAX_INDIRECT);
return (EINVAL);
}
/* The vq_desc_extra array is a flexible tail of the virtqueue. */
vq = malloc(sizeof(struct virtqueue) +
size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
if (vq == NULL) {
device_printf(dev, "cannot allocate virtqueue\n");
return (ENOMEM);
}
vq->vq_dev = dev;
strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
vq->vq_queue_index = queue;
vq->vq_notify_offset = notify_offset;
vq->vq_alignment = align;
vq->vq_nentries = size;
vq->vq_free_cnt = size;
vq->vq_intrhand = info->vqai_intr;
vq->vq_intrhand_arg = info->vqai_intr_arg;
if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_VERSION_1) != 0)
vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
if (info->vqai_maxindirsz > 1) {
error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
if (error)
goto fail;
}
/* Ring memory must be physically contiguous and page aligned. */
vq->vq_ring_size = round_page(vring_size(size, align));
vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
if (vq->vq_ring_mem == NULL) {
device_printf(dev,
"cannot allocate memory for virtqueue ring\n");
error = ENOMEM;
goto fail;
}
vq_ring_init(vq);
virtqueue_disable_intr(vq);
*vqp = vq;
/* Success also falls through here with error == 0; nothing is freed. */
fail:
if (error)
virtqueue_free(vq);
return (error);
}
/*
* Allocate one indirect descriptor table per ring entry, each holding
* up to indirect_size descriptors.
*
* If VIRTIO_RING_F_INDIRECT_DESC was not negotiated this is a silent
* no-op (returns 0) and the queue runs without indirect descriptors.
* On partial allocation failure the caller unwinds via virtqueue_free(),
* which calls virtqueue_free_indirect().
*/
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
device_t dev;
struct vq_desc_extra *dxp;
int i, size;
dev = vq->vq_dev;
if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
/*
* Indirect descriptors requested by the driver but not
* negotiated. Return zero to keep the initialization
* going: we'll run fine without.
*/
if (bootverbose)
device_printf(dev, "virtqueue %d (%s) requested "
"indirect descriptors but not negotiated\n",
vq->vq_queue_index, vq->vq_name);
return (0);
}
size = indirect_size * sizeof(struct vring_desc);
vq->vq_max_indirect_size = indirect_size;
vq->vq_indirect_mem_size = size;
vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
for (i = 0; i < vq->vq_nentries; i++) {
dxp = &vq->vq_descx[i];
dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
if (dxp->indirect == NULL) {
device_printf(dev, "cannot allocate indirect list\n");
return (ENOMEM);
}
/* The device is handed the physical address of the table. */
dxp->indirect_paddr = vtophys(dxp->indirect);
virtqueue_init_indirect_list(vq, dxp->indirect);
}
return (0);
}
/*
 * Release every per-entry indirect descriptor table and clear the
 * indirect-capability state on the queue.  Tables are allocated
 * front-to-back, so the first NULL entry marks the end of what was
 * successfully allocated (handles partial-init unwinding).
 */
static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	int idx;

	for (idx = 0; idx < vq->vq_nentries; idx++) {
		struct vq_desc_extra *extra;

		extra = &vq->vq_descx[idx];
		if (extra->indirect == NULL)
			break;
		free(extra->indirect, M_DEVBUF);
		extra->indirect = NULL;
		extra->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}
static void
virtqueue_init_indirect_list(struct virtqueue *vq,
struct vring_desc *indirect)
{
int i;
bzero(indirect, vq->vq_indirect_mem_size);
for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
indirect[i].next = vq_gtoh16(vq, i + 1);
indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}
/*
* Reinitialize a virtqueue after a device reset.
*
* The size must match the original allocation (EINVAL otherwise).  If
* entries are still outstanding their cookies are leaked with a warning;
* all ring memory is zeroed and the free descriptor chain rebuilt.
*/
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
struct vq_desc_extra *dxp;
int i;
if (vq->vq_nentries != size) {
device_printf(vq->vq_dev,
"%s: '%s' changed size; old=%hu, new=%hu\n",
__func__, vq->vq_name, vq->vq_nentries, size);
return (EINVAL);
}
/* Warn if the virtqueue was not properly cleaned up. */
if (vq->vq_free_cnt != vq->vq_nentries) {
device_printf(vq->vq_dev,
"%s: warning '%s' virtqueue not empty, "
"leaking %d entries\n", __func__, vq->vq_name,
vq->vq_nentries - vq->vq_free_cnt);
}
vq->vq_desc_head_idx = 0;
vq->vq_used_cons_idx = 0;
vq->vq_queued_cnt = 0;
vq->vq_free_cnt = vq->vq_nentries;
/* To be safe, reset all our allocated memory. */
bzero(vq->vq_ring_mem, vq->vq_ring_size);
for (i = 0; i < vq->vq_nentries; i++) {
dxp = &vq->vq_descx[i];
dxp->cookie = NULL;
dxp->ndescs = 0;
if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
virtqueue_init_indirect_list(vq, dxp->indirect);
}
vq_ring_init(vq);
virtqueue_disable_intr(vq);
return (0);
}
/*
* Free a virtqueue and all memory it owns: the indirect descriptor
* tables (if negotiated), the contiguous ring allocation, and the
* virtqueue structure itself.  Outstanding entries are leaked with a
* warning; callers should drain the queue first.
*/
void
virtqueue_free(struct virtqueue *vq)
{
if (vq->vq_free_cnt != vq->vq_nentries) {
device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
"leaking %d entries\n", vq->vq_name,
vq->vq_nentries - vq->vq_free_cnt);
}
if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
virtqueue_free_indirect(vq);
if (vq->vq_ring_mem != NULL) {
contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
vq->vq_ring_size = 0;
vq->vq_ring_mem = NULL;
}
free(vq, M_DEVBUF);
}
/* Physical address of the ring's backing memory. */
vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
	void *ring_mem;

	ring_mem = vq->vq_ring_mem;
	return (vtophys(ring_mem));
}
/* Physical address of the descriptor table within the ring. */
vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{
	struct vring_desc *desc;

	desc = vq->vq_ring.desc;
	return (vtophys(desc));
}
/* Physical address of the available (driver-to-device) ring. */
vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{
	struct vring_avail *avail;

	avail = vq->vq_ring.avail;
	return (vtophys(avail));
}
/* Physical address of the used (device-to-driver) ring. */
vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{
	struct vring_used *used;

	used = vq->vq_ring.used;
	return (vtophys(used));
}
/* The queue index assigned at allocation time. */
uint16_t
virtqueue_index(struct virtqueue *vq)
{
	uint16_t qidx;

	qidx = vq->vq_queue_index;
	return (qidx);
}
/* Total number of descriptor entries in the ring. */
int
virtqueue_size(struct virtqueue *vq)
{
	int nentries;

	nentries = vq->vq_nentries;
	return (nentries);
}
/* Number of descriptor entries currently free for enqueue. */
int
virtqueue_nfree(struct virtqueue *vq)
{
	int nfree;

	nfree = vq->vq_free_cnt;
	return (nfree);
}
/* True when no descriptors are outstanding (every entry is free). */
-int
+bool
virtqueue_empty(struct virtqueue *vq)
{
return (vq->vq_nentries == vq->vq_free_cnt);
}
/* True when no free descriptors remain; enqueue would return ENOSPC. */
-int
+bool
virtqueue_full(struct virtqueue *vq)
{
return (vq->vq_free_cnt == 0);
}
/*
 * Kick the host if it wants notification for the descriptors queued
 * since the last notify, then reset the pending count.
 */
void
virtqueue_notify(struct virtqueue *vq)
{
	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq) != 0)
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}
/*
* Number of used-ring entries the device has returned that we have not
* yet consumed.  The 16-bit subtraction is wrap-safe since both indices
* are free-running uint16_t counters.
*/
int
virtqueue_nused(struct virtqueue *vq)
{
uint16_t used_idx, nused;
used_idx = vq_htog16(vq, vq->vq_ring.used->idx);
nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
return (nused);
}
int
virtqueue_intr_filter(struct virtqueue *vq)
{
if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
return (0);
virtqueue_disable_intr(vq);
return (1);
}
void
virtqueue_intr(struct virtqueue *vq)
{
vq->vq_intrhand(vq->vq_intrhand_arg);
}
int
virtqueue_enable_intr(struct virtqueue *vq)
{
return (vq_ring_enable_interrupt(vq, 0));
}
/*
 * Re-enable interrupts, but ask the host (via EVENT_IDX, when
 * negotiated) to hold off until a hint-dependent fraction of the
 * currently outstanding descriptors has been consumed.  Returns
 * non-zero when enough entries are already pending for the caller to
 * process immediately.
 */
int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t avail_idx, outstanding;

	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	outstanding = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	if (hint == VQ_POSTPONE_SHORT)
		outstanding = outstanding / 4;
	else if (hint == VQ_POSTPONE_LONG)
		outstanding = (outstanding * 3) / 4;
	/* VQ_POSTPONE_EMPTIED: wait for all outstanding descriptors. */

	return (vq_ring_enable_interrupt(vq, outstanding));
}
/*
* Note this is only considered a hint to the host.
* With EVENT_IDX, park the used-event index far behind the consumer so
* the host should not fire again; otherwise set NO_INTERRUPT in the
* avail flags.  Either way the host may still interrupt us.
*/
void
virtqueue_disable_intr(struct virtqueue *vq)
{
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
vq->vq_used_cons_idx - vq->vq_nentries - 1);
return;
}
vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
}
/*
* Enqueue a scatter/gather list as one descriptor chain: 'readable'
* device-readable segments followed by 'writable' device-writable ones.
* 'cookie' identifies the request and is returned by virtqueue_dequeue().
*
* Returns 0 on success, EINVAL for an empty request, ENOSPC when the
* ring is completely full, or EMSGSIZE when too few free descriptors
* remain for a direct chain.  Small chains may be placed in an indirect
* table instead, consuming only one ring descriptor.
*/
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
int readable, int writable)
{
struct vq_desc_extra *dxp;
int needed;
uint16_t head_idx, idx;
needed = readable + writable;
VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
VQASSERT(vq, needed == sg->sg_nseg,
"segment count mismatch, %d, %d", needed, sg->sg_nseg);
VQASSERT(vq,
needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
"too many segments to enqueue: %d, %d/%d", needed,
vq->vq_nentries, vq->vq_max_indirect_size);
if (needed < 1)
return (EINVAL);
if (vq->vq_free_cnt == 0)
return (ENOSPC);
if (vq_ring_use_indirect(vq, needed)) {
vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
return (0);
} else if (vq->vq_free_cnt < needed)
return (EMSGSIZE);
/* Claim the head of the free chain for this request. */
head_idx = vq->vq_desc_head_idx;
VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
dxp = &vq->vq_descx[head_idx];
VQASSERT(vq, dxp->cookie == NULL,
"cookie already exists for index %d", head_idx);
dxp->cookie = cookie;
dxp->ndescs = needed;
/* Fill the chain; 'idx' becomes the new head of the free list. */
idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
sg, readable, writable);
vq->vq_desc_head_idx = idx;
vq->vq_free_cnt -= needed;
if (vq->vq_free_cnt == 0)
VQ_RING_ASSERT_CHAIN_TERM(vq);
else
VQ_RING_ASSERT_VALID_IDX(vq, idx);
/* Publish the chain head in the avail ring. */
vq_ring_update_avail(vq, head_idx);
return (0);
}
/*
* Dequeue one completed request from the used ring.  Returns the cookie
* passed to virtqueue_enqueue(), or NULL if nothing new is available.
* If 'len' is non-NULL it receives the number of bytes the device wrote.
* The request's descriptor chain is returned to the free list.
*/
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
struct vring_used_elem *uep;
void *cookie;
uint16_t used_idx, desc_idx;
if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
return (NULL);
used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
uep = &vq->vq_ring.used->ring[used_idx];
/* Read the used element only after seeing the updated used->idx. */
rmb();
desc_idx = (uint16_t) vq_htog32(vq, uep->id);
if (len != NULL)
*len = vq_htog32(vq, uep->len);
vq_ring_free_chain(vq, desc_idx);
cookie = vq->vq_descx[desc_idx].cookie;
VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
vq->vq_descx[desc_idx].cookie = NULL;
return (cookie);
}
/*
 * Busy-wait until a completed request can be dequeued, polling the bus
 * between attempts.  Only for contexts where blocking is impossible.
 */
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	for (;;) {
		VIRTIO_BUS_POLL(vq->vq_dev);
		cookie = virtqueue_dequeue(vq, len);
		if (cookie != NULL)
			break;
		cpu_spinwait();
	}

	return (cookie);
}
/*
 * Drain helper: scan forward from *last for the next outstanding
 * cookie, free its descriptor chain to keep the free count consistent,
 * and update *last so repeated calls walk the whole table.  Returns
 * NULL once the end of the table is reached.
 */
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	for (idx = *last; idx < vq->vq_nentries; ) {
		cookie = vq->vq_descx[idx].cookie;
		if (cookie != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			vq_ring_free_chain(vq, idx);
			idx++;
			break;
		}
		idx++;
	}
	*last = idx;

	return (cookie);
}
/* Print a one-line snapshot of the virtqueue state for debugging. */
void
virtqueue_dump(struct virtqueue *vq)
{
if (vq == NULL)
return;
printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
"desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
"used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
vq->vq_queued_cnt, vq->vq_desc_head_idx,
vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
vq_htog16(vq, vq->vq_ring.used->idx),
vq_htog16(vq, vring_used_event(&vq->vq_ring)),
vq_htog16(vq, vq->vq_ring.avail->flags),
vq_htog16(vq, vq->vq_ring.used->flags));
}
static void
vq_ring_init(struct virtqueue *vq)
{
struct vring *vr;
char *ring_mem;
int i, size;
ring_mem = vq->vq_ring_mem;
size = vq->vq_nentries;
vr = &vq->vq_ring;
vring_init(vr, size, ring_mem, vq->vq_alignment);
for (i = 0; i < size - 1; i++)
vr->desc[i].next = vq_gtoh16(vq, i + 1);
vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}
/*
* Publish a descriptor chain head in the next avail ring slot.  The
* wmb() orders the slot write before the avail->idx update so the host
* never observes the new index with a stale ring entry.
*/
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx, avail_ring_idx;
/*
* Place the head of the descriptor chain into the next slot and make
* it usable to the host. The chain is made available now rather than
* deferring to virtqueue_notify() in the hopes that if the host is
* currently running on another CPU, we can keep it processing the new
* descriptor.
*/
avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);
wmb();
vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);
/* Keep pending count until virtqueue_notify(). */
vq->vq_queued_cnt++;
}
/*
* Fill 'needed' descriptors in 'desc' (the main table or an indirect
* table) with the segments of 'sg', starting at head_idx and following
* the pre-linked free chain.  All but the last descriptor get
* VRING_DESC_F_NEXT; segments at or past 'readable' are marked
* device-writable.  Returns the index following the last descriptor
* used, i.e. the new head of the free chain.
*/
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
struct sglist_seg *seg;
struct vring_desc *dp;
int i, needed;
uint16_t idx;
SDT_PROBE6(virtqueue, , enqueue_segments, entry, vq, desc, head_idx,
sg, readable, writable);
needed = readable + writable;
/* 'idx' advances along the free chain via each descriptor's next. */
for (i = 0, idx = head_idx, seg = sg->sg_segs;
i < needed;
i++, idx = vq_htog16(vq, dp->next), seg++) {
VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
"premature end of free desc chain");
dp = &desc[idx];
dp->addr = vq_gtoh64(vq, seg->ss_paddr);
dp->len = vq_gtoh32(vq, seg->ss_len);
dp->flags = 0;
if (i < needed - 1)
dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
if (i >= readable)
dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
}
SDT_PROBE1(virtqueue, , enqueue_segments, return, idx);
return (idx);
}
/*
* Decide whether a chain of 'needed' descriptors should be placed in an
* indirect table: indirect support must be negotiated, the chain must
* fit in the per-entry table, and single-descriptor requests go
* directly in the ring (an indirect table would save nothing).
*/
-static int
+static bool
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{
if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
- return (0);
+ return (false);
if (vq->vq_max_indirect_size < needed)
- return (0);
+ return (false);
if (needed < 2)
- return (0);
+ return (false);
- return (1);
+ return (true);
}
/*
* Enqueue a request using a single main-ring descriptor that points at
* the entry's pre-allocated indirect table, where the actual segments
* are written.  Consumes exactly one free descriptor regardless of the
* segment count (dxp->ndescs = 1).
*/
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
struct sglist *sg, int readable, int writable)
{
struct vring_desc *dp;
struct vq_desc_extra *dxp;
int needed;
uint16_t head_idx;
needed = readable + writable;
VQASSERT(vq, needed <= vq->vq_max_indirect_size,
"enqueuing too many indirect descriptors");
head_idx = vq->vq_desc_head_idx;
VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
dp = &vq->vq_ring.desc[head_idx];
dxp = &vq->vq_descx[head_idx];
VQASSERT(vq, dxp->cookie == NULL,
"cookie already exists for index %d", head_idx);
dxp->cookie = cookie;
dxp->ndescs = 1;
/* Main-ring descriptor points at the indirect table. */
dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);
/* Segments start at index 0 inside the indirect table. */
vq_ring_enqueue_segments(vq, dxp->indirect, 0,
sg, readable, writable);
vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
vq->vq_free_cnt--;
if (vq->vq_free_cnt == 0)
VQ_RING_ASSERT_CHAIN_TERM(vq);
else
VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
vq_ring_update_avail(vq, head_idx);
}
/*
* Re-enable host interrupts, postponed by 'ndesc' entries when the
* EVENT_IDX feature is negotiated (otherwise simply clear the
* NO_INTERRUPT flag).  Returns 1 if more than 'ndesc' entries are
* already pending so the caller processes them instead of waiting.
*/
static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
/*
* Enable interrupts, making sure we get the latest index of
* what's already been consumed.
*/
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
vring_used_event(&vq->vq_ring) =
vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
} else {
vq->vq_ring.avail->flags &=
vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
}
mb();
/*
* Enough items may have already been consumed to meet our threshold
* since we last checked. Let our caller know so it processes the new
* entries.
*/
if (virtqueue_nused(vq) > ndesc)
return (1);
return (0);
}
/*
* Decide whether the host needs a kick for the descriptors queued since
* the last notify: with EVENT_IDX, use the host-supplied avail-event
* index; otherwise honor the host's NO_NOTIFY flag.
*/
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
uint16_t new_idx, prev_idx, event_idx, flags;
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
prev_idx = new_idx - vq->vq_queued_cnt;
event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));
return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
}
flags = vq->vq_ring.used->flags;
return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
}
/* Kick the host through the bus-specific notify mechanism. */
static void
vq_ring_notify_host(struct virtqueue *vq)
{
VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
vq->vq_notify_offset);
}
/*
* Return the descriptor chain headed at desc_idx to the free list.
* dxp->ndescs (recorded at enqueue) is the authoritative chain length;
* the NEXT-flag walk below only re-counts it for the assertion.  The
* freed chain is spliced in front of the existing free chain.
*/
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp;
struct vq_desc_extra *dxp;
VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
dp = &vq->vq_ring.desc[desc_idx];
dxp = &vq->vq_descx[desc_idx];
if (vq->vq_free_cnt == 0)
VQ_RING_ASSERT_CHAIN_TERM(vq);
vq->vq_free_cnt += dxp->ndescs;
dxp->ndescs--;
/* Indirect chains occupy a single main-ring descriptor. */
if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
uint16_t next_idx = vq_htog16(vq, dp->next);
VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
dp = &vq->vq_ring.desc[next_idx];
dxp->ndescs--;
}
}
VQASSERT(vq, dxp->ndescs == 0,
"failed to free entire desc chain, remaining: %d", dxp->ndescs);
/*
* We must append the existing free chain, if any, to the end of
* newly freed chain. If the virtqueue was completely used, then
* head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
*/
dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
vq->vq_desc_head_idx = desc_idx;
}
diff --git a/sys/dev/virtio/virtqueue.h b/sys/dev/virtio/virtqueue.h
index a98dc4728259..b5c60b0ce951 100644
--- a/sys/dev/virtio/virtqueue.h
+++ b/sys/dev/virtio/virtqueue.h
@@ -1,101 +1,101 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VIRTIO_VIRTQUEUE_H
#define _VIRTIO_VIRTQUEUE_H
struct virtqueue;
struct sglist;
/* Device callback for a virtqueue interrupt. */
typedef void virtqueue_intr_t(void *);
/*
* Hint on how long the next interrupt should be postponed. This is
* only used when the EVENT_IDX feature is negotiated.
*/
typedef enum {
VQ_POSTPONE_SHORT,
VQ_POSTPONE_LONG,
VQ_POSTPONE_EMPTIED /* Until all available desc are used. */
} vq_postpone_t;
#define VIRTQUEUE_MAX_NAME_SZ 32
/* One for each virtqueue the device wishes to allocate. */
struct vq_alloc_info {
/* Queue name, used in diagnostics and assertions. */
char vqai_name[VIRTQUEUE_MAX_NAME_SZ];
/* Max segments per indirect table; <= 1 disables indirect use. */
int vqai_maxindirsz;
/* Interrupt handler and its argument for this queue. */
virtqueue_intr_t *vqai_intr;
void *vqai_intr_arg;
/* Where virtqueue_alloc() stores the resulting queue. */
struct virtqueue **vqai_vq;
};
/*
* Initialize a vq_alloc_info: format the queue name and record the
* indirect limit, interrupt handler/argument, and result pointer.
*/
#define VQ_ALLOC_INFO_INIT(_i,_nsegs,_intr,_arg,_vqp,_str,...) do { \
snprintf((_i)->vqai_name, VIRTQUEUE_MAX_NAME_SZ, _str, \
##__VA_ARGS__); \
(_i)->vqai_maxindirsz = (_nsegs); \
(_i)->vqai_intr = (_intr); \
(_i)->vqai_intr_arg = (_arg); \
(_i)->vqai_vq = (_vqp); \
} while (0)
int virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
bus_size_t notify_offset, int align, vm_paddr_t highaddr,
struct vq_alloc_info *info, struct virtqueue **vqp);
void *virtqueue_drain(struct virtqueue *vq, int *last);
void virtqueue_free(struct virtqueue *vq);
int virtqueue_reinit(struct virtqueue *vq, uint16_t size);
int virtqueue_intr_filter(struct virtqueue *vq);
void virtqueue_intr(struct virtqueue *vq);
int virtqueue_enable_intr(struct virtqueue *vq);
int virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint);
void virtqueue_disable_intr(struct virtqueue *vq);
/* Get physical address of the virtqueue ring. */
vm_paddr_t virtqueue_paddr(struct virtqueue *vq);
vm_paddr_t virtqueue_desc_paddr(struct virtqueue *vq);
vm_paddr_t virtqueue_avail_paddr(struct virtqueue *vq);
vm_paddr_t virtqueue_used_paddr(struct virtqueue *vq);
uint16_t virtqueue_index(struct virtqueue *vq);
-int virtqueue_full(struct virtqueue *vq);
-int virtqueue_empty(struct virtqueue *vq);
+bool virtqueue_full(struct virtqueue *vq);
+bool virtqueue_empty(struct virtqueue *vq);
int virtqueue_size(struct virtqueue *vq);
int virtqueue_nfree(struct virtqueue *vq);
int virtqueue_nused(struct virtqueue *vq);
void virtqueue_notify(struct virtqueue *vq);
void virtqueue_dump(struct virtqueue *vq);
int virtqueue_enqueue(struct virtqueue *vq, void *cookie,
struct sglist *sg, int readable, int writable);
void *virtqueue_dequeue(struct virtqueue *vq, uint32_t *len);
void *virtqueue_poll(struct virtqueue *vq, uint32_t *len);
#endif /* _VIRTIO_VIRTQUEUE_H */