Index: head/sys/dev/beri/virtio/virtio_mmio_platform.c
===================================================================
--- head/sys/dev/beri/virtio/virtio_mmio_platform.c (revision 275727)
+++ head/sys/dev/beri/virtio/virtio_mmio_platform.c (revision 275728)
@@ -1,256 +1,266 @@
/*-
* Copyright (c) 2014 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* BERI interface for Virtio MMIO bus.
*
* This driver provides an interrupt engine for a software-implemented
* Virtio MMIO backend.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_mmio_if.h"
#include "pio_if.h"
static void platform_intr(void *arg);
struct virtio_mmio_platform_softc {
struct resource *res[1];
bus_space_tag_t bst;
bus_space_handle_t bsh;
device_t dev;
void (*intr_handler)(void *);
void *ih_user;
device_t pio_recv;
device_t pio_send;
};
static int
setup_pio(struct virtio_mmio_platform_softc *sc, char *name, device_t *dev)
{
phandle_t pio_node;
struct fdt_ic *ic;
phandle_t xref;
phandle_t node;
if ((node = ofw_bus_get_node(sc->dev)) == -1)
return (ENXIO);
if (OF_searchencprop(node, name, &xref,
sizeof(xref)) == -1) {
return (ENXIO);
}
pio_node = OF_node_from_xref(xref);
SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
if (ic->iph == pio_node) {
*dev = ic->dev;
PIO_CONFIGURE(*dev, PIO_OUT_ALL,
PIO_UNMASK_ALL);
return (0);
}
}
return (ENXIO);
}
static int
virtio_mmio_platform_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "beri,virtio_mmio_platform"))
return (ENXIO);
device_set_desc(dev, "Virtio MMIO platform");
return (BUS_PROBE_DEFAULT);
}
static int
virtio_mmio_platform_attach(device_t dev)
{
struct virtio_mmio_platform_softc *sc;
struct fdt_ic *fic;
phandle_t node;
sc = device_get_softc(dev);
sc->dev = dev;
if (setup_pio(sc, "pio-send", &sc->pio_send) != 0)
return (ENXIO);
if (setup_pio(sc, "pio-recv", &sc->pio_recv) != 0)
return (ENXIO);
if ((node = ofw_bus_get_node(sc->dev)) == -1)
return (ENXIO);
fic = malloc(sizeof(*fic), M_DEVBUF, M_WAITOK|M_ZERO);
fic->iph = node;
fic->dev = dev;
SLIST_INSERT_HEAD(&fdt_ic_list_head, fic, fdt_ics);
return (0);
}
static int
platform_note(device_t dev, size_t offset, int val)
{
struct virtio_mmio_platform_softc *sc;
int note;
int i;
sc = device_get_softc(dev);
switch (offset) {
case (VIRTIO_MMIO_QUEUE_NOTIFY):
if (val == 0)
note = Q_NOTIFY;
else if (val == 1)
note = Q_NOTIFY1;
else
note = 0;
break;
case (VIRTIO_MMIO_QUEUE_PFN):
note = Q_PFN;
break;
case (VIRTIO_MMIO_QUEUE_SEL):
note = Q_SEL;
break;
default:
note = 0;
}
if (note) {
mips_dcache_wbinv_all();
PIO_SET(sc->pio_send, note, 1);
/*
* Wait until the host acks the request.
* This is usually done within a few cycles.
* TODO: bad
*/
for (i = 100; i > 0; i--) {
if (PIO_READ(sc->pio_send) == 0)
break;
}
if (i == 0)
device_printf(sc->dev, "Warning: host busy\n");
}
return (0);
}
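
An illustrative trace of the doorbell path above (the Q_* bits come from
the BERI virtio headers; the "managing core" is the CPU running the
software backend):

/*
 * Guest writes 0 to VIRTIO_MMIO_QUEUE_NOTIFY:
 *
 *   platform_note(dev, VIRTIO_MMIO_QUEUE_NOTIFY, 0)
 *     -> mips_dcache_wbinv_all();             publish ring updates
 *     -> PIO_SET(sc->pio_send, Q_NOTIFY, 1);  raise the doorbell
 *     -> spin until PIO_READ(sc->pio_send) == 0, i.e. the managing
 *        core has consumed the doorbell.
 */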
static void
platform_intr(void *arg)
{
struct virtio_mmio_platform_softc *sc;
int reg;
sc = arg;
/* Read pending */
reg = PIO_READ(sc->pio_recv);
/* Ack */
PIO_SET(sc->pio_recv, reg, 0);
/* Writeback, invalidate cache */
mips_dcache_wbinv_all();
if (sc->intr_handler != NULL)
sc->intr_handler(sc->ih_user);
}
static int
platform_setup_intr(device_t dev, device_t mmio_dev,
void *intr_handler, void *ih_user)
{
struct virtio_mmio_platform_softc *sc;
sc = device_get_softc(dev);
sc->intr_handler = intr_handler;
sc->ih_user = ih_user;
PIO_SETUP_IRQ(sc->pio_recv, platform_intr, sc);
return (0);
}
+static int
+platform_poll(device_t dev)
+{
+
+ mips_dcache_wbinv_all();
+
+ return (0);
+}
+
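
Taken together with the core changes below, the new method is the last
link of the polling chain. A sketch of the whole path, assuming a
hypothetical driver spinning on a completion (all other names are from
this change):

/*
 *   virtqueue_poll(vq, &len)                   virtqueue.c
 *     -> VIRTIO_BUS_POLL(vq->vq_dev)           virtio_bus_if.m
 *       -> vtmmio_poll(dev)                    virtio_mmio.c
 *         -> VIRTIO_MMIO_POLL(sc->platform)    virtio_mmio_if.m
 *           -> platform_poll(dev)              this file
 *             -> mips_dcache_wbinv_all()
 */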
static device_method_t virtio_mmio_platform_methods[] = {
DEVMETHOD(device_probe, virtio_mmio_platform_probe),
DEVMETHOD(device_attach, virtio_mmio_platform_attach),
/* virtio_mmio_if.h */
DEVMETHOD(virtio_mmio_note, platform_note),
+ DEVMETHOD(virtio_mmio_poll, platform_poll),
DEVMETHOD(virtio_mmio_setup_intr, platform_setup_intr),
DEVMETHOD_END
};
static driver_t virtio_mmio_platform_driver = {
"virtio_mmio_platform",
virtio_mmio_platform_methods,
sizeof(struct virtio_mmio_platform_softc),
};
static devclass_t virtio_mmio_platform_devclass;
DRIVER_MODULE(virtio_mmio_platform, simplebus, virtio_mmio_platform_driver,
virtio_mmio_platform_devclass, 0, 0);
Index: head/sys/dev/virtio/mmio/virtio_mmio.c
===================================================================
--- head/sys/dev/virtio/mmio/virtio_mmio.c (revision 275727)
+++ head/sys/dev/virtio/mmio/virtio_mmio.c (revision 275728)
@@ -1,840 +1,853 @@
/*-
* Copyright (c) 2014 Ruslan Bukin
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by Andrew Turner
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* VirtIO MMIO interface.
* This driver is heavily based on the VirtIO PCI interface driver.
*/
/*
* FDT example:
* virtio_block@1000 {
* compatible = "virtio,mmio";
* reg = <0x1000 0x100>;
* interrupts = <63>;
* interrupt-parent = <&GIC>;
* };
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
#define PAGE_SHIFT 12
struct vtmmio_virtqueue {
struct virtqueue *vtv_vq;
int vtv_no_intr;
};
struct vtmmio_softc {
device_t dev;
device_t platform;
struct resource *res[2];
uint64_t vtmmio_features;
uint32_t vtmmio_flags;
/* This "bus" will only ever have one child. */
device_t vtmmio_child_dev;
struct virtio_feature_desc *vtmmio_child_feat_desc;
int vtmmio_nvqs;
struct vtmmio_virtqueue *vtmmio_vqs;
void *ih;
};
static int vtmmio_probe(device_t);
static int vtmmio_attach(device_t);
static int vtmmio_detach(device_t);
static int vtmmio_suspend(device_t);
static int vtmmio_resume(device_t);
static int vtmmio_shutdown(device_t);
static void vtmmio_driver_added(device_t, driver_t *);
static void vtmmio_child_detached(device_t, device_t);
static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtmmio_negotiate_features(device_t, uint64_t);
static int vtmmio_with_feature(device_t, uint64_t);
static int vtmmio_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
+static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t);
static uint8_t vtmmio_get_status(device_t);
static void vtmmio_set_status(device_t, uint8_t);
static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static void vtmmio_write_dev_config(device_t, bus_size_t, void *, int);
static void vtmmio_describe_features(struct vtmmio_softc *, const char *,
uint64_t);
static void vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_free_interrupts(struct vtmmio_softc *);
static void vtmmio_free_virtqueues(struct vtmmio_softc *);
static void vtmmio_release_child_resources(struct vtmmio_softc *);
static void vtmmio_reset(struct vtmmio_softc *);
static void vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void vtmmio_vq_intr(void *);
/*
* Register read/write wrappers.
*/
#define vtmmio_write_config_1(sc, o, v) \
do { \
bus_write_1((sc)->res[0], (o), (v)); \
if (sc->platform != NULL) \
VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
} while (0)
#define vtmmio_write_config_2(sc, o, v) \
do { \
bus_write_2((sc)->res[0], (o), (v)); \
if (sc->platform != NULL) \
VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
} while (0)
#define vtmmio_write_config_4(sc, o, v) \
do { \
bus_write_4((sc)->res[0], (o), (v)); \
if (sc->platform != NULL) \
VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
} while (0)
#define vtmmio_read_config_1(sc, o) \
bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
bus_read_4((sc)->res[0], (o))
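
The write wrappers mirror every register store to the optional platform
backend, so a software device model sees guest writes it cannot trap.
Expanding the 4-byte wrapper by hand (sc->platform is non-NULL only when
vtmmio_setup_platform() below resolves a "platform" FDT property):

/*
 *	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
 *
 * is equivalent to:
 *
 *	bus_write_4(sc->res[0], VIRTIO_MMIO_QUEUE_SEL, idx);
 *	if (sc->platform != NULL)
 *		VIRTIO_MMIO_NOTE(sc->platform, VIRTIO_MMIO_QUEUE_SEL, idx);
 */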
static device_method_t vtmmio_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtmmio_probe),
DEVMETHOD(device_attach, vtmmio_attach),
DEVMETHOD(device_detach, vtmmio_detach),
DEVMETHOD(device_suspend, vtmmio_suspend),
DEVMETHOD(device_resume, vtmmio_resume),
DEVMETHOD(device_shutdown, vtmmio_shutdown),
/* Bus interface. */
DEVMETHOD(bus_driver_added, vtmmio_driver_added),
DEVMETHOD(bus_child_detached, vtmmio_child_detached),
DEVMETHOD(bus_read_ivar, vtmmio_read_ivar),
DEVMETHOD(bus_write_ivar, vtmmio_write_ivar),
/* VirtIO bus interface. */
DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature),
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
+ DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),
DEVMETHOD_END
};
static driver_t vtmmio_driver = {
"virtio_mmio",
vtmmio_methods,
sizeof(struct vtmmio_softc)
};
devclass_t vtmmio_devclass;
DRIVER_MODULE(virtio_mmio, simplebus, vtmmio_driver, vtmmio_devclass, 0, 0);
MODULE_VERSION(virtio_mmio, 1);
MODULE_DEPEND(virtio_mmio, simplebus, 1, 1, 1);
MODULE_DEPEND(virtio_mmio, virtio, 1, 1, 1);
static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
struct vtmmio_softc *sc;
int rid;
int err;
sc = device_get_softc(dev);
if (sc->platform != NULL) {
err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
vtmmio_vq_intr, sc);
if (err == 0) {
/* OK, we have backend-specific interrupts. */
return (0);
}
}
rid = 0;
sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
if (!sc->res[1]) {
device_printf(dev, "Can't allocate interrupt\n");
return (ENXIO);
}
if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, vtmmio_vq_intr, sc, &sc->ih)) {
device_printf(dev, "Can't setup the interrupt\n");
return (ENXIO);
}
return (0);
}
static int
vtmmio_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "virtio,mmio"))
return (ENXIO);
device_set_desc(dev, "VirtIO MMIO adapter");
return (BUS_PROBE_DEFAULT);
}
static int
vtmmio_setup_platform(struct vtmmio_softc *sc)
{
phandle_t platform_node;
struct fdt_ic *ic;
phandle_t xref;
phandle_t node;
sc->platform = NULL;
if ((node = ofw_bus_get_node(sc->dev)) == -1)
return (ENXIO);
if (OF_searchencprop(node, "platform", &xref,
sizeof(xref)) == -1) {
return (ENXIO);
}
platform_node = OF_node_from_xref(xref);
SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
if (ic->iph == platform_node) {
sc->platform = ic->dev;
break;
}
}
if (sc->platform == NULL) {
/* No platform-specific device. Ignore it. */
}
return (0);
}
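
A hypothetical FDT wiring for the lookup above, in the style of the
example at the top of this file (the "platform", "pio-send", and
"pio-recv" property names and the compatible strings are taken from
these drivers; node names, labels, and addresses are illustrative):

/*
 *	virtio_mmio_platform0: virtio_mmio_platform@0 {
 *		compatible = "beri,virtio_mmio_platform";
 *		pio-send = <&pio0>;
 *		pio-recv = <&pio1>;
 *	};
 *
 *	virtio_block@1000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1000 0x100>;
 *		platform = <&virtio_mmio_platform0>;
 *	};
 */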
static int
vtmmio_attach(device_t dev)
{
struct vtmmio_softc *sc;
device_t child;
int rid;
sc = device_get_softc(dev);
sc->dev = dev;
vtmmio_setup_platform(sc);
rid = 0;
sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!sc->res[0]) {
device_printf(dev, "Cannot allocate memory window.\n");
return (ENXIO);
}
vtmmio_reset(sc);
/* Tell the host we've noticed this device. */
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
if ((child = device_add_child(dev, NULL, -1)) == NULL) {
device_printf(dev, "Cannot create child device.\n");
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtmmio_detach(dev);
return (ENOMEM);
}
sc->vtmmio_child_dev = child;
vtmmio_probe_and_attach_child(sc);
return (0);
}
static int
vtmmio_detach(device_t dev)
{
struct vtmmio_softc *sc;
device_t child;
int error;
sc = device_get_softc(dev);
if ((child = sc->vtmmio_child_dev) != NULL) {
error = device_delete_child(dev, child);
if (error)
return (error);
sc->vtmmio_child_dev = NULL;
}
vtmmio_reset(sc);
if (sc->res[0] != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY, 0,
sc->res[0]);
sc->res[0] = NULL;
}
return (0);
}
static int
vtmmio_suspend(device_t dev)
{
return (bus_generic_suspend(dev));
}
static int
vtmmio_resume(device_t dev)
{
return (bus_generic_resume(dev));
}
static int
vtmmio_shutdown(device_t dev)
{
(void) bus_generic_shutdown(dev);
/* Forcibly stop the host device. */
vtmmio_stop(dev);
return (0);
}
static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
vtmmio_probe_and_attach_child(sc);
}
static void
vtmmio_child_detached(device_t dev, device_t child)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
vtmmio_reset(sc);
vtmmio_release_child_resources(sc);
}
static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (sc->vtmmio_child_dev != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_DEVTYPE:
case VIRTIO_IVAR_SUBDEVICE:
*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
break;
case VIRTIO_IVAR_VENDOR:
*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
break;
default:
return (ENOENT);
}
return (0);
}
static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (sc->vtmmio_child_dev != child)
return (ENOENT);
switch (index) {
case VIRTIO_IVAR_FEATURE_DESC:
sc->vtmmio_child_feat_desc = (void *) value;
break;
default:
return (ENOENT);
}
return (0);
}
static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
struct vtmmio_softc *sc;
uint64_t host_features, features;
sc = device_get_softc(dev);
host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
vtmmio_describe_features(sc, "host", host_features);
/*
* Limit negotiated features to what the driver, virtqueue, and
* host all support.
*/
features = host_features & child_features;
features = virtqueue_filter_features(features);
sc->vtmmio_features = features;
vtmmio_describe_features(sc, "negotiated", features);
vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);
return (features);
}
static int
vtmmio_with_feature(device_t dev, uint64_t feature)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
return ((sc->vtmmio_features & feature) != 0);
}
static int
vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs,
struct vq_alloc_info *vq_info)
{
struct vtmmio_virtqueue *vqx;
struct vq_alloc_info *info;
struct vtmmio_softc *sc;
struct virtqueue *vq;
uint32_t size;
int idx, error;
sc = device_get_softc(dev);
if (sc->vtmmio_nvqs != 0)
return (EALREADY);
if (nvqs <= 0)
return (EINVAL);
sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sc->vtmmio_vqs == NULL)
return (ENOMEM);
for (idx = 0; idx < nvqs; idx++) {
vqx = &sc->vtmmio_vqs[idx];
info = &vq_info[idx];
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
vtmmio_select_virtqueue(sc, idx);
size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);
error = virtqueue_alloc(dev, idx, size,
VIRTIO_MMIO_VRING_ALIGN, 0xFFFFFFFFUL, info, &vq);
if (error) {
device_printf(dev,
"cannot allocate virtqueue %d: %d\n",
idx, error);
break;
}
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
VIRTIO_MMIO_VRING_ALIGN);
#if 0
device_printf(dev, "virtqueue paddr 0x%08lx\n",
(uint64_t)virtqueue_paddr(vq));
#endif
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
virtqueue_paddr(vq) >> PAGE_SHIFT);
vqx->vtv_vq = *info->vqai_vq = vq;
vqx->vtv_no_intr = info->vqai_intr == NULL;
sc->vtmmio_nvqs++;
}
if (error)
vtmmio_free_virtqueues(sc);
return (error);
}
static void
vtmmio_stop(device_t dev)
{
vtmmio_reset(device_get_softc(dev));
+}
+
+static void
+vtmmio_poll(device_t dev)
+{
+ struct vtmmio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->platform != NULL)
+ VIRTIO_MMIO_POLL(sc->platform);
}
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
struct vtmmio_softc *sc;
int idx, error;
sc = device_get_softc(dev);
if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
vtmmio_stop(dev);
/*
* Quickly drive the status through ACK and DRIVER. The device
* does not become usable again until vtmmio_reinit_complete().
*/
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
vtmmio_negotiate_features(dev, features);
for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
error = vtmmio_reinit_virtqueue(sc, idx);
if (error)
return (error);
}
return (0);
}
static void
vtmmio_reinit_complete(device_t dev)
{
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NOTIFY, queue);
}
static uint8_t
vtmmio_get_status(device_t dev)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}
static void
vtmmio_set_status(device_t dev, uint8_t status)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= vtmmio_get_status(dev);
vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}
static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
void *dst, int length)
{
struct vtmmio_softc *sc;
bus_size_t off;
uint8_t *d;
int size;
sc = device_get_softc(dev);
off = VIRTIO_MMIO_CONFIG + offset;
for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
if (length >= 4) {
size = 4;
*(uint32_t *)d = vtmmio_read_config_4(sc, off);
} else if (length >= 2) {
size = 2;
*(uint16_t *)d = vtmmio_read_config_2(sc, off);
} else
#endif
{
size = 1;
*d = vtmmio_read_config_1(sc, off);
}
}
}
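
A hedged usage sketch of this path: virtio_read_device_config() is the
child-facing wrapper that ends up in the function above, and the
virtio-net MAC field is the classic example. Without
ALLOW_WORD_ALIGNED_ACCESS the six bytes arrive as six 1-byte reads of
VIRTIO_MMIO_CONFIG + 0 .. + 5:

/*
 *	uint8_t mac[ETHER_ADDR_LEN];
 *
 *	virtio_read_device_config(dev,
 *	    offsetof(struct virtio_net_config, mac), mac, sizeof(mac));
 */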
static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
void *src, int length)
{
struct vtmmio_softc *sc;
bus_size_t off;
uint8_t *s;
int size;
sc = device_get_softc(dev);
off = VIRTIO_MMIO_CONFIG + offset;
for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
if (length >= 4) {
size = 4;
vtmmio_write_config_4(sc, off, *(uint32_t *)s);
} else if (length >= 2) {
size = 2;
vtmmio_write_config_2(sc, off, *(uint16_t *)s);
} else
#endif
{
size = 1;
vtmmio_write_config_1(sc, off, *s);
}
}
}
static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
uint64_t features)
{
device_t dev, child;
dev = sc->dev;
child = sc->vtmmio_child_dev;
if (device_is_attached(child) && bootverbose == 0)
return;
virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}
static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
device_t dev, child;
dev = sc->dev;
child = sc->vtmmio_child_dev;
if (child == NULL)
return;
if (device_get_state(child) != DS_NOTPRESENT) {
return;
}
if (device_probe(child) != 0) {
return;
}
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
if (device_attach(child) != 0) {
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtmmio_reset(sc);
vtmmio_release_child_resources(sc);
/* Reset status for future attempt. */
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
} else {
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
VIRTIO_ATTACH_COMPLETED(child);
}
}
static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
struct vtmmio_virtqueue *vqx;
struct virtqueue *vq;
int error;
uint16_t size;
vqx = &sc->vtmmio_vqs[idx];
vq = vqx->vtv_vq;
KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
vtmmio_select_virtqueue(sc, idx);
size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);
error = virtqueue_reinit(vq, size);
if (error)
return (error);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
virtqueue_paddr(vq) >> PAGE_SHIFT);
return (0);
}
static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{
if (sc->ih != NULL)
bus_teardown_intr(sc->dev, sc->res[1], sc->ih);
if (sc->res[1] != NULL)
bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}
static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
struct vtmmio_virtqueue *vqx;
int idx;
for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
vqx = &sc->vtmmio_vqs[idx];
vtmmio_select_virtqueue(sc, idx);
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);
virtqueue_free(vqx->vtv_vq);
vqx->vtv_vq = NULL;
}
free(sc->vtmmio_vqs, M_DEVBUF);
sc->vtmmio_vqs = NULL;
sc->vtmmio_nvqs = 0;
}
static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{
vtmmio_free_interrupts(sc);
vtmmio_free_virtqueues(sc);
}
static void
vtmmio_reset(struct vtmmio_softc *sc)
{
/*
* Setting the status to RESET sets the host device to
* the original, uninitialized state.
*/
vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}
static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{
vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}
static void
vtmmio_vq_intr(void *arg)
{
struct vtmmio_virtqueue *vqx;
struct vtmmio_softc *sc;
struct virtqueue *vq;
uint32_t status;
int idx;
sc = arg;
status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);
/* The device configuration changed. */
if (status & VIRTIO_MMIO_INT_CONFIG)
if (sc->vtmmio_child_dev != NULL)
VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);
/* Notify all virtqueues. */
if (status & VIRTIO_MMIO_INT_VRING) {
for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
vqx = &sc->vtmmio_vqs[idx];
if (vqx->vtv_no_intr == 0) {
vq = vqx->vtv_vq;
virtqueue_intr(vq);
}
}
}
}
Index: head/sys/dev/virtio/mmio/virtio_mmio_if.m
===================================================================
--- head/sys/dev/virtio/mmio/virtio_mmio_if.m (revision 275727)
+++ head/sys/dev/virtio/mmio/virtio_mmio_if.m (revision 275728)
@@ -1,77 +1,84 @@
#-
# Copyright (c) 2014 Ruslan Bukin
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
#include
#
# This is an optional interface to a virtio mmio backend.
# It is useful when the backend is implemented not in hardware but in
# software, e.g. by another CPU core.
#
INTERFACE virtio_mmio;
CODE {
static int
virtio_mmio_note(device_t dev, size_t offset, int val)
{
return (1);
}
static int
virtio_mmio_setup_intr(device_t dev, device_t mmio_dev,
void *handler, void *ih_user)
{
return (1);
}
};
#
# Inform the backend that we have written data at the given offset.
#
METHOD int note {
device_t dev;
size_t offset;
int val;
} DEFAULT virtio_mmio_note;
#
+# Inform the backend that we are about to poll the virtqueue.
+#
+METHOD int poll {
+ device_t dev;
+};
+
+#
# Set up backend-specific interrupts.
#
METHOD int setup_intr {
device_t dev;
device_t mmio_dev;
void *handler;
void *ih_user;
} DEFAULT virtio_mmio_setup_intr;
Index: head/sys/dev/virtio/virtio_bus_if.m
===================================================================
--- head/sys/dev/virtio/virtio_bus_if.m (revision 275727)
+++ head/sys/dev/virtio/virtio_bus_if.m (revision 275728)
@@ -1,89 +1,94 @@
#-
# Copyright (c) 2011, Bryan Venteicher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#include
#include
INTERFACE virtio_bus;
HEADER {
struct vq_alloc_info;
};
METHOD uint64_t negotiate_features {
device_t dev;
uint64_t child_features;
};
METHOD int with_feature {
device_t dev;
uint64_t feature;
};
METHOD int alloc_virtqueues {
device_t dev;
int flags;
int nvqs;
struct vq_alloc_info *info;
};
METHOD int setup_intr {
device_t dev;
enum intr_type type;
};
METHOD void stop {
device_t dev;
};
METHOD int reinit {
device_t dev;
uint64_t features;
};
METHOD void reinit_complete {
device_t dev;
};
METHOD void notify_vq {
device_t dev;
uint16_t queue;
};
METHOD void read_device_config {
device_t dev;
bus_size_t offset;
void *dst;
int len;
};
METHOD void write_device_config {
device_t dev;
bus_size_t offset;
void *src;
int len;
};
+
+METHOD void poll {
+ device_t dev;
+};
+
Index: head/sys/dev/virtio/virtqueue.c
===================================================================
--- head/sys/dev/virtio/virtqueue.c (revision 275727)
+++ head/sys/dev/virtio/virtqueue.c (revision 275728)
@@ -1,828 +1,831 @@
/*-
* Copyright (c) 2011, Bryan Venteicher
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Implements the virtqueue interface, essentially as described
* in the original VirtIO paper.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "virtio_bus_if.h"
struct virtqueue {
device_t vq_dev;
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
uint16_t vq_queue_index;
uint16_t vq_nentries;
uint32_t vq_flags;
#define VIRTQUEUE_FLAG_INDIRECT 0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
int vq_alignment;
int vq_ring_size;
void *vq_ring_mem;
int vq_max_indirect_size;
int vq_indirect_mem_size;
virtqueue_intr_t *vq_intrhand;
void *vq_intrhand_arg;
struct vring vq_ring;
uint16_t vq_free_cnt;
uint16_t vq_queued_cnt;
/*
* Head of the free chain in the descriptor table. If
* there are no free descriptors, this will be set to
* VQ_RING_DESC_CHAIN_END.
*/
uint16_t vq_desc_head_idx;
/*
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
*/
uint16_t vq_used_cons_idx;
struct vq_desc_extra {
void *cookie;
struct vring_desc *indirect;
vm_paddr_t indirect_paddr;
uint16_t ndescs;
} vq_descx[0];
};
/*
* The maximum virtqueue size is 2^15. Use that value as the end of
* descriptor chain terminator since it will never be a valid index
* in the descriptor table. This is used to verify we are correctly
* handling vq_free_cnt.
*/
#define VQ_RING_DESC_CHAIN_END 32768
#define VQASSERT(_vq, _exp, _msg, ...) \
KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name, \
##__VA_ARGS__))
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, \
"invalid ring index: %d, max: %d", (_idx), \
(_vq)->vq_nentries)
#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
VQ_RING_DESC_CHAIN_END, "full ring terminated " \
"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
static int virtqueue_init_indirect(struct virtqueue *vq, int);
static void virtqueue_free_indirect(struct virtqueue *vq);
static void virtqueue_init_indirect_list(struct virtqueue *,
struct vring_desc *);
static void vq_ring_init(struct virtqueue *);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
struct vring_desc *, uint16_t, struct sglist *, int, int);
static int vq_ring_use_indirect(struct virtqueue *, int);
static void vq_ring_enqueue_indirect(struct virtqueue *, void *,
struct sglist *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *);
static void vq_ring_notify_host(struct virtqueue *);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
uint64_t
virtqueue_filter_features(uint64_t features)
{
uint64_t mask;
mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
mask |= VIRTIO_RING_F_INDIRECT_DESC;
mask |= VIRTIO_RING_F_EVENT_IDX;
return (features & mask);
}
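
A worked reading of the mask, assuming the legacy header values
(VIRTIO_TRANSPORT_F_START is 28; the two ring features are bits 28
and 29):

/*
 * mask  = (1 << 28) - 1;                  device-class bits 0..27
 * mask |= VIRTIO_RING_F_INDIRECT_DESC;    bit 28
 * mask |= VIRTIO_RING_F_EVENT_IDX;        bit 29
 *
 * Any transport feature above bit 29 that a child driver happens to
 * request is therefore stripped before negotiation.
 */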
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
struct virtqueue *vq;
int error;
*vqp = NULL;
error = 0;
if (size == 0) {
device_printf(dev,
"virtqueue %d (%s) does not exist (size is zero)\n",
queue, info->vqai_name);
return (ENODEV);
} else if (!powerof2(size)) {
device_printf(dev,
"virtqueue %d (%s) size is not a power of 2: %d\n",
queue, info->vqai_name, size);
return (ENXIO);
} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
device_printf(dev, "virtqueue %d (%s) requested too many "
"indirect descriptors: %d, max %d\n",
queue, info->vqai_name, info->vqai_maxindirsz,
VIRTIO_MAX_INDIRECT);
return (EINVAL);
}
vq = malloc(sizeof(struct virtqueue) +
size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
if (vq == NULL) {
device_printf(dev, "cannot allocate virtqueue\n");
return (ENOMEM);
}
vq->vq_dev = dev;
strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
vq->vq_queue_index = queue;
vq->vq_alignment = align;
vq->vq_nentries = size;
vq->vq_free_cnt = size;
vq->vq_intrhand = info->vqai_intr;
vq->vq_intrhand_arg = info->vqai_intr_arg;
if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
if (info->vqai_maxindirsz > 1) {
error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
if (error)
goto fail;
}
vq->vq_ring_size = round_page(vring_size(size, align));
vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
if (vq->vq_ring_mem == NULL) {
device_printf(dev,
"cannot allocate memory for virtqueue ring\n");
error = ENOMEM;
goto fail;
}
vq_ring_init(vq);
virtqueue_disable_intr(vq);
*vqp = vq;
fail:
if (error)
virtqueue_free(vq);
return (error);
}
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
device_t dev;
struct vq_desc_extra *dxp;
int i, size;
dev = vq->vq_dev;
if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
/*
* Indirect descriptors requested by the driver but not
* negotiated. Return zero to keep the initialization
* going: we'll run fine without.
*/
if (bootverbose)
device_printf(dev, "virtqueue %d (%s) requested "
"indirect descriptors but not negotiated\n",
vq->vq_queue_index, vq->vq_name);
return (0);
}
size = indirect_size * sizeof(struct vring_desc);
vq->vq_max_indirect_size = indirect_size;
vq->vq_indirect_mem_size = size;
vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
for (i = 0; i < vq->vq_nentries; i++) {
dxp = &vq->vq_descx[i];
dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
if (dxp->indirect == NULL) {
device_printf(dev, "cannot allocate indirect list\n");
return (ENOMEM);
}
dxp->indirect_paddr = vtophys(dxp->indirect);
virtqueue_init_indirect_list(vq, dxp->indirect);
}
return (0);
}
static void
virtqueue_free_indirect(struct virtqueue *vq)
{
struct vq_desc_extra *dxp;
int i;
for (i = 0; i < vq->vq_nentries; i++) {
dxp = &vq->vq_descx[i];
if (dxp->indirect == NULL)
break;
free(dxp->indirect, M_DEVBUF);
dxp->indirect = NULL;
dxp->indirect_paddr = 0;
}
vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
vq->vq_indirect_mem_size = 0;
}
static void
virtqueue_init_indirect_list(struct virtqueue *vq,
struct vring_desc *indirect)
{
int i;
bzero(indirect, vq->vq_indirect_mem_size);
for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
indirect[i].next = i + 1;
indirect[i].next = VQ_RING_DESC_CHAIN_END;
}
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
struct vq_desc_extra *dxp;
int i;
if (vq->vq_nentries != size) {
device_printf(vq->vq_dev,
"%s: '%s' changed size; old=%hu, new=%hu\n",
__func__, vq->vq_name, vq->vq_nentries, size);
return (EINVAL);
}
/* Warn if the virtqueue was not properly cleaned up. */
if (vq->vq_free_cnt != vq->vq_nentries) {
device_printf(vq->vq_dev,
"%s: warning '%s' virtqueue not empty, "
"leaking %d entries\n", __func__, vq->vq_name,
vq->vq_nentries - vq->vq_free_cnt);
}
vq->vq_desc_head_idx = 0;
vq->vq_used_cons_idx = 0;
vq->vq_queued_cnt = 0;
vq->vq_free_cnt = vq->vq_nentries;
/* To be safe, reset all our allocated memory. */
bzero(vq->vq_ring_mem, vq->vq_ring_size);
for (i = 0; i < vq->vq_nentries; i++) {
dxp = &vq->vq_descx[i];
dxp->cookie = NULL;
dxp->ndescs = 0;
if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
virtqueue_init_indirect_list(vq, dxp->indirect);
}
vq_ring_init(vq);
virtqueue_disable_intr(vq);
return (0);
}
void
virtqueue_free(struct virtqueue *vq)
{
if (vq->vq_free_cnt != vq->vq_nentries) {
device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
"leaking %d entries\n", vq->vq_name,
vq->vq_nentries - vq->vq_free_cnt);
}
if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
virtqueue_free_indirect(vq);
if (vq->vq_ring_mem != NULL) {
contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
vq->vq_ring_size = 0;
vq->vq_ring_mem = NULL;
}
free(vq, M_DEVBUF);
}
vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
return (vtophys(vq->vq_ring_mem));
}
int
virtqueue_size(struct virtqueue *vq)
{
return (vq->vq_nentries);
}
int
virtqueue_nfree(struct virtqueue *vq)
{
return (vq->vq_free_cnt);
}
int
virtqueue_empty(struct virtqueue *vq)
{
return (vq->vq_nentries == vq->vq_free_cnt);
}
int
virtqueue_full(struct virtqueue *vq)
{
return (vq->vq_free_cnt == 0);
}
void
virtqueue_notify(struct virtqueue *vq)
{
/* Ensure updated avail->idx is visible to host. */
mb();
if (vq_ring_must_notify_host(vq))
vq_ring_notify_host(vq);
vq->vq_queued_cnt = 0;
}
int
virtqueue_nused(struct virtqueue *vq)
{
uint16_t used_idx, nused;
used_idx = vq->vq_ring.used->idx;
nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
return (nused);
}
int
virtqueue_intr_filter(struct virtqueue *vq)
{
if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
return (0);
virtqueue_disable_intr(vq);
return (1);
}
void
virtqueue_intr(struct virtqueue *vq)
{
vq->vq_intrhand(vq->vq_intrhand_arg);
}
int
virtqueue_enable_intr(struct virtqueue *vq)
{
return (vq_ring_enable_interrupt(vq, 0));
}
int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
uint16_t ndesc, avail_idx;
avail_idx = vq->vq_ring.avail->idx;
ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
switch (hint) {
case VQ_POSTPONE_SHORT:
ndesc = ndesc / 4;
break;
case VQ_POSTPONE_LONG:
ndesc = (ndesc * 3) / 4;
break;
case VQ_POSTPONE_EMPTIED:
break;
}
return (vq_ring_enable_interrupt(vq, ndesc));
}
/*
* Note this is only considered a hint to the host.
*/
void
virtqueue_disable_intr(struct virtqueue *vq)
{
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
vq->vq_nentries - 1;
} else
vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
int readable, int writable)
{
struct vq_desc_extra *dxp;
int needed;
uint16_t head_idx, idx;
needed = readable + writable;
VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
VQASSERT(vq, needed == sg->sg_nseg,
"segment count mismatch, %d, %d", needed, sg->sg_nseg);
VQASSERT(vq,
needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
"too many segments to enqueue: %d, %d/%d", needed,
vq->vq_nentries, vq->vq_max_indirect_size);
if (needed < 1)
return (EINVAL);
if (vq->vq_free_cnt == 0)
return (ENOSPC);
if (vq_ring_use_indirect(vq, needed)) {
vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
return (0);
} else if (vq->vq_free_cnt < needed)
return (EMSGSIZE);
head_idx = vq->vq_desc_head_idx;
VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
dxp = &vq->vq_descx[head_idx];
VQASSERT(vq, dxp->cookie == NULL,
"cookie already exists for index %d", head_idx);
dxp->cookie = cookie;
dxp->ndescs = needed;
idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
sg, readable, writable);
vq->vq_desc_head_idx = idx;
vq->vq_free_cnt -= needed;
if (vq->vq_free_cnt == 0)
VQ_RING_ASSERT_CHAIN_TERM(vq);
else
VQ_RING_ASSERT_VALID_IDX(vq, idx);
vq_ring_update_avail(vq, head_idx);
return (0);
}
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
struct vring_used_elem *uep;
void *cookie;
uint16_t used_idx, desc_idx;
if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
return (NULL);
used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
uep = &vq->vq_ring.used->ring[used_idx];
rmb();
desc_idx = (uint16_t) uep->id;
if (len != NULL)
*len = uep->len;
vq_ring_free_chain(vq, desc_idx);
cookie = vq->vq_descx[desc_idx].cookie;
VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
vq->vq_descx[desc_idx].cookie = NULL;
return (cookie);
}
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
void *cookie;
- while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
+ VIRTIO_BUS_POLL(vq->vq_dev);
+ while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
cpu_spinwait();
+ VIRTIO_BUS_POLL(vq->vq_dev);
+ }
return (cookie);
}
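
A usage sketch under stated assumptions: my_softc, my_sync_request(),
and the single-queue layout are hypothetical, but the virtqueue calls
are the ones defined in this file. The poll hook added above runs once
before the first dequeue attempt and then once per spin, which is what
lets a cache-incoherent software backend (such as the BERI one) publish
its used-ring updates:

	static int
	my_sync_request(struct my_softc *sc, struct sglist *sg)
	{
		uint32_t len;
		int error;

		/* One readable and one writable segment, for example. */
		error = virtqueue_enqueue(sc->vq, sc, sg, 1, 1);
		if (error)
			return (error);
		virtqueue_notify(sc->vq);
		/* Spins, calling VIRTIO_BUS_POLL() on every pass. */
		(void)virtqueue_poll(sc->vq, &len);
		return (0);
	}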
void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
void *cookie;
int idx;
cookie = NULL;
idx = *last;
while (idx < vq->vq_nentries && cookie == NULL) {
if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
vq->vq_descx[idx].cookie = NULL;
/* Free chain to keep free count consistent. */
vq_ring_free_chain(vq, idx);
}
idx++;
}
*last = idx;
return (cookie);
}
void
virtqueue_dump(struct virtqueue *vq)
{
if (vq == NULL)
return;
printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
"desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
"used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
vq->vq_ring.used->idx,
vring_used_event(&vq->vq_ring),
vq->vq_ring.avail->flags,
vq->vq_ring.used->flags);
}
static void
vq_ring_init(struct virtqueue *vq)
{
struct vring *vr;
char *ring_mem;
int i, size;
ring_mem = vq->vq_ring_mem;
size = vq->vq_nentries;
vr = &vq->vq_ring;
vring_init(vr, size, ring_mem, vq->vq_alignment);
for (i = 0; i < size - 1; i++)
vr->desc[i].next = i + 1;
vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx;
/*
* Place the head of the descriptor chain into the next slot and make
* it usable to the host. The chain is made available now rather than
* deferring to virtqueue_notify() in the hopes that if the host is
* currently running on another CPU, we can keep it processing the new
* descriptor.
*/
avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
wmb();
vq->vq_ring.avail->idx++;
/* Keep pending count until virtqueue_notify(). */
vq->vq_queued_cnt++;
}
static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
struct sglist_seg *seg;
struct vring_desc *dp;
int i, needed;
uint16_t idx;
needed = readable + writable;
for (i = 0, idx = head_idx, seg = sg->sg_segs;
i < needed;
i++, idx = dp->next, seg++) {
VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
"premature end of free desc chain");
dp = &desc[idx];
dp->addr = seg->ss_paddr;
dp->len = seg->ss_len;
dp->flags = 0;
if (i < needed - 1)
dp->flags |= VRING_DESC_F_NEXT;
if (i >= readable)
dp->flags |= VRING_DESC_F_WRITE;
}
return (idx);
}
static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{
if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
return (0);
if (vq->vq_max_indirect_size < needed)
return (0);
if (needed < 2)
return (0);
return (1);
}
static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
struct sglist *sg, int readable, int writable)
{
struct vring_desc *dp;
struct vq_desc_extra *dxp;
int needed;
uint16_t head_idx;
needed = readable + writable;
VQASSERT(vq, needed <= vq->vq_max_indirect_size,
"enqueuing too many indirect descriptors");
head_idx = vq->vq_desc_head_idx;
VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
dp = &vq->vq_ring.desc[head_idx];
dxp = &vq->vq_descx[head_idx];
VQASSERT(vq, dxp->cookie == NULL,
"cookie already exists for index %d", head_idx);
dxp->cookie = cookie;
dxp->ndescs = 1;
dp->addr = dxp->indirect_paddr;
dp->len = needed * sizeof(struct vring_desc);
dp->flags = VRING_DESC_F_INDIRECT;
vq_ring_enqueue_segments(vq, dxp->indirect, 0,
sg, readable, writable);
vq->vq_desc_head_idx = dp->next;
vq->vq_free_cnt--;
if (vq->vq_free_cnt == 0)
VQ_RING_ASSERT_CHAIN_TERM(vq);
else
VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
vq_ring_update_avail(vq, head_idx);
}
static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
/*
* Enable interrupts, making sure we get the latest index of
* what's already been consumed.
*/
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
else
vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
mb();
/*
* Enough items may have already been consumed to meet our threshold
* since we last checked. Let our caller know so it processes the new
* entries.
*/
if (virtqueue_nused(vq) > ndesc)
return (1);
return (0);
}
static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
uint16_t new_idx, prev_idx, event_idx;
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
new_idx = vq->vq_ring.avail->idx;
prev_idx = new_idx - vq->vq_queued_cnt;
event_idx = vring_avail_event(&vq->vq_ring);
return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
}
return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
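
A worked example of the EVENT_IDX branch (arithmetic per
vring_need_event() in the standard ring header; all values are
uint16_t):

/*
 * Suppose prev_idx = 10 and we queued 3 buffers, so new_idx = 13.
 *
 *   host set avail_event = 11:  (13 - 11 - 1) = 1  < (13 - 10) = 3
 *                               -> notify the host.
 *   host set avail_event = 14:  (13 - 14 - 1) = 65534 (wraps) >= 3
 *                               -> suppress the notification.
 */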
static void
vq_ring_notify_host(struct virtqueue *vq)
{
VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp;
struct vq_desc_extra *dxp;
VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
dp = &vq->vq_ring.desc[desc_idx];
dxp = &vq->vq_descx[desc_idx];
if (vq->vq_free_cnt == 0)
VQ_RING_ASSERT_CHAIN_TERM(vq);
vq->vq_free_cnt += dxp->ndescs;
dxp->ndescs--;
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT) {
VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
dp = &vq->vq_ring.desc[dp->next];
dxp->ndescs--;
}
}
VQASSERT(vq, dxp->ndescs == 0,
"failed to free entire desc chain, remaining: %d", dxp->ndescs);
/*
* We must append the existing free chain, if any, to the end of
* the newly freed chain. If the virtqueue was completely used, then
* head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
*/
dp->next = vq->vq_desc_head_idx;
vq->vq_desc_head_idx = desc_idx;
}