diff --git a/sys/dev/virtio/gpu/virtio_gpu.c b/sys/dev/virtio/gpu/virtio_gpu.c index 6f786a450900..668eb170304a 100644 --- a/sys/dev/virtio/gpu/virtio_gpu.c +++ b/sys/dev/virtio/gpu/virtio_gpu.c @@ -1,709 +1,709 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013, Bryan Venteicher * All rights reserved. * Copyright (c) 2023, Arm Ltd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO GPU device. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" #define VTGPU_FEATURES 0 /* The guest can allocate resource IDs, we only need one */ #define VTGPU_RESOURCE_ID 1 struct vtgpu_softc { /* Must be first so we can cast from info -> softc */ struct fb_info vtgpu_fb_info; struct virtio_gpu_config vtgpu_gpucfg; device_t vtgpu_dev; uint64_t vtgpu_features; struct virtqueue *vtgpu_ctrl_vq; uint64_t vtgpu_next_fence; bool vtgpu_have_fb_info; }; static int vtgpu_modevent(module_t, int, void *); static int vtgpu_probe(device_t); static int vtgpu_attach(device_t); static int vtgpu_detach(device_t); static int vtgpu_negotiate_features(struct vtgpu_softc *); static int vtgpu_setup_features(struct vtgpu_softc *); static void vtgpu_read_config(struct vtgpu_softc *, struct virtio_gpu_config *); static int vtgpu_alloc_virtqueue(struct vtgpu_softc *); static int vtgpu_get_display_info(struct vtgpu_softc *); static int vtgpu_create_2d(struct vtgpu_softc *); static int vtgpu_attach_backing(struct vtgpu_softc *); static int vtgpu_set_scanout(struct vtgpu_softc *, uint32_t, uint32_t, uint32_t, uint32_t); static int vtgpu_transfer_to_host_2d(struct vtgpu_softc *, uint32_t, uint32_t, uint32_t, uint32_t); static int vtgpu_resource_flush(struct vtgpu_softc *, uint32_t, uint32_t, uint32_t, uint32_t); static vd_blank_t vtgpu_fb_blank; static vd_bitblt_text_t vtgpu_fb_bitblt_text; static vd_bitblt_bmp_t vtgpu_fb_bitblt_bitmap; static vd_drawrect_t vtgpu_fb_drawrect; static vd_setpixel_t vtgpu_fb_setpixel; static vd_bitblt_argb_t vtgpu_fb_bitblt_argb; static struct vt_driver vtgpu_fb_driver = { .vd_name = "virtio_gpu", .vd_init = vt_fb_init, .vd_fini = vt_fb_fini, .vd_blank = vtgpu_fb_blank, .vd_bitblt_text = vtgpu_fb_bitblt_text, .vd_invalidate_text = vt_fb_invalidate_text, 
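/*
 * The drawing hooks in this table wrap the generic vt_fb_*
 * implementations and then push the touched rectangle to the host
 * (TRANSFER_TO_HOST_2D followed by RESOURCE_FLUSH), since the host
 * redraws the scanout only for regions it is told have changed.
 */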
.vd_bitblt_bmp = vtgpu_fb_bitblt_bitmap, .vd_bitblt_argb = vtgpu_fb_bitblt_argb, .vd_drawrect = vtgpu_fb_drawrect, .vd_setpixel = vtgpu_fb_setpixel, .vd_postswitch = vt_fb_postswitch, .vd_priority = VD_PRIORITY_GENERIC+10, .vd_fb_ioctl = vt_fb_ioctl, .vd_fb_mmap = NULL, /* No mmap as we need to signal the host */ .vd_suspend = vt_fb_suspend, .vd_resume = vt_fb_resume, }; VT_DRIVER_DECLARE(vt_vtgpu, vtgpu_fb_driver); static void vtgpu_fb_blank(struct vt_device *vd, term_color_t color) { struct vtgpu_softc *sc; struct fb_info *info; info = vd->vd_softc; sc = (struct vtgpu_softc *)info; vt_fb_blank(vd, color); vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width, sc->vtgpu_fb_info.fb_height); vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width, sc->vtgpu_fb_info.fb_height); } static void vtgpu_fb_bitblt_text(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { struct vtgpu_softc *sc; struct fb_info *info; int x, y, width, height; info = vd->vd_softc; sc = (struct vtgpu_softc *)info; vt_fb_bitblt_text(vd, vw, area); x = area->tr_begin.tp_col * vw->vw_font->vf_width + vw->vw_draw_area.tr_begin.tp_col; y = area->tr_begin.tp_row * vw->vw_font->vf_height + vw->vw_draw_area.tr_begin.tp_row; width = area->tr_end.tp_col * vw->vw_font->vf_width + vw->vw_draw_area.tr_begin.tp_col - x; height = area->tr_end.tp_row * vw->vw_font->vf_height + vw->vw_draw_area.tr_begin.tp_row - y; vtgpu_transfer_to_host_2d(sc, x, y, width, height); vtgpu_resource_flush(sc, x, y, width, height); } static void vtgpu_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw, const uint8_t *pattern, const uint8_t *mask, unsigned int width, unsigned int height, unsigned int x, unsigned int y, term_color_t fg, term_color_t bg) { struct vtgpu_softc *sc; struct fb_info *info; info = vd->vd_softc; sc = (struct vtgpu_softc *)info; vt_fb_bitblt_bitmap(vd, vw, pattern, mask, width, height, x, y, fg, bg); vtgpu_transfer_to_host_2d(sc, x, y, width, height); vtgpu_resource_flush(sc, x, y, width, height); } static int vtgpu_fb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw, const uint8_t *argb, unsigned int width, unsigned int height, unsigned int x, unsigned int y) { return (EOPNOTSUPP); } static void vtgpu_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2, int fill, term_color_t color) { struct vtgpu_softc *sc; struct fb_info *info; int width, height; info = vd->vd_softc; sc = (struct vtgpu_softc *)info; vt_fb_drawrect(vd, x1, y1, x2, y2, fill, color); width = x2 - x1 + 1; height = y2 - y1 + 1; vtgpu_transfer_to_host_2d(sc, x1, y1, width, height); vtgpu_resource_flush(sc, x1, y1, width, height); } static void vtgpu_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color) { struct vtgpu_softc *sc; struct fb_info *info; info = vd->vd_softc; sc = (struct vtgpu_softc *)info; vt_fb_setpixel(vd, x, y, color); vtgpu_transfer_to_host_2d(sc, x, y, 1, 1); vtgpu_resource_flush(sc, x, y, 1, 1); } static struct virtio_feature_desc vtgpu_feature_desc[] = { { VIRTIO_GPU_F_VIRGL, "VirGL" }, { VIRTIO_GPU_F_EDID, "EDID" }, { VIRTIO_GPU_F_RESOURCE_UUID, "ResUUID" }, { VIRTIO_GPU_F_RESOURCE_BLOB, "ResBlob" }, { VIRTIO_GPU_F_CONTEXT_INIT, "ContextInit" }, { 0, NULL } }; static device_method_t vtgpu_methods[] = { /* Device methods. 
*/ DEVMETHOD(device_probe, vtgpu_probe), DEVMETHOD(device_attach, vtgpu_attach), DEVMETHOD(device_detach, vtgpu_detach), DEVMETHOD_END }; static driver_t vtgpu_driver = { "vtgpu", vtgpu_methods, sizeof(struct vtgpu_softc) }; VIRTIO_DRIVER_MODULE(virtio_gpu, vtgpu_driver, vtgpu_modevent, NULL); MODULE_VERSION(virtio_gpu, 1); MODULE_DEPEND(virtio_gpu, virtio, 1, 1, 1); VIRTIO_SIMPLE_PNPINFO(virtio_gpu, VIRTIO_ID_GPU, "VirtIO GPU"); static int vtgpu_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int vtgpu_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, virtio_gpu)); } static int vtgpu_attach(device_t dev) { struct vtgpu_softc *sc; int error; sc = device_get_softc(dev); sc->vtgpu_have_fb_info = false; sc->vtgpu_dev = dev; sc->vtgpu_next_fence = 1; virtio_set_feature_desc(dev, vtgpu_feature_desc); error = vtgpu_setup_features(sc); if (error != 0) { device_printf(dev, "cannot setup features\n"); goto fail; } vtgpu_read_config(sc, &sc->vtgpu_gpucfg); error = vtgpu_alloc_virtqueue(sc); if (error != 0) { device_printf(dev, "cannot allocate virtqueue\n"); goto fail; } virtio_setup_intr(dev, INTR_TYPE_TTY); /* Read the device info to get the display size */ error = vtgpu_get_display_info(sc); if (error != 0) { goto fail; } /* * TODO: This doesn't need to be contigmalloc as we * can use scatter-gather lists. */ sc->vtgpu_fb_info.fb_vbase = (vm_offset_t)contigmalloc( sc->vtgpu_fb_info.fb_size, M_DEVBUF, M_WAITOK|M_ZERO, 0, ~0, 4, 0); sc->vtgpu_fb_info.fb_pbase = pmap_kextract(sc->vtgpu_fb_info.fb_vbase); /* Create the 2d resource */ error = vtgpu_create_2d(sc); if (error != 0) { goto fail; } /* Attach the backing memory */ error = vtgpu_attach_backing(sc); if (error != 0) { goto fail; } /* Set the scanout to link the framebuffer to the display scanout */ error = vtgpu_set_scanout(sc, 0, 0, sc->vtgpu_fb_info.fb_width, sc->vtgpu_fb_info.fb_height); if (error != 0) { goto fail; } vt_allocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info); sc->vtgpu_have_fb_info = true; error = vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width, sc->vtgpu_fb_info.fb_height); if (error != 0) goto fail; error = vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width, sc->vtgpu_fb_info.fb_height); fail: if (error != 0) vtgpu_detach(dev); return (error); } static int vtgpu_detach(device_t dev) { struct vtgpu_softc *sc; sc = device_get_softc(dev); if (sc->vtgpu_have_fb_info) vt_deallocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info); if (sc->vtgpu_fb_info.fb_vbase != 0) { MPASS(sc->vtgpu_fb_info.fb_size != 0); free((void *)sc->vtgpu_fb_info.fb_vbase, M_DEVBUF); } /* TODO: Tell the host we are detaching */ return (0); } static int vtgpu_negotiate_features(struct vtgpu_softc *sc) { device_t dev; uint64_t features; dev = sc->vtgpu_dev; features = VTGPU_FEATURES; sc->vtgpu_features = virtio_negotiate_features(dev, features); return (virtio_finalize_features(dev)); } static int vtgpu_setup_features(struct vtgpu_softc *sc) { int error; error = vtgpu_negotiate_features(sc); if (error != 0) return (error); return (0); } static void vtgpu_read_config(struct vtgpu_softc *sc, struct virtio_gpu_config *gpucfg) { device_t dev; dev = sc->vtgpu_dev; bzero(gpucfg, sizeof(struct virtio_gpu_config)); #define VTGPU_GET_CONFIG(_dev, _field, _cfg) \ virtio_read_device_config(_dev, \ offsetof(struct virtio_gpu_config, _field), \ &(_cfg)->_field, sizeof((_cfg)->_field)) 
\ VTGPU_GET_CONFIG(dev, events_read, gpucfg); VTGPU_GET_CONFIG(dev, events_clear, gpucfg); VTGPU_GET_CONFIG(dev, num_scanouts, gpucfg); VTGPU_GET_CONFIG(dev, num_capsets, gpucfg); #undef VTGPU_GET_CONFIG } static int vtgpu_alloc_virtqueue(struct vtgpu_softc *sc) { device_t dev; struct vq_alloc_info vq_info[2]; int nvqs; dev = sc->vtgpu_dev; nvqs = 1; VQ_ALLOC_INFO_INIT(&vq_info[0], 0, NULL, sc, &sc->vtgpu_ctrl_vq, "%s control", device_get_nameunit(dev)); return (virtio_alloc_virtqueues(dev, nvqs, vq_info)); } static int vtgpu_req_resp(struct vtgpu_softc *sc, void *req, size_t reqlen, void *resp, size_t resplen) { struct sglist sg; struct sglist_seg segs[2]; int error; sglist_init(&sg, 2, segs); error = sglist_append(&sg, req, reqlen); if (error != 0) { device_printf(sc->vtgpu_dev, "Unable to append the request to the sglist: %d\n", error); return (error); } error = sglist_append(&sg, resp, resplen); if (error != 0) { device_printf(sc->vtgpu_dev, "Unable to append the response buffer to the sglist: %d\n", error); return (error); } error = virtqueue_enqueue(sc->vtgpu_ctrl_vq, resp, &sg, 1, 1); if (error != 0) { device_printf(sc->vtgpu_dev, "Enqueue failed: %d\n", error); return (error); } virtqueue_notify(sc->vtgpu_ctrl_vq); virtqueue_poll(sc->vtgpu_ctrl_vq, NULL); return (0); } static int vtgpu_get_display_info(struct vtgpu_softc *sc) { struct { struct virtio_gpu_ctrl_hdr req; char pad; struct virtio_gpu_resp_display_info resp; } s = { 0 }; int error; s.req.type = htole32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO); s.req.flags = htole32(VIRTIO_GPU_FLAG_FENCE); s.req.fence_id = htole64(atomic_fetchadd_64(&sc->vtgpu_next_fence, 1)); error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp, sizeof(s.resp)); if (error != 0) return (error); for (int i = 0; i < sc->vtgpu_gpucfg.num_scanouts; i++) { if (s.resp.pmodes[i].enabled != 0) MPASS(i == 0); sc->vtgpu_fb_info.fb_name = device_get_nameunit(sc->vtgpu_dev); sc->vtgpu_fb_info.fb_width = le32toh(s.resp.pmodes[i].r.width); sc->vtgpu_fb_info.fb_height = le32toh(s.resp.pmodes[i].r.height); /* 32 bits per pixel */ sc->vtgpu_fb_info.fb_bpp = 32; sc->vtgpu_fb_info.fb_depth = 32; sc->vtgpu_fb_info.fb_size = sc->vtgpu_fb_info.fb_width * sc->vtgpu_fb_info.fb_height * 4; sc->vtgpu_fb_info.fb_stride = sc->vtgpu_fb_info.fb_width * 4; return (0); } return (ENXIO); } static int vtgpu_create_2d(struct vtgpu_softc *sc) { struct { struct virtio_gpu_resource_create_2d req; char pad; struct virtio_gpu_ctrl_hdr resp; } s = { 0 }; int error; s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE); s.req.hdr.fence_id = htole64( atomic_fetchadd_64(&sc->vtgpu_next_fence, 1)); s.req.resource_id = htole32(VTGPU_RESOURCE_ID); s.req.format = htole32(VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM); s.req.width = htole32(sc->vtgpu_fb_info.fb_width); s.req.height = htole32(sc->vtgpu_fb_info.fb_height); error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp, sizeof(s.resp)); if (error != 0) return (error); if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) { - device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n", + device_printf(sc->vtgpu_dev, "Invalid response type %x\n", le32toh(s.resp.type)); return (EINVAL); } return (0); } static int vtgpu_attach_backing(struct vtgpu_softc *sc) { struct { struct { struct virtio_gpu_resource_attach_backing backing; struct virtio_gpu_mem_entry mem[1]; } req; char pad; struct virtio_gpu_ctrl_hdr resp; } s = { 0 }; int error; s.req.backing.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING); 
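/*
 * Like every control command in this driver, the request is fenced:
 * VIRTIO_GPU_FLAG_FENCE plus a unique fence_id from
 * atomic_fetchadd_64() makes the host complete the command only after
 * it has fully executed, and vtgpu_req_resp() busy-waits on the
 * control queue for that completion via virtqueue_poll().
 */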
s.req.backing.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE); s.req.backing.hdr.fence_id = htole64( atomic_fetchadd_64(&sc->vtgpu_next_fence, 1)); s.req.backing.resource_id = htole32(VTGPU_RESOURCE_ID); s.req.backing.nr_entries = htole32(1); s.req.mem[0].addr = htole64(sc->vtgpu_fb_info.fb_pbase); s.req.mem[0].length = htole32(sc->vtgpu_fb_info.fb_size); error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp, sizeof(s.resp)); if (error != 0) return (error); if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) { - device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n", + device_printf(sc->vtgpu_dev, "Invalid response type %x\n", le32toh(s.resp.type)); return (EINVAL); } return (0); } static int vtgpu_set_scanout(struct vtgpu_softc *sc, uint32_t x, uint32_t y, uint32_t width, uint32_t height) { struct { struct virtio_gpu_set_scanout req; char pad; struct virtio_gpu_ctrl_hdr resp; } s = { 0 }; int error; s.req.hdr.type = htole32(VIRTIO_GPU_CMD_SET_SCANOUT); s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE); s.req.hdr.fence_id = htole64( atomic_fetchadd_64(&sc->vtgpu_next_fence, 1)); s.req.r.x = htole32(x); s.req.r.y = htole32(y); s.req.r.width = htole32(width); s.req.r.height = htole32(height); s.req.scanout_id = 0; s.req.resource_id = htole32(VTGPU_RESOURCE_ID); error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp, sizeof(s.resp)); if (error != 0) return (error); if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) { - device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n", + device_printf(sc->vtgpu_dev, "Invalid response type %x\n", le32toh(s.resp.type)); return (EINVAL); } return (0); } static int vtgpu_transfer_to_host_2d(struct vtgpu_softc *sc, uint32_t x, uint32_t y, uint32_t width, uint32_t height) { struct { struct virtio_gpu_transfer_to_host_2d req; char pad; struct virtio_gpu_ctrl_hdr resp; } s = { 0 }; int error; s.req.hdr.type = htole32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE); s.req.hdr.fence_id = htole64( atomic_fetchadd_64(&sc->vtgpu_next_fence, 1)); s.req.r.x = htole32(x); s.req.r.y = htole32(y); s.req.r.width = htole32(width); s.req.r.height = htole32(height); s.req.offset = htole64((y * sc->vtgpu_fb_info.fb_width + x) * (sc->vtgpu_fb_info.fb_bpp / 8)); s.req.resource_id = htole32(VTGPU_RESOURCE_ID); error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp, sizeof(s.resp)); if (error != 0) return (error); if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) { - device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n", + device_printf(sc->vtgpu_dev, "Invalid response type %x\n", le32toh(s.resp.type)); return (EINVAL); } return (0); } static int vtgpu_resource_flush(struct vtgpu_softc *sc, uint32_t x, uint32_t y, uint32_t width, uint32_t height) { struct { struct virtio_gpu_resource_flush req; char pad; struct virtio_gpu_ctrl_hdr resp; } s = { 0 }; int error; s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_FLUSH); s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE); s.req.hdr.fence_id = htole64( atomic_fetchadd_64(&sc->vtgpu_next_fence, 1)); s.req.r.x = htole32(x); s.req.r.y = htole32(y); s.req.r.width = htole32(width); s.req.r.height = htole32(height); s.req.resource_id = htole32(VTGPU_RESOURCE_ID); error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp, sizeof(s.resp)); if (error != 0) return (error); if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) { - device_printf(sc->vtgpu_dev, "Invalid reponse type %x\n", + device_printf(sc->vtgpu_dev, "Invalid response type %x\n", le32toh(s.resp.type)); return (EINVAL); } 
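/*
 * A sketch (not part of the patch) of how the "Tell the host we are
 * detaching" TODO in vtgpu_detach() could be addressed, reusing the
 * request/response pattern of the surrounding commands. It assumes
 * the VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING command and the
 * virtio_gpu_resource_detach_backing layout from the VirtIO spec
 * header; the helper name is hypothetical.
 */
static int
vtgpu_detach_backing(struct vtgpu_softc *sc)
{
	struct {
		struct virtio_gpu_resource_detach_backing req;
		char pad;
		struct virtio_gpu_ctrl_hdr resp;
	} s = { 0 };
	int error;

	s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
	s.req.hdr.fence_id = htole64(
	    atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
	s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

	error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
	    sizeof(s.resp));
	if (error != 0)
		return (error);
	if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
		    le32toh(s.resp.type));
		return (EINVAL);
	}
	return (0);
}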
return (0); } diff --git a/sys/dev/virtio/scmi/virtio_scmi.c b/sys/dev/virtio/scmi/virtio_scmi.c index f5427756e971..436711dc0ae2 100644 --- a/sys/dev/virtio/scmi/virtio_scmi.c +++ b/sys/dev/virtio/scmi/virtio_scmi.c @@ -1,520 +1,520 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Arm Ltd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO SCMI device. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct vtscmi_pdu { enum vtscmi_chan chan; struct sglist sg; struct sglist_seg segs[2]; void *buf; SLIST_ENTRY(vtscmi_pdu) next; }; struct vtscmi_queue { device_t dev; int vq_id; unsigned int vq_sz; struct virtqueue *vq; struct mtx vq_mtx; struct vtscmi_pdu *pdus; SLIST_HEAD(pdus_head, vtscmi_pdu) p_head; struct mtx p_mtx; virtio_scmi_rx_callback_t *rx_callback; void *priv; }; struct vtscmi_softc { device_t vtscmi_dev; uint64_t vtscmi_features; uint8_t vtscmi_vqs_cnt; struct vtscmi_queue vtscmi_queues[VIRTIO_SCMI_CHAN_MAX]; bool has_p2a; bool has_shared; }; static device_t vtscmi_dev; static int vtscmi_modevent(module_t, int, void *); static int vtscmi_probe(device_t); static int vtscmi_attach(device_t); static int vtscmi_detach(device_t); static int vtscmi_shutdown(device_t); static int vtscmi_negotiate_features(struct vtscmi_softc *); static int vtscmi_setup_features(struct vtscmi_softc *); static void vtscmi_vq_intr(void *); static int vtscmi_alloc_virtqueues(struct vtscmi_softc *); static int vtscmi_alloc_queues(struct vtscmi_softc *); static void vtscmi_free_queues(struct vtscmi_softc *); static void *virtio_scmi_pdu_get(struct vtscmi_queue *, void *, unsigned int, unsigned int); static void virtio_scmi_pdu_put(device_t, struct vtscmi_pdu *); static struct virtio_feature_desc vtscmi_feature_desc[] = { { VIRTIO_SCMI_F_P2A_CHANNELS, "P2AChannel" }, { VIRTIO_SCMI_F_SHARED_MEMORY, "SharedMem" }, { 0, NULL } }; static device_method_t vtscmi_methods[] = { /* Device methods. 
*/ DEVMETHOD(device_probe, vtscmi_probe), DEVMETHOD(device_attach, vtscmi_attach), DEVMETHOD(device_detach, vtscmi_detach), DEVMETHOD(device_shutdown, vtscmi_shutdown), DEVMETHOD_END }; static driver_t vtscmi_driver = { "vtscmi", vtscmi_methods, sizeof(struct vtscmi_softc) }; VIRTIO_DRIVER_MODULE(virtio_scmi, vtscmi_driver, vtscmi_modevent, NULL); MODULE_VERSION(virtio_scmi, 1); MODULE_DEPEND(virtio_scmi, virtio, 1, 1, 1); VIRTIO_SIMPLE_PNPINFO(virtio_scmi, VIRTIO_ID_SCMI, "VirtIO SCMI Adapter"); static int vtscmi_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static int vtscmi_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, virtio_scmi)); } static int vtscmi_attach(device_t dev) { struct vtscmi_softc *sc; int error; /* Only one SCMI device per-agent */ if (vtscmi_dev != NULL) return (EEXIST); sc = device_get_softc(dev); sc->vtscmi_dev = dev; virtio_set_feature_desc(dev, vtscmi_feature_desc); error = vtscmi_setup_features(sc); if (error) { device_printf(dev, "cannot setup features\n"); goto fail; } error = vtscmi_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = vtscmi_alloc_queues(sc); if (error) { device_printf(dev, "cannot allocate queues\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_MISC); if (error) { device_printf(dev, "cannot setup intr\n"); vtscmi_free_queues(sc); goto fail; } /* Save unique device */ vtscmi_dev = sc->vtscmi_dev; fail: return (error); } static int vtscmi_detach(device_t dev) { struct vtscmi_softc *sc; sc = device_get_softc(dev); /* These also disable related interrupts */ virtio_scmi_channel_callback_set(dev, VIRTIO_SCMI_CHAN_A2P, NULL, NULL); virtio_scmi_channel_callback_set(dev, VIRTIO_SCMI_CHAN_P2A, NULL, NULL); virtio_stop(dev); vtscmi_free_queues(sc); return (0); } static int vtscmi_shutdown(device_t dev) { return (0); } static int vtscmi_negotiate_features(struct vtscmi_softc *sc) { device_t dev; uint64_t features; dev = sc->vtscmi_dev; /* We still don't support shared mem (stats)...so don't advertise it */ features = VIRTIO_SCMI_F_P2A_CHANNELS; sc->vtscmi_features = virtio_negotiate_features(dev, features); return (virtio_finalize_features(dev)); } static int vtscmi_setup_features(struct vtscmi_softc *sc) { device_t dev; int error; dev = sc->vtscmi_dev; error = vtscmi_negotiate_features(sc); if (error) return (error); if (virtio_with_feature(dev, VIRTIO_SCMI_F_P2A_CHANNELS)) sc->has_p2a = true; if (virtio_with_feature(dev, VIRTIO_SCMI_F_SHARED_MEMORY)) sc->has_shared = true; device_printf(dev, "Platform %s P2A channel.\n", sc->has_p2a ? "supports" : "does NOT support"); return (0); } static int vtscmi_alloc_queues(struct vtscmi_softc *sc) { int idx; for (idx = VIRTIO_SCMI_CHAN_A2P; idx < VIRTIO_SCMI_CHAN_MAX; idx++) { int i, vq_sz; struct vtscmi_queue *q; struct vtscmi_pdu *pdu; if (idx == VIRTIO_SCMI_CHAN_P2A && !sc->has_p2a) continue; q = &sc->vtscmi_queues[idx]; q->dev = sc->vtscmi_dev; q->vq_id = idx; vq_sz = virtqueue_size(q->vq); q->vq_sz = idx != VIRTIO_SCMI_CHAN_A2P ? vq_sz : vq_sz / 2; q->pdus = mallocarray(q->vq_sz, sizeof(*pdu), M_DEVBUF, M_ZERO | M_WAITOK); SLIST_INIT(&q->p_head); for (i = 0, pdu = q->pdus; i < q->vq_sz; i++, pdu++) { pdu->chan = idx; - //XXX Maybe one seg redndant for P2A + //XXX Maybe one seg redundant for P2A sglist_init(&pdu->sg, idx == VIRTIO_SCMI_CHAN_A2P ?
2 : 1, pdu->segs); SLIST_INSERT_HEAD(&q->p_head, pdu, next); } mtx_init(&q->p_mtx, "vtscmi_pdus", "VTSCMI", MTX_SPIN); mtx_init(&q->vq_mtx, "vtscmi_vq", "VTSCMI", MTX_SPIN); } return (0); } static void vtscmi_free_queues(struct vtscmi_softc *sc) { int idx; for (idx = VIRTIO_SCMI_CHAN_A2P; idx < VIRTIO_SCMI_CHAN_MAX; idx++) { struct vtscmi_queue *q; if (idx == VIRTIO_SCMI_CHAN_P2A && !sc->has_p2a) continue; q = &sc->vtscmi_queues[idx]; if (q->vq_sz == 0) continue; free(q->pdus, M_DEVBUF); mtx_destroy(&q->p_mtx); mtx_destroy(&q->vq_mtx); } } static void vtscmi_vq_intr(void *arg) { struct vtscmi_queue *q = arg; /* * TODO * - consider pressure on RX by msg floods * + Does it need a taskqueue_ like virtio/net to postpone processing * under pressure ? (SCMI is low_freq compared to network though) */ for (;;) { struct vtscmi_pdu *pdu; uint32_t rx_len; mtx_lock_spin(&q->vq_mtx); pdu = virtqueue_dequeue(q->vq, &rx_len); mtx_unlock_spin(&q->vq_mtx); if (!pdu) return; if (q->rx_callback) q->rx_callback(pdu->buf, rx_len, q->priv); /* Note that this only frees the PDU, NOT the buffer itself */ virtio_scmi_pdu_put(q->dev, pdu); } } static int vtscmi_alloc_virtqueues(struct vtscmi_softc *sc) { device_t dev; struct vq_alloc_info vq_info[VIRTIO_SCMI_CHAN_MAX]; dev = sc->vtscmi_dev; sc->vtscmi_vqs_cnt = sc->has_p2a ? 2 : 1; VQ_ALLOC_INFO_INIT(&vq_info[VIRTIO_SCMI_CHAN_A2P], 0, vtscmi_vq_intr, &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_A2P], &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_A2P].vq, "%s cmdq", device_get_nameunit(dev)); if (sc->has_p2a) { VQ_ALLOC_INFO_INIT(&vq_info[VIRTIO_SCMI_CHAN_P2A], 0, vtscmi_vq_intr, &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_P2A], &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_P2A].vq, "%s evtq", device_get_nameunit(dev)); } return (virtio_alloc_virtqueues(dev, sc->vtscmi_vqs_cnt, vq_info)); } static void * virtio_scmi_pdu_get(struct vtscmi_queue *q, void *buf, unsigned int tx_len, unsigned int rx_len) { struct vtscmi_pdu *pdu = NULL; if (rx_len == 0) return (NULL); mtx_lock_spin(&q->p_mtx); if (!SLIST_EMPTY(&q->p_head)) { pdu = SLIST_FIRST(&q->p_head); SLIST_REMOVE_HEAD(&q->p_head, next); } mtx_unlock_spin(&q->p_mtx); if (pdu == NULL) { - device_printf(q->dev, "Cannnot allocate PDU.\n"); + device_printf(q->dev, "Cannot allocate PDU.\n"); return (NULL); } /*Save msg buffer for easy access */ pdu->buf = buf; if (tx_len != 0) sglist_append(&pdu->sg, pdu->buf, tx_len); sglist_append(&pdu->sg, pdu->buf, rx_len); return (pdu); } static void virtio_scmi_pdu_put(device_t dev, struct vtscmi_pdu *pdu) { struct vtscmi_softc *sc; struct vtscmi_queue *q; if (pdu == NULL) return; sc = device_get_softc(dev); q = &sc->vtscmi_queues[pdu->chan]; sglist_reset(&pdu->sg); mtx_lock_spin(&q->p_mtx); SLIST_INSERT_HEAD(&q->p_head, pdu, next); mtx_unlock_spin(&q->p_mtx); } device_t virtio_scmi_transport_get(void) { return (vtscmi_dev); } int virtio_scmi_channel_size_get(device_t dev, enum vtscmi_chan chan) { struct vtscmi_softc *sc; sc = device_get_softc(dev); if (chan >= sc->vtscmi_vqs_cnt) return (0); return (sc->vtscmi_queues[chan].vq_sz); } int virtio_scmi_channel_callback_set(device_t dev, enum vtscmi_chan chan, virtio_scmi_rx_callback_t *cb, void *priv) { struct vtscmi_softc *sc; sc = device_get_softc(dev); if (chan >= sc->vtscmi_vqs_cnt) return (1); if (cb == NULL) virtqueue_disable_intr(sc->vtscmi_queues[chan].vq); sc->vtscmi_queues[chan].rx_callback = cb; sc->vtscmi_queues[chan].priv = priv; /* Enable Interrupt on VQ once the callback is set */ if (cb != NULL) /* * TODO * Does this need a taskqueue_ task to process 
already pending * messages ? */ virtqueue_enable_intr(sc->vtscmi_queues[chan].vq); device_printf(dev, "%sabled interrupts on VQ[%d].\n", cb ? "En" : "Dis", chan); return (0); } int virtio_scmi_message_enqueue(device_t dev, enum vtscmi_chan chan, void *buf, unsigned int tx_len, unsigned int rx_len) { struct vtscmi_softc *sc; struct vtscmi_pdu *pdu; struct vtscmi_queue *q; int ret; sc = device_get_softc(dev); if (chan >= sc->vtscmi_vqs_cnt) return (1); q = &sc->vtscmi_queues[chan]; pdu = virtio_scmi_pdu_get(q, buf, tx_len, rx_len); if (pdu == NULL) return (ENXIO); mtx_lock_spin(&q->vq_mtx); ret = virtqueue_enqueue(q->vq, pdu, &pdu->sg, chan == VIRTIO_SCMI_CHAN_A2P ? 1 : 0, 1); if (ret == 0) virtqueue_notify(q->vq); mtx_unlock_spin(&q->vq_mtx); return (ret); } void * virtio_scmi_message_poll(device_t dev, uint32_t *rx_len) { struct vtscmi_softc *sc; struct vtscmi_queue *q; struct vtscmi_pdu *pdu; void *buf = NULL; sc = device_get_softc(dev); q = &sc->vtscmi_queues[VIRTIO_SCMI_CHAN_A2P]; mtx_lock_spin(&q->vq_mtx); - /* Not using virtqueue_poll since has no configurable timeout */ + /* Not using virtqueue_poll since it has no configurable timeout */ pdu = virtqueue_dequeue(q->vq, rx_len); mtx_unlock_spin(&q->vq_mtx); if (pdu != NULL) { buf = pdu->buf; virtio_scmi_pdu_put(dev, pdu); } return (buf); }
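/*
 * A usage sketch (not part of the patch) of how an SCMI protocol layer
 * might drive the transport API exported above. The callback signature
 * is inferred from the q->rx_callback(pdu->buf, rx_len, q->priv) call
 * in vtscmi_vq_intr(); scmi_rx_cb() and scmi_send_example() are
 * hypothetical names used only for illustration.
 */
static void
scmi_rx_cb(void *msg, unsigned int len, void *priv)
{
	/* The reply has been written into the first len bytes of msg. */
}

static int
scmi_send_example(void *msg, unsigned int tx_len, unsigned int rx_len)
{
	device_t vdev;

	/* There is at most one VirtIO SCMI transport per agent. */
	vdev = virtio_scmi_transport_get();
	if (vdev == NULL)
		return (ENODEV);

	/* Completions on the command queue will invoke scmi_rx_cb(). */
	if (virtio_scmi_channel_callback_set(vdev, VIRTIO_SCMI_CHAN_A2P,
	    scmi_rx_cb, NULL) != 0)
		return (ENXIO);

	/* One buffer carries both the command (tx) and the reply (rx). */
	return (virtio_scmi_message_enqueue(vdev, VIRTIO_SCMI_CHAN_A2P,
	    msg, tx_len, rx_len));
}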