diff --git a/sys/dev/vmd/vmd.c b/sys/dev/vmd/vmd.c index 616fd4c8373e..80eeda4639fa 100644 --- a/sys/dev/vmd/vmd.c +++ b/sys/dev/vmd/vmd.c @@ -1,627 +1,684 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2019 Cisco Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TASK_QUEUE_INTR 1 #include #include "pcib_if.h" #include "pci_if.h" struct vmd_type { u_int16_t vmd_vid; u_int16_t vmd_did; char *vmd_name; + int flags; +#define BUS_RESTRICT 1 }; #define INTEL_VENDOR_ID 0x8086 -#define INTEL_DEVICE_ID_VMD 0x201d -#define INTEL_DEVICE_ID_VMD2 0x28c0 +#define INTEL_DEVICE_ID_201d 0x201d +#define INTEL_DEVICE_ID_28c0 0x28c0 +#define INTEL_DEVICE_ID_467f 0x467f +#define INTEL_DEVICE_ID_4c3d 0x4c3d +#define INTEL_DEVICE_ID_9a0b 0x9a0b + +#define VMD_CAP 0x40 +#define VMD_BUS_RESTRICT 0x1 + +#define VMD_CONFIG 0x44 +#define VMD_BUS_START(x) ((x >> 8) & 0x3) + +#define VMD_LOCK 0x70 static struct vmd_type vmd_devs[] = { - { INTEL_VENDOR_ID, INTEL_DEVICE_ID_VMD, "Intel Volume Management Device" }, - { INTEL_VENDOR_ID, INTEL_DEVICE_ID_VMD2, "Intel Volume Management Device" }, - { 0, 0, NULL } + { INTEL_VENDOR_ID, INTEL_DEVICE_ID_201d, "Intel Volume Management Device", 0 }, + { INTEL_VENDOR_ID, INTEL_DEVICE_ID_28c0, "Intel Volume Management Device", BUS_RESTRICT }, + { INTEL_VENDOR_ID, INTEL_DEVICE_ID_467f, "Intel Volume Management Device", BUS_RESTRICT }, + { INTEL_VENDOR_ID, INTEL_DEVICE_ID_4c3d, "Intel Volume Management Device", BUS_RESTRICT }, + { INTEL_VENDOR_ID, INTEL_DEVICE_ID_9a0b, "Intel Volume Management Device", BUS_RESTRICT }, + { 0, 0, NULL, 0 } }; static int vmd_probe(device_t dev) { struct vmd_type *t; uint16_t vid, did; t = vmd_devs; vid = pci_get_vendor(dev); did = pci_get_device(dev); while (t->vmd_name != NULL) { if (vid == t->vmd_vid && did == t->vmd_did) { device_set_desc(dev, t->vmd_name); return (BUS_PROBE_DEFAULT); } t++; } -return (ENXIO); + return (ENXIO); } static void vmd_free(struct vmd_softc *sc) { int i; struct vmd_irq_handler *elm, *tmp; if (sc->vmd_bus.rman.rm_end != 0) 
rman_fini(&sc->vmd_bus.rman); #ifdef TASK_QUEUE_INTR if (sc->vmd_irq_tq != NULL) { taskqueue_drain(sc->vmd_irq_tq, &sc->vmd_irq_task); taskqueue_free(sc->vmd_irq_tq); sc->vmd_irq_tq = NULL; } #endif if (sc->vmd_irq != NULL) { for (i = 0; i < sc->vmd_msix_count; i++) { if (sc->vmd_irq[i].vmd_res != NULL) { bus_teardown_intr(sc->vmd_dev, sc->vmd_irq[i].vmd_res, sc->vmd_irq[i].vmd_handle); bus_release_resource(sc->vmd_dev, SYS_RES_IRQ, sc->vmd_irq[i].vmd_rid, sc->vmd_irq[i].vmd_res); } } TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list ,vmd_link, tmp) { TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link); free(elm, M_DEVBUF); } } free(sc->vmd_irq, M_DEVBUF); sc->vmd_irq = NULL; pci_release_msi(sc->vmd_dev); for (i = 0; i < VMD_MAX_BAR; i++) { if (sc->vmd_regs_resource[i] != NULL) bus_release_resource(sc->vmd_dev, SYS_RES_MEMORY, sc->vmd_regs_rid[i], sc->vmd_regs_resource[i]); } if (sc->vmd_io_resource) bus_release_resource(device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, sc->vmd_io_rid, sc->vmd_io_resource); #ifndef TASK_QUEUE_INTR if (mtx_initialized(&sc->vmd_irq_lock)) { mtx_destroy(&sc->vmd_irq_lock); } #endif } /* Hidden PCI Roots are hidden in BAR(0). 
*/ static uint32_t vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { struct vmd_softc *sc; bus_addr_t offset; - offset = (b << 20) + (s << 15) + (f << 12) + reg; sc = device_get_softc(dev); + if (b < sc->vmd_bus_start) + return (0xffffffff); + + offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; + switch(width) { case 4: return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle, offset)); case 2: return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle, offset)); case 1: return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle, offset)); default: KASSERT(1, ("Invalid width requested")); return (0xffffffff); } } static void vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { struct vmd_softc *sc; bus_addr_t offset; - offset = (b << 20) + (s << 15) + (f << 12) + reg; sc = device_get_softc(dev); + if (b < sc->vmd_bus_start) + return; + + offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch(width) { case 4: return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle, offset, val)); case 2: return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle, offset, val)); case 1: return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle, offset, val)); default: panic("Failed to specific width"); } } static uint32_t vmd_pci_read_config(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return vmd_read_config(dev, cfg->bus, cfg->slot, cfg->func, reg, width); } static void vmd_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; vmd_write_config(dev, cfg->bus, cfg->slot, cfg->func, reg, val, width); } static struct pci_devinfo * vmd_alloc_devinfo(device_t dev) { struct pci_devinfo *dinfo; dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); return (dinfo); } static void 
vmd_intr(void *arg) { struct vmd_irq *irq; struct vmd_softc *sc; #ifndef TASK_QUEUE_INTR struct vmd_irq_handler *elm, *tmp_elm; #endif irq = (struct vmd_irq *)arg; sc = irq->vmd_sc; #ifdef TASK_QUEUE_INTR taskqueue_enqueue(sc->vmd_irq_tq, &sc->vmd_irq_task); #else mtx_lock(&sc->vmd_irq_lock); TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) { (elm->vmd_intr)(elm->vmd_arg); } mtx_unlock(&sc->vmd_irq_lock); #endif } #ifdef TASK_QUEUE_INTR static void vmd_handle_irq(void *context, int pending) { struct vmd_irq_handler *elm, *tmp_elm; struct vmd_softc *sc; sc = context; TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) { (elm->vmd_intr)(elm->vmd_arg); } } #endif static int vmd_attach(device_t dev) { struct vmd_softc *sc; struct pcib_secbus *bus; + struct vmd_type *t; + uint16_t vid, did; uint32_t bar; int i, j, error; int rid, sec_reg; static int b; static int s; static int f; int min_count = 1; char buf[64]; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->vmd_dev = dev; b = s = f = 0; pci_enable_busmaster(dev); #ifdef TASK_QUEUE_INTR sc->vmd_irq_tq = taskqueue_create_fast("vmd_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->vmd_irq_tq); taskqueue_start_threads(&sc->vmd_irq_tq, 1, PI_DISK, "%s taskq", device_get_nameunit(sc->vmd_dev)); TASK_INIT(&sc->vmd_irq_task, 0, vmd_handle_irq, sc); #else mtx_init(&sc->vmd_irq_lock, "VMD IRQ lock", NULL, MTX_DEF); #endif for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++ ) { sc->vmd_regs_rid[i] = PCIR_BAR(j); bar = pci_read_config(dev, PCIR_BAR(0), 4); if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) j++; if ((sc->vmd_regs_resource[i] = bus_alloc_resource_any( sc->vmd_dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate resources\n"); goto fail; } } sc->vmd_io_rid = PCIR_IOBASEL_1; sc->vmd_io_resource = bus_alloc_resource_any( device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, &sc->vmd_io_rid, RF_ACTIVE); if 
(sc->vmd_io_resource == NULL) { device_printf(dev, "Cannot allocate IO\n"); goto fail; } sc->vmd_btag = rman_get_bustag(sc->vmd_regs_resource[0]); sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_resource[0]); pci_write_config(dev, PCIR_PRIBUS_2, pcib_get_bus(device_get_parent(dev)), 1); + t = vmd_devs; + vid = pci_get_vendor(dev); + did = pci_get_device(dev); + + sc->vmd_bus_start = 0; + while (t->vmd_name != NULL) { + if (vid == t->vmd_vid && + did == t->vmd_did) { + if (t->flags == BUS_RESTRICT) { + if (pci_read_config(dev, VMD_CAP, 2) & + VMD_BUS_RESTRICT) + switch (VMD_BUS_START(pci_read_config( + dev, VMD_CONFIG, 2))) { + case 1: + sc->vmd_bus_start = 128; + break; + case 2: + sc->vmd_bus_start = 224; + break; + case 3: + device_printf(dev, + "Unknown bus offset\n"); + goto fail; + break; + } + } + } + t++; + } + + device_printf(dev, "VMD bus starts at %d\n", sc->vmd_bus_start); + sec_reg = PCIR_SECBUS_1; bus = &sc->vmd_bus; bus->sub_reg = PCIR_SUBBUS_1; bus->sec = vmd_read_config(dev, b, s, f, sec_reg, 1); bus->sub = vmd_read_config(dev, b, s, f, bus->sub_reg, 1); bus->dev = dev; - bus->rman.rm_start = 0; + bus->rman.rm_start = sc->vmd_bus_start; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) { device_printf(dev, "Failed to initialize %s bus number rman\n", device_get_nameunit(dev)); bus->rman.rm_end = 0; goto fail; } /* * Allocate a bus range. This will return an existing bus range * if one exists, or a new bus range if one does not. */ rid = 0; bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, min_count, 0); if (bus->res == NULL) { /* * Fall back to just allocating a range of a single bus * number.
*/ bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, 1, 0); } else if (rman_get_size(bus->res) < min_count) { /* * Attempt to grow the existing range to satisfy the * minimum desired count. */ (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), rman_get_start(bus->res) + min_count - 1); } /* * Add the initial resource to the rman. */ if (bus->res != NULL) { error = rman_manage_region(&bus->rman, rman_get_start(bus->res), rman_get_end(bus->res)); if (error) { device_printf(dev, "Failed to add resource to rman\n"); goto fail; } bus->sec = rman_get_start(bus->res); bus->sub = rman_get_end(bus->res); } sc->vmd_msix_count = pci_msix_count(dev); if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) { sc->vmd_irq = malloc(sizeof(struct vmd_irq) * sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->vmd_msix_count; i++) { sc->vmd_irq[i].vmd_rid = i + 1; sc->vmd_irq[i].vmd_sc = sc; sc->vmd_irq[i].vmd_instance = i; sc->vmd_irq[i].vmd_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vmd_irq[i].vmd_rid, RF_ACTIVE); if (sc->vmd_irq[i].vmd_res == NULL) { device_printf(dev,"Failed to alloc irq\n"); goto fail; } TAILQ_INIT(&sc->vmd_irq[i].vmd_list); if (bus_setup_intr(dev, sc->vmd_irq[i].vmd_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, vmd_intr, &sc->vmd_irq[i], &sc->vmd_irq[i].vmd_handle)) { device_printf(sc->vmd_dev, "Cannot set up interrupt\n"); sc->vmd_irq[i].vmd_res = NULL; goto fail; } } } sc->vmd_child = device_add_child(dev, NULL, -1); if (sc->vmd_child == NULL) { device_printf(dev, "Failed to attach child\n"); goto fail; } error = device_probe_and_attach(sc->vmd_child); if (error) { device_printf(dev, "Failed to add probe child: %d\n", error); (void)device_delete_child(dev, sc->vmd_child); goto fail; } return (0); fail: vmd_free(sc); return (ENXIO); } static int vmd_detach(device_t dev) { struct vmd_softc *sc; int err; sc = device_get_softc(dev); if (sc->vmd_child != NULL) { err = bus_generic_detach(sc->vmd_child); if 
(err) return (err); err = device_delete_child(dev, sc->vmd_child); if (err) return (err); } vmd_free(sc); return (0); } /* Pass request to alloc an MSI-X message up to the parent bridge. */ static int vmd_alloc_msix(device_t pcib, device_t dev, int *irq) { struct vmd_softc *sc = device_get_softc(pcib); device_t bus; int ret; if (sc->vmd_flags & PCIB_DISABLE_MSIX) return (ENXIO); bus = device_get_parent(pcib); ret = PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq); return (ret); } static struct resource * vmd_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Start at max PCI vmd_domain and work down */ if (type == PCI_RES_BUS) { return (pci_domain_alloc_bus(PCI_DOMAINMAX - device_get_unit(dev), child, rid, start, end, count, flags)); } return (pcib_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int vmd_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct resource *res = r; if (type == PCI_RES_BUS) return (pci_domain_adjust_bus(PCI_DOMAINMAX - device_get_unit(dev), child, res, start, end)); return (pcib_adjust_resource(dev, child, type, res, start, end)); } static int vmd_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == PCI_RES_BUS) return (pci_domain_release_bus(PCI_DOMAINMAX - device_get_unit(dev), child, rid, r)); return (pcib_release_resource(dev, child, type, rid, r)); } static int vmd_shutdown(device_t dev) { return (0); } static int vmd_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { return (pcib_route_interrupt(pcib, dev, pin)); } static int vmd_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { return (pcib_alloc_msi(pcib, dev, count, maxcount, irqs)); } static int vmd_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (pcib_release_msi(pcib, dev, count, irqs)); 
} static int vmd_pcib_release_msix(device_t pcib, device_t dev, int irq) { return pcib_release_msix(pcib, dev, irq); } static int vmd_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct vmd_irq_handler *elm; struct vmd_softc *sc; int i; sc = device_get_softc(dev); /* * There appears to be no steering of VMD interrupts from device * to VMD interrupt */ i = 0; elm = malloc(sizeof(*elm), M_DEVBUF, M_NOWAIT|M_ZERO); elm->vmd_child = child; elm->vmd_intr = intr; elm->vmd_rid = rman_get_rid(irq); elm->vmd_arg = arg; TAILQ_INSERT_TAIL(&sc->vmd_irq[i].vmd_list, elm, vmd_link); return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, cookiep)); } static int vmd_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct vmd_irq_handler *elm, *tmp;; struct vmd_softc *sc; sc = device_get_softc(dev); TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp) { if (elm->vmd_child == child && elm->vmd_rid == rman_get_rid(irq)) { TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link); free(elm, M_DEVBUF); } } return (bus_generic_teardown_intr(dev, child, irq, cookie)); } static device_method_t vmd_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmd_probe), DEVMETHOD(device_attach, vmd_attach), DEVMETHOD(device_detach, vmd_detach), DEVMETHOD(device_shutdown, vmd_shutdown), /* Bus interface */ DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmd_alloc_resource), DEVMETHOD(bus_adjust_resource, vmd_adjust_resource), DEVMETHOD(bus_release_resource, vmd_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, vmd_setup_intr), DEVMETHOD(bus_teardown_intr, vmd_teardown_intr), /* pci interface */ DEVMETHOD(pci_read_config, 
vmd_pci_read_config), DEVMETHOD(pci_write_config, vmd_pci_write_config), DEVMETHOD(pci_alloc_devinfo, vmd_alloc_devinfo), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmd_read_config), DEVMETHOD(pcib_write_config, vmd_write_config), DEVMETHOD(pcib_route_interrupt, vmd_pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, vmd_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, vmd_pcib_release_msi), DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix), DEVMETHOD(pcib_release_msix, vmd_pcib_release_msix), DEVMETHOD(pcib_map_msi, pcib_map_msi), DEVMETHOD_END }; static devclass_t vmd_devclass; DEFINE_CLASS_0(vmd, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc)); DRIVER_MODULE(vmd, pci, vmd_pci_driver, vmd_devclass, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd, vmd_devs, nitems(vmd_devs) - 1); MODULE_DEPEND(vmd, vmd_bus, 1, 1, 1); diff --git a/sys/dev/vmd/vmd.h b/sys/dev/vmd/vmd.h index f86630d72c24..e51448c7733e 100644 --- a/sys/dev/vmd/vmd.h +++ b/sys/dev/vmd/vmd.h @@ -1,89 +1,90 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2019 Cisco Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef __VMD_PRIVATE_H__ #define __VMD_PRIVATE_H__ struct vmd_irq_handler { TAILQ_ENTRY(vmd_irq_handler) vmd_link; device_t vmd_child; driver_intr_t *vmd_intr; void *vmd_arg; int vmd_rid; }; struct vmd_irq { struct resource *vmd_res; int vmd_rid; void *vmd_handle; struct vmd_softc *vmd_sc; int vmd_instance; TAILQ_HEAD(,vmd_irq_handler) vmd_list; }; /* * VMD specific data. */ struct vmd_softc { device_t vmd_dev; device_t vmd_child; uint32_t vmd_flags; /* flags */ #define PCIB_SUBTRACTIVE 0x1 #define PCIB_DISABLE_MSI 0x2 #define PCIB_DISABLE_MSIX 0x4 #define PCIB_ENABLE_ARI 0x8 #define PCIB_HOTPLUG 0x10 #define PCIB_HOTPLUG_CMD_PENDING 0x20 #define PCIB_DETACH_PENDING 0x40 #define PCIB_DETACHING 0x80 u_int vmd_domain; /* domain number */ struct pcib_secbus vmd_bus; /* secondary bus numbers */ #define VMD_MAX_BAR 3 struct resource *vmd_regs_resource[VMD_MAX_BAR]; int vmd_regs_rid[VMD_MAX_BAR]; bus_space_handle_t vmd_bhandle; bus_space_tag_t vmd_btag; int vmd_io_rid; struct resource *vmd_io_resource; void *vmd_intr; struct vmd_irq *vmd_irq; int vmd_msix_count; + uint8_t vmd_bus_start; #ifdef TASK_QUEUE_INTR struct taskqueue *vmd_irq_tq; struct task vmd_irq_task; #else struct mtx vmd_irq_lock; #endif }; #endif diff --git a/sys/dev/vmd/vmd_bus.c b/sys/dev/vmd/vmd_bus.c index 74a6faa812b1..5eed6176630f 100644 --- a/sys/dev/vmd/vmd_bus.c +++ b/sys/dev/vmd/vmd_bus.c @@ -1,201 +1,220 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 
2019 Cisco Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" static int vmd_bus_probe(device_t dev) { device_set_desc(dev, "VMD bus"); return (-1000); } /* PCI interface. 
*/ static int vmd_bus_attach(device_t dev) { struct vmd_softc *sc; struct pci_devinfo *dinfo; rman_res_t start, end; int b, s, f; + int found; sc = device_get_softc(device_get_parent(dev)); - /* Start at max PCI vmd_domain and work down */ - b = s = f = 0; - dinfo = pci_read_device(device_get_parent(dev), dev, - PCI_DOMAINMAX - device_get_unit(device_get_parent(dev)), - b, s, f); - if (dinfo == NULL) { - device_printf(dev, "Cannot allocate dinfo!\n"); - return (ENOENT); + /* + * Start at max PCI vmd_domain and work down. Only the VMD + * starting bus is connected to the VMD device directly. Scan + * all slots and functions connected to the starting bus. + */ + + b = sc->vmd_bus_start; + + found = 0; + for (s = 0; s < PCI_SLOTMAX; s++) { + for (f = 0; f < PCI_FUNCMAX; f++) { + dinfo = pci_read_device(device_get_parent(dev), dev, + PCI_DOMAINMAX - device_get_unit( + device_get_parent(dev)), b, s, f); + if (dinfo != NULL) { + found = 1; + pci_add_child(dev, dinfo); + + start = rman_get_start(sc->vmd_regs_resource[1]); + end = rman_get_end(sc->vmd_regs_resource[1]); + resource_list_add_next(&dinfo->resources, + SYS_RES_MEMORY, start, end, end - start + 1); + + start = rman_get_start(sc->vmd_io_resource); + end = rman_get_end(sc->vmd_io_resource); + resource_list_add_next(&dinfo->resources, + SYS_RES_IOPORT, start, end, end - start + 1); + + } + } } - pci_add_child(dev, dinfo); - - start = rman_get_start(sc->vmd_regs_resource[1]); - end = rman_get_end(sc->vmd_regs_resource[1]); - resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY, start, end, - end - start + 1); - - start = rman_get_start(sc->vmd_io_resource); - end = rman_get_end(sc->vmd_io_resource); - resource_list_add_next(&dinfo->resources, SYS_RES_IOPORT, start, end, - end - start + 1); - - bus_generic_attach(dev); + if (found) { + bus_generic_attach(dev); + } return (0); } static int vmd_bus_detach(device_t dev) { + struct vmd_softc *sc; struct pci_devinfo *dinfo; int b, s, f; device_delete_children(dev); - b = s = f = 0;
- dinfo = pci_read_device(device_get_parent(dev), dev, - PCI_DOMAINMAX - device_get_unit(device_get_parent(dev)), - b, s, f); - if (dinfo == NULL) { - resource_list_free(&dinfo->resources); + sc = device_get_softc(device_get_parent(dev)); + b = sc->vmd_bus_start; + + for (s = 0; s < PCI_SLOTMAX; s++) { + for (f = 0; f < PCI_FUNCMAX; f++) { + dinfo = pci_read_device(device_get_parent(dev), dev, + PCI_DOMAINMAX - device_get_unit( + device_get_parent(dev)), b, s, f); + if (dinfo != NULL) + resource_list_free(&dinfo->resources); + } } return (0); } static int vmd_bus_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct resource *res = r; if (type == SYS_RES_MEMORY) { /* VMD device controls this */ return (0); } return (bus_generic_adjust_resource(dev, child, type, res, start, end)); } static int vmd_bus_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == SYS_RES_MEMORY) { /* VMD device controls this */ return (0); } return (pci_release_resource(dev, child, type, rid, r)); } static struct resource * vmd_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct vmd_softc *sc; sc = device_get_softc(device_get_parent(dev)); if (type == SYS_RES_MEMORY) { /* remap to VMD resources */ if (*rid == PCIR_MEMBASE_1) { return (sc->vmd_regs_resource[1]); } else if (*rid == PCIR_PMBASEL_1) { return (sc->vmd_regs_resource[2]); } else { return (sc->vmd_regs_resource[2]); } } return (pci_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int vmd_bus_shutdown(device_t dev) { return (0); } static device_method_t vmd_bus_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmd_bus_probe), DEVMETHOD(device_attach, vmd_bus_attach), DEVMETHOD(device_detach, vmd_bus_detach), DEVMETHOD(device_shutdown, vmd_bus_shutdown), /* Bus interface */ 
DEVMETHOD(bus_alloc_resource, vmd_bus_alloc_resource), DEVMETHOD(bus_adjust_resource, vmd_bus_adjust_resource), DEVMETHOD(bus_release_resource, vmd_bus_release_resource), /* pci interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD_END }; static devclass_t vmd_bus_devclass; DEFINE_CLASS_1(vmd_bus, vmd_bus_pci_driver, vmd_bus_pci_methods, sizeof(struct pci_softc), pci_driver); DRIVER_MODULE(vmd_bus, vmd, vmd_bus_pci_driver, vmd_bus_devclass, NULL, NULL); MODULE_VERSION(vmd_bus, 1);