Index: share/man/man4/Makefile
===================================================================
--- share/man/man4/Makefile
+++ share/man/man4/Makefile
@@ -539,6 +539,7 @@
 	vkbd.4 \
 	vlan.4 \
 	vxlan.4 \
+	${_vmd.4} \
 	${_vmm.4} \
 	${_vmx.4} \
 	vpo.4 \
@@ -822,6 +823,7 @@
 _qlnxe.4=	qlnxe.4
 _sfxge.4=	sfxge.4
 _smartpqi.4=	smartpqi.4
+_vmd.4=		vmd.4
 MLINKS+=qlxge.4 if_qlxge.4
 MLINKS+=qlxgb.4 if_qlxgb.4
Index: share/man/man4/vmd.4
===================================================================
--- /dev/null
+++ share/man/man4/vmd.4
@@ -0,0 +1,63 @@
+.\"-
+.\" SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+.\"
+.\" Copyright 2019 Cisco Systems, Inc.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd July 31, 2019
+.Dt VMD 4
+.Os
+.Sh NAME
+.Nm vmd
+.Nd Intel Volume Management Device driver
+.Sh SYNOPSIS
+To compile this driver into the kernel, place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device vmd"
+.Cd "device vmd_bus"
+.Ed
+.Pp
+Alternatively, to load the driver as a module at boot time, place the
+following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+vmd_load="YES"
+.Ed
+.Sh DESCRIPTION
+This driver attaches to Intel VMD devices as a new PCI domain and then
+triggers a probe of the PCI devices behind it.
+Intel VMD is used with Intel's VROC (Virtual RAID on CPU) together with
+NVMe drives on Skylake-SP servers.
+.Sh SEE ALSO
+.Xr graid 8
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 13.0 .
+.Sh BUGS
+.Nm
+is currently only available on amd64.
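As a reference for the DESCRIPTION above: vmd(4) claims the VMD PCI endpoint and re-exposes the root ports hidden behind it as a new PCI domain. Below is a minimal stand-alone sketch (not part of the patch) of the match logic that vmd_probe() in this diff uses; the vendor/device IDs (8086:201d and 8086:28c0) are taken from the diff, while the helper name, the second sample ID, and the tiny test harness are made up for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the vendor/device match performed by vmd_probe() in this patch. */
static bool
is_vmd_endpoint(uint16_t vendor, uint16_t device)
{
	return (vendor == 0x8086 && (device == 0x201d || device == 0x28c0));
}

int
main(void)
{
	printf("%d\n", is_vmd_endpoint(0x8086, 0x201d));	/* 1: VMD endpoint */
	printf("%d\n", is_vmd_endpoint(0x8086, 0x1234));	/* 0: arbitrary other ID */
	return (0);
}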
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC
+++ sys/amd64/conf/GENERIC
@@ -193,6 +193,10 @@
 device		nvme			# base NVMe driver
 device		nvd			# expose NVMe namespaces as disks, depends on nvme
 
+# Intel Volume Management Device (VMD) support
+device		vmd			# base VMD device
+device		vmd_bus			# bus for VMD children
+
 # atkbdc0 controls both the keyboard and the PS/2 mouse
 device		atkbdc			# AT keyboard controller
 device		atkbd			# AT keyboard
Index: sys/amd64/conf/NOTES
===================================================================
--- sys/amd64/conf/NOTES
+++ sys/amd64/conf/NOTES
@@ -461,6 +461,11 @@
 device		nvd		# expose NVMe namespaces as disks, depends on nvme
 
 #
+# Intel Volume Management Device (VMD) support
+device		vmd		# base VMD device
+device		vmd_bus		# bus for VMD children
+
+#
 # PMC-Sierra SAS/SATA controller
 device		pmspcv
Index: sys/conf/files.amd64
===================================================================
--- sys/conf/files.amd64
+++ sys/conf/files.amd64
@@ -368,6 +368,8 @@
 dev/nvme/nvme_sysctl.c		optional	nvme
 dev/nvme/nvme_test.c		optional	nvme
 dev/nvme/nvme_util.c		optional	nvme
+dev/vmd/vmd.c			optional	vmd
+dev/vmd/vmd_bus.c		optional	vmd_bus
 dev/nvram/nvram.c		optional	nvram isa
 dev/random/ivy.c		optional	rdrand_rng !random_loadable
 dev/random/nehemiah.c		optional	padlock_rng !random_loadable
Index: sys/dev/vmd/vmd.h
===================================================================
--- /dev/null
+++ sys/dev/vmd/vmd.h
@@ -0,0 +1,86 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright 2019 Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __VMD_PRIVATE_H__
+#define __VMD_PRIVATE_H__
+
+struct vmd_irq_handler {
+	TAILQ_ENTRY(vmd_irq_handler)	vmd_link;
+	device_t			vmd_child;
+	driver_intr_t			*vmd_intr;
+	void				*vmd_arg;
+	int				vmd_rid;
+};
+
+struct vmd_irq {
+	struct resource			*vmd_res;
+	int				vmd_rid;
+	void				*vmd_handle;
+	struct vmd_softc		*vmd_sc;
+	int				vmd_instance;
+	TAILQ_HEAD(,vmd_irq_handler)	vmd_list;
+};
+
+/*
+ * VMD specific data.
+ */
+struct vmd_softc
+{
+	device_t		vmd_dev;
+	device_t		vmd_child;
+	uint32_t		vmd_flags;	/* flags */
+#define	PCIB_SUBTRACTIVE	0x1
+#define	PCIB_DISABLE_MSI	0x2
+#define	PCIB_DISABLE_MSIX	0x4
+#define	PCIB_ENABLE_ARI		0x8
+#define	PCIB_HOTPLUG		0x10
+#define	PCIB_HOTPLUG_CMD_PENDING 0x20
+#define	PCIB_DETACH_PENDING	0x40
+#define	PCIB_DETACHING		0x80
+	u_int			vmd_domain;	/* domain number */
+	struct pcib_secbus	vmd_bus;	/* secondary bus numbers */
+
+#define	VMD_MAX_BAR		3
+	struct resource		*vmd_regs_resource[VMD_MAX_BAR];
+	int			vmd_regs_rid[VMD_MAX_BAR];
+	bus_space_handle_t	vmd_bhandle;
+	bus_space_tag_t		vmd_btag;
+	device_t		vmd_bus_child;
+	int			vmd_io_rid;
+	struct resource		*vmd_io_resource;
+	void			*vmd_intr;
+	struct vmd_irq		*vmd_irq;
+	int			vmd_msix_count;
+	struct taskqueue	*vmd_irq_tq;
+	struct task		vmd_irq_task;
+};
+
+#endif
Index: sys/dev/vmd/vmd.c
===================================================================
--- /dev/null
+++ sys/dev/vmd/vmd.c
@@ -0,0 +1,592 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright 2019 Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#define TASK_QUEUE_INTR 1
+#include
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+static int
+vmd_probe(device_t dev)
+{
+	uint16_t devid = pci_get_device(dev);
+
+	if (pci_get_vendor(dev) == 0x8086 &&
+	    (devid == 0x201d || devid == 0x28c0)) {
+		return (-10000);
+	}
+	return (ENXIO);
+}
+
+static void
+vmd_free(struct vmd_softc *sc)
+{
+	int i;
+	struct vmd_irq_handler *elm, *tmp;
+
+	if (sc->vmd_irq_tq != NULL) {
+#ifdef TASK_QUEUE_INTR
+		taskqueue_drain(sc->vmd_irq_tq, &sc->vmd_irq_task);
+#endif
+		sc->vmd_irq_tq = NULL;
+	}
+	if (sc->vmd_irq != NULL) {
+		for (i = 0; i < sc->vmd_msix_count; i++) {
+			if (sc->vmd_irq[i].vmd_res != NULL) {
+				bus_teardown_intr(sc->vmd_dev,
+				    sc->vmd_irq[i].vmd_res,
+				    sc->vmd_irq[i].vmd_handle);
+				bus_release_resource(sc->vmd_dev, SYS_RES_IRQ,
+				    sc->vmd_irq[i].vmd_rid,
+				    sc->vmd_irq[i].vmd_res);
+			}
+		}
+		TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link,
+		    tmp) {
+			TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
+			free(elm, M_DEVBUF);
+		}
+	}
+	free(sc->vmd_irq, M_DEVBUF);
+	sc->vmd_irq = NULL;
+	pci_release_msi(sc->vmd_dev);
+	for (i = 0; i < VMD_MAX_BAR; i++) {
+		if (sc->vmd_regs_resource[i] != NULL)
+			bus_release_resource(sc->vmd_dev, SYS_RES_MEMORY,
+			    sc->vmd_regs_rid[i],
+			    sc->vmd_regs_resource[i]);
+	}
+	if (sc->vmd_io_resource)
+		bus_release_resource(device_get_parent(sc->vmd_dev),
+		    SYS_RES_IOPORT, sc->vmd_io_rid, sc->vmd_io_resource);
+}
+
+/* The config space of the PCI roots hidden behind VMD is mapped through BAR(0). */
+
+static uint32_t
+vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
+{
+	struct vmd_softc *sc;
+	bus_addr_t offset;
+
+	offset = (b << 20) + (s << 15) + (f << 12) + reg;
+	sc = device_get_softc(dev);
+	switch (width) {
+	case 4:
+		return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
+		    offset));
+	case 2:
+		return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
+		    offset));
+	case 1:
+		return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
+		    offset));
+	default:
+		return (0xffffffff);
+	}
+}
+
+static void
+vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
+    uint32_t val, int width)
+{
+	struct vmd_softc *sc;
+	bus_addr_t offset;
+
+	offset = (b << 20) + (s << 15) + (f << 12) + reg;
+	sc = device_get_softc(dev);
+
+	switch (width) {
+	case 4:
+		return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
+		    offset, val));
+	case 2:
+		return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
+		    offset, val));
+	case 1:
+		return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
+		    offset, val));
+	}
+}
+
+static uint32_t
+vmd_pci_read_config(device_t dev, device_t child, int reg, int width)
+{
+	struct pci_devinfo *dinfo = device_get_ivars(child);
+	pcicfgregs *cfg = &dinfo->cfg;
+
+	return (vmd_read_config(dev, cfg->bus, cfg->slot, cfg->func, reg,
+	    width));
+}
+
+static void
+vmd_pci_write_config(device_t dev, device_t child, int reg, uint32_t val,
+    int width)
+{
+	struct pci_devinfo *dinfo = device_get_ivars(child);
+	pcicfgregs *cfg = &dinfo->cfg;
+
+	vmd_write_config(dev, cfg->bus, cfg->slot, cfg->func, reg, val, width);
+}
+
+static struct pci_devinfo *
+vmd_alloc_devinfo(device_t dev)
+{
+	struct pci_devinfo *dinfo;
+
+	dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
+	return (dinfo);
+}
+
+static void
+vmd_intr(void *arg)
+{
+	struct vmd_irq *irq;
+	struct vmd_softc *sc;
+#ifndef TASK_QUEUE_INTR
+	struct vmd_irq_handler *elm, *tmp_elm;
+#endif
+
+	irq = (struct vmd_irq *)arg;
+	sc = irq->vmd_sc;
+#ifdef TASK_QUEUE_INTR
+	taskqueue_enqueue(taskqueue_swi, &sc->vmd_irq_task);
+#else
+	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
+		(elm->vmd_intr)(elm->vmd_arg);
+	}
+#endif
+}
+
+#ifdef TASK_QUEUE_INTR
+static void
+vmd_handle_irq(void *context, int pending)
+{
+	struct vmd_irq_handler *elm, *tmp_elm;
+	struct vmd_softc *sc;
+
+	sc = context;
+
+	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
+		(elm->vmd_intr)(elm->vmd_arg);
+	}
+}
+#endif
+
+static int
+vmd_attach(device_t dev)
+{
+	struct vmd_softc *sc;
+	struct pcib_secbus *bus;
+	uint32_t bar;
+	int i, j, error;
+	int rid, sec_reg;
+	static int b;
+	static int s;
+	static int f;
+	int min_count = 1;
+	char buf[64];
+
+	sc = device_get_softc(dev);
+	bzero(sc, sizeof(*sc));
+	sc->vmd_dev = dev;
+	b = s = f = 0;
+
+	pci_enable_busmaster(dev);
+
+#ifdef TASK_QUEUE_INTR
+	TASK_INIT(&sc->vmd_irq_task, 0, vmd_handle_irq, sc);
+#endif
+	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
+		sc->vmd_regs_rid[i] = PCIR_BAR(j);
+		bar = pci_read_config(dev, PCIR_BAR(0), 4);
+		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
+		    PCIM_BAR_MEM_64)
+			j++;
+		if ((sc->vmd_regs_resource[i] = bus_alloc_resource_any(
+		    sc->vmd_dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i],
+		    RF_ACTIVE)) == NULL) {
+			device_printf(dev, "Cannot allocate resources\n");
+			goto fail;
+		}
+	}
+
+	sc->vmd_io_rid = PCIR_IOBASEL_1;
+	sc->vmd_io_resource = bus_alloc_resource_any(
+	    device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, &sc->vmd_io_rid,
+	    RF_ACTIVE);
+	if (sc->vmd_io_resource == NULL) {
+		device_printf(dev, "Cannot allocate IO\n");
+		goto fail;
+	}
+
+	sc->vmd_btag = rman_get_bustag(sc->vmd_regs_resource[0]);
+	sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_resource[0]);
+
+	pci_write_config(dev, PCIR_PRIBUS_2,
+	    pcib_get_bus(device_get_parent(dev)), 1);
+
+	sec_reg = PCIR_SECBUS_1;
+	bus = &sc->vmd_bus;
+	bus->sub_reg = PCIR_SUBBUS_1;
+	bus->sec = vmd_read_config(dev, b, s, f, sec_reg, 1);
+	bus->sub = vmd_read_config(dev, b, s, f, bus->sub_reg, 1);
+	bus->dev = dev;
+	bus->rman.rm_start = 0;
+	bus->rman.rm_end = PCI_BUSMAX;
+	bus->rman.rm_type = RMAN_ARRAY;
+	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
+	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
+	error = rman_init(&bus->rman);
+	if (error) {
+		device_printf(dev, "Failed to initialize %s bus number rman\n",
+		    device_get_nameunit(dev));
+		goto fail;
+	}
+
+	/*
+	 * Allocate a bus range.  This will return an existing bus range
+	 * if one exists, or a new bus range if one does not.
+	 */
+	rid = 0;
+	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
+	    min_count, 0);
+	if (bus->res == NULL) {
+		/*
+		 * Fall back to just allocating a range of a single bus
+		 * number.
+		 */
+		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
+		    1, 0);
+	} else if (rman_get_size(bus->res) < min_count) {
+		/*
+		 * Attempt to grow the existing range to satisfy the
+		 * minimum desired count.
+		 */
+		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
+		    rman_get_start(bus->res), rman_get_start(bus->res) +
+		    min_count - 1);
+	}
+
+	/*
+	 * Add the initial resource to the rman.
+	 */
+	if (bus->res != NULL) {
+		error = rman_manage_region(&bus->rman,
+		    rman_get_start(bus->res), rman_get_end(bus->res));
+		if (error) {
+			device_printf(dev, "Failed to add resource to rman\n");
+			goto fail;
+		}
+		bus->sec = rman_get_start(bus->res);
+		bus->sub = rman_get_end(bus->res);
+	}
+
+	sc->vmd_msix_count = pci_msix_count(dev);
+	if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
+		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
+		    sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
+
+		for (i = 0; i < sc->vmd_msix_count; i++) {
+			sc->vmd_irq[i].vmd_rid = i + 1;
+			sc->vmd_irq[i].vmd_sc = sc;
+			sc->vmd_irq[i].vmd_instance = i;
+			sc->vmd_irq[i].vmd_res = bus_alloc_resource_any(dev,
+			    SYS_RES_IRQ, &sc->vmd_irq[i].vmd_rid, RF_ACTIVE);
+			if (sc->vmd_irq[i].vmd_res == NULL) {
+				device_printf(dev, "Failed to alloc irq\n");
+				goto fail;
+			}
+
+			TAILQ_INIT(&sc->vmd_irq[i].vmd_list);
+			if (bus_setup_intr(dev, sc->vmd_irq[i].vmd_res,
+			    INTR_TYPE_MISC | INTR_MPSAFE, NULL, vmd_intr,
+			    &sc->vmd_irq[i], &sc->vmd_irq[i].vmd_handle)) {
+				device_printf(sc->vmd_dev,
+				    "Cannot set up interrupt\n");
+				sc->vmd_irq[i].vmd_res = NULL;
+				goto fail;
+			}
+		}
+	}
+
+	sc->vmd_child = device_add_child(dev, NULL, -1);
+	if (sc->vmd_child == NULL) {
+		device_printf(dev, "Failed to attach child\n");
+		goto fail;
+	}
+
+	error = device_probe_and_attach(sc->vmd_child);
+	if (error) {
+		device_printf(dev, "Failed to probe and attach child\n");
+		goto fail;
+	}
+
+	return (0);
+
+fail:
+	vmd_free(sc);
+	return (ENXIO);
+}
+
+static int
+vmd_detach(device_t dev)
+{
+	struct vmd_softc *sc;
+	int err;
+
+	sc = device_get_softc(dev);
+	if (sc->vmd_child != NULL) {
+		err = bus_generic_detach(sc->vmd_child);
+		if (err)
+			return (err);
+		err = device_delete_child(dev, sc->vmd_child);
+		if (err)
+			return (err);
+	}
+	if (sc->vmd_bus.rman.rm_end != 0)
+		rman_fini(&sc->vmd_bus.rman);
+
+	vmd_free(sc);
+	return (0);
+}
+
+/* Pass request to alloc an MSI-X message up to the parent bridge. */
+static int
+vmd_alloc_msix(device_t pcib, device_t dev, int *irq)
+{
+	struct vmd_softc *sc = device_get_softc(pcib);
+	device_t bus;
+	int ret;
+
+	if (sc->vmd_flags & PCIB_DISABLE_MSIX)
+		return (ENXIO);
+	bus = device_get_parent(pcib);
+	ret = PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq);
+	return (ret);
+}
+
+static struct resource *
+vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
+    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+	/* Start at max PCI vmd_domain and work down */
+	if (type == PCI_RES_BUS) {
+		return (pci_domain_alloc_bus(PCI_DOMAINMAX -
+		    device_get_unit(dev), child, rid, start, end,
+		    count, flags));
+	}
+
+	return (pcib_alloc_resource(dev, child, type, rid, start, end,
+	    count, flags));
+}
+
+static int
+vmd_adjust_resource(device_t dev, device_t child, int type,
+    struct resource *r, rman_res_t start, rman_res_t end)
+{
+	struct resource *res = r;
+
+	if (type == PCI_RES_BUS)
+		return (pci_domain_adjust_bus(PCI_DOMAINMAX -
+		    device_get_unit(dev), child, res, start, end));
+	return (pcib_adjust_resource(dev, child, type, res, start, end));
+}
+
+static int
+vmd_release_resource(device_t dev, device_t child, int type, int rid,
+    struct resource *r)
+{
+	if (type == PCI_RES_BUS)
+		return (pci_domain_release_bus(PCI_DOMAINMAX -
+		    device_get_unit(dev), child, rid, r));
+	return (pcib_release_resource(dev, child, type, rid, r));
+}
+
+static int
+vmd_shutdown(device_t dev)
+{
+	return (0);
+}
+
+static int
+vmd_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
+{
+	return (pcib_route_interrupt(pcib, dev, pin));
+}
+
+static int
+vmd_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
+    int *irqs)
+{
+	return (pcib_alloc_msi(pcib, dev, count, maxcount, irqs));
+}
+
+static int
+vmd_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
+{
+	return (pcib_release_msi(pcib, dev, count, irqs));
+}
+
+static int
+vmd_pcib_release_msix(device_t pcib, device_t dev, int irq)
+{
+	return (pcib_release_msix(pcib, dev, irq));
+}
+
+static int
+vmd_setup_intr(device_t dev, device_t child, struct resource *irq,
+    int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
+    void **cookiep)
+{
+	struct vmd_irq_handler *elm;
+	struct vmd_softc *sc;
+	int i;
+
+	sc = device_get_softc(dev);
+
+	/*
+	 * There appears to be no steering of VMD interrupts from device
+	 * to VMD interrupt.
+	 */
+	i = 0;
+	elm = malloc(sizeof(*elm), M_DEVBUF, M_NOWAIT | M_ZERO);
+	elm->vmd_child = child;
+	elm->vmd_intr = intr;
+	elm->vmd_rid = rman_get_rid(irq);
+	elm->vmd_arg = arg;
+	TAILQ_INSERT_TAIL(&sc->vmd_irq[i].vmd_list, elm, vmd_link);
+
+	return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
+	    arg, cookiep));
+}
+
+static int
+vmd_teardown_intr(device_t dev, device_t child, struct resource *irq,
+    void *cookie)
+{
+	struct vmd_irq_handler *elm, *tmp;
+	struct vmd_softc *sc;
+
+	sc = device_get_softc(dev);
+	TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp) {
+		if (elm->vmd_child == child &&
+		    elm->vmd_rid == rman_get_rid(irq)) {
+			TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
+			free(elm, M_DEVBUF);
+		}
+	}
+
+	return (bus_generic_teardown_intr(dev, child, irq, cookie));
+}
+
+static device_method_t vmd_pci_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,			vmd_probe),
+	DEVMETHOD(device_attach,		vmd_attach),
+	DEVMETHOD(device_detach,		vmd_detach),
+	DEVMETHOD(device_shutdown,		vmd_shutdown),
+
+	/* pci interface */
+	DEVMETHOD(pci_read_config,		vmd_pci_read_config),
+	DEVMETHOD(pci_write_config,		vmd_pci_write_config),
+	DEVMETHOD(pci_alloc_devinfo,		vmd_alloc_devinfo),
+
+	/* pcib interface */
+	DEVMETHOD(pcib_maxslots,		pcib_maxslots),
+	DEVMETHOD(pcib_read_config,		vmd_read_config),
+	DEVMETHOD(pcib_write_config,		vmd_write_config),
+	DEVMETHOD(pcib_route_interrupt,		vmd_pcib_route_interrupt),
+	DEVMETHOD(pcib_alloc_msi,		vmd_pcib_alloc_msi),
+	DEVMETHOD(pcib_release_msi,		vmd_pcib_release_msi),
+	DEVMETHOD(pcib_alloc_msix,		vmd_alloc_msix),
+	DEVMETHOD(pcib_release_msix,		vmd_pcib_release_msix),
+	DEVMETHOD(pcib_map_msi,			pcib_map_msi),
+
+	/* Bus interface */
+	DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
+	DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
+	DEVMETHOD(bus_alloc_resource,		vmd_alloc_resource),
+	DEVMETHOD(bus_adjust_resource,		vmd_adjust_resource),
+	DEVMETHOD(bus_release_resource,		vmd_release_resource),
+	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
+	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
+	DEVMETHOD(bus_setup_intr,		vmd_setup_intr),
+	DEVMETHOD(bus_teardown_intr,		vmd_teardown_intr),
+
+	DEVMETHOD_END
+};
+
+static devclass_t vmd_devclass;
+
+DEFINE_CLASS_0(vmd, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
+DRIVER_MODULE(vmd, pci, vmd_pci_driver, vmd_devclass, NULL, NULL);
+MODULE_DEPEND(vmd, vmd_bus, 1, 1, 1);
Index: sys/dev/vmd/vmd_bus.c
===================================================================
--- /dev/null
+++ sys/dev/vmd/vmd_bus.c
@@ -0,0 +1,235 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright 2019 Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+static int
+vmd_bus_probe(device_t dev)
+{
+	device_set_desc(dev, "VMD bus");
+
+	return (-1000);
+}
+
+/* PCI interface. */
+
+static void
+vmd_bus_free(struct vmd_softc *sc)
+{
+}
+
+static int
+vmd_bus_attach(device_t dev)
+{
+	struct vmd_softc *sc;
+	struct pci_devinfo *dinfo;
+	struct pcib_secbus *bus;
+	rman_res_t start, end;
+	int b, s, f;
+	int error;
+
+	sc = device_get_softc(device_get_parent(dev));
+
+	/* Start at max PCI vmd_domain and work down */
+	b = s = f = 0;
+	dinfo = pci_read_device(device_get_parent(dev), dev,
+	    PCI_DOMAINMAX - device_get_unit(device_get_parent(dev)),
+	    b, s, f);
+	if (dinfo == NULL) {
+		device_printf(dev, "Cannot allocate dinfo!\n");
+		return (ENOENT);
+	}
+
+	sc->vmd_bus_child = device_add_child(dev, NULL, -1);
+	if (sc->vmd_bus_child == NULL) {
+		device_printf(dev, "Cannot add child!\n");
+		return (ENOENT);
+	}
+	dinfo->cfg.dev = sc->vmd_bus_child;
+	resource_list_init(&dinfo->resources);
+	bus = &sc->vmd_bus;
+
+	start = rman_get_start(sc->vmd_regs_resource[1]);
+	end = rman_get_end(sc->vmd_regs_resource[1]);
+	resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY, start, end,
+	    end - start + 1);
+
+	start = rman_get_start(sc->vmd_io_resource);
+	end = rman_get_end(sc->vmd_io_resource);
+	resource_list_add_next(&dinfo->resources, SYS_RES_IOPORT, start, end,
+	    end - start + 1);
+	device_set_ivars(sc->vmd_bus_child, dinfo);
+
+	pci_cfg_save(sc->vmd_bus_child, dinfo, 0);
+	pci_cfg_restore(sc->vmd_bus_child, dinfo);
+	pci_print_verbose(dinfo);
+	pci_add_resources(dev, sc->vmd_bus_child, 0, 0);
+	error = device_probe_and_attach(sc->vmd_bus_child);
+	if (error) {
+		device_printf(dev, "Failed to probe and attach child\n");
+		goto fail;
+	}
+
+	return (error);
+
+fail:
+	vmd_bus_free(sc);
+	return (ENXIO);
+}
+
+static int
+vmd_bus_detach(device_t dev)
+{
+	struct vmd_softc *sc;
+	struct pci_devinfo *dinfo;
+	int b, s, f, err;
+
+	sc = device_get_softc(dev);
+	if (sc->vmd_bus_child != NULL) {
+		err = bus_generic_detach(sc->vmd_bus_child);
+		if (err)
+			return (err);
+		err = device_delete_children(sc->vmd_bus_child);
+		if (err)
+			return (err);
+	}
+
+	b = s = f = 0;
+	dinfo = pci_read_device(device_get_parent(dev), dev,
+	    PCI_DOMAINMAX - device_get_unit(device_get_parent(dev)),
+	    b, s, f);
+	if (dinfo != NULL) {
+		resource_list_free(&dinfo->resources);
+	}
+	vmd_bus_free(sc);
+	return (0);
+}
+
+static int
+vmd_bus_adjust_resource(device_t dev, device_t child, int type,
+    struct resource *r, rman_res_t start, rman_res_t end)
+{
+	struct resource *res = r;
+
+	if (type == SYS_RES_MEMORY) {
+		/* VMD device controls this */
+		return (0);
+	}
+
+	return (bus_generic_adjust_resource(dev, child, type, res, start,
+	    end));
+}
+
+static int
+vmd_bus_release_resource(device_t dev, device_t child, int type, int rid,
+    struct resource *r)
+{
+	if (type == SYS_RES_MEMORY) {
+		/* VMD device controls this */
+		return (0);
+	}
+
+	return (pci_release_resource(dev, child, type, rid, r));
+}
+
+static struct resource *
+vmd_bus_alloc_resource(device_t dev, device_t child, int type, int *rid,
+    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+	struct vmd_softc *sc = device_get_softc(device_get_parent(dev));
+
+	if (type == SYS_RES_MEMORY) {
+		/* Remap to VMD resources. */
+		if (*rid == PCIR_MEMBASE_1) {
+			return (sc->vmd_regs_resource[1]);
+		} else if (*rid == PCIR_PMBASEL_1) {
+			return (sc->vmd_regs_resource[2]);
+		} else {
+			return (sc->vmd_regs_resource[2]);
+		}
+	}
+	return (pci_alloc_resource(dev, child, type, rid, start, end,
+	    count, flags));
+}
+
+static int
+vmd_bus_shutdown(device_t dev)
+{
+	return (0);
+}
+
+static device_method_t vmd_bus_pci_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		vmd_bus_probe),
+	DEVMETHOD(device_attach,	vmd_bus_attach),
+	DEVMETHOD(device_detach,	vmd_bus_detach),
+	DEVMETHOD(device_shutdown,	vmd_bus_shutdown),
+
+	/* Bus interface */
+	DEVMETHOD(bus_alloc_resource,	vmd_bus_alloc_resource),
+	DEVMETHOD(bus_adjust_resource,	vmd_bus_adjust_resource),
+	DEVMETHOD(bus_release_resource,	vmd_bus_release_resource),
+
+	/* pci interface */
+	DEVMETHOD(pci_read_config,	pci_read_config_method),
+	DEVMETHOD(pci_write_config,	pci_write_config_method),
+	DEVMETHOD(pci_alloc_devinfo,	pci_alloc_devinfo_method),
+
+	DEVMETHOD_END
+};
+
+static devclass_t vmd_bus_devclass;
+
+DEFINE_CLASS_1(vmd_bus, vmd_bus_pci_driver, vmd_bus_pci_methods,
+    sizeof(struct pci_softc), pci_driver);
+
+DRIVER_MODULE(vmd_bus, vmd, vmd_bus_pci_driver, vmd_bus_devclass, NULL, NULL);
+MODULE_VERSION(vmd_bus, 1);
Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile
+++ sys/modules/Makefile
@@ -371,6 +371,7 @@
 	${_viawd} \
 	videomode \
 	vkbd \
+	${_vmd} \
 	${_vmm} \
 	${_vmware} \
 	${_vpo} \
@@ -696,6 +697,7 @@
 _pms=		pms
 _qlxge=		qlxge
 _qlxgb=		qlxgb
+_vmd=		vmd
 .if ${MK_SOURCELESS_UCODE} != "no"
 _qlxgbe=	qlxgbe
 _qlnx=		qlnx
Index: sys/modules/vmd/Makefile
===================================================================
--- /dev/null
+++ sys/modules/vmd/Makefile
@@ -0,0 +1,21 @@
+# $FreeBSD$
+
+.PATH:	${SRCTOP}/sys/dev/vmd
+
+KMOD=	vmd
+
+SRCS=	\
+	vmd_bus.c \
+	vmd.c \
+	bus_if.h \
+	device_if.h \
+	pci_if.h \
+	pcib_if.h \
+	opt_global.h \
+
+CFLAGS+= -include opt_global.h
+
+opt_global.h:
+	echo "#define NEW_PCIB 1" >> ${.TARGET}
+
+.include <bsd.kmod.mk>
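For anyone reading vmd_read_config()/vmd_write_config() in the driver above: config space of the devices behind VMD is not reachable through the normal PCI config mechanism; it lives inside BAR(0) of the VMD endpoint, at an offset derived from the bus/slot/function numbers and the register. Below is a stand-alone sketch (not part of the patch) of that offset arithmetic; the shift constants come straight from the diff, while the helper name and the sample values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Offset of (bus, slot, func, reg) inside the BAR(0) shadow config window. */
static uint64_t
vmd_cfg_offset(unsigned bus, unsigned slot, unsigned func, unsigned reg)
{
	return (((uint64_t)bus << 20) + (slot << 15) + (func << 12) + reg);
}

int
main(void)
{
	/* Bus 1, slot 0, func 0, register 0 -> 0x100000. */
	printf("0x%llx\n", (unsigned long long)vmd_cfg_offset(1, 0, 0, 0));
	/* Bus 0, slot 2, func 1, register 0x10 -> 0x11010. */
	printf("0x%llx\n", (unsigned long long)vmd_cfg_offset(0, 2, 1, 0x10));
	return (0);
}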