Index: lib/libvmmapi/vmmapi.h =================================================================== --- lib/libvmmapi/vmmapi.h +++ lib/libvmmapi/vmmapi.h @@ -111,6 +111,8 @@ int vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t segoff, size_t len, int prot); +int vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len); + int vm_create(const char *name); int vm_get_device_fd(struct vmctx *ctx); struct vmctx *vm_open(const char *name); @@ -176,6 +178,8 @@ int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func); int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); +int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len); int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec); int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, Index: lib/libvmmapi/vmmapi.c =================================================================== --- lib/libvmmapi/vmmapi.c +++ lib/libvmmapi/vmmapi.c @@ -251,6 +251,19 @@ return (0); } +int +vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len) +{ + struct vm_munmap munmap; + int error; + + munmap.gpa = gpa; + munmap.len = len; + + error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap); + return (error); +} + int vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid, vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) @@ -980,6 +993,22 @@ return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio)); } +int +vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len) +{ + struct vm_pptdev_mmio pptmmio; + + bzero(&pptmmio, sizeof(pptmmio)); + pptmmio.bus = bus; + pptmmio.slot = slot; + pptmmio.func = func; + pptmmio.gpa = gpa; + pptmmio.len = len; + + return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio)); +} + int vm_setup_pptdev_msi(struct vmctx *ctx, 
int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec) @@ -1644,7 +1673,7 @@ /* keep in sync with machine/vmm_dev.h */ static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT, VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG, - VM_MMAP_GETNEXT, VM_SET_REGISTER, VM_GET_REGISTER, + VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER, VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR, VM_SET_REGISTER_SET, VM_GET_REGISTER_SET, VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV, @@ -1654,7 +1683,7 @@ VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER, VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV, VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI, - VM_PPTDEV_MSIX, VM_PPTDEV_DISABLE_MSIX, + VM_PPTDEV_MSIX, VM_PPTDEV_DISABLE_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_INJECT_NMI, VM_STATS, VM_STAT_DESC, VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE, VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA, Index: sys/amd64/include/vmm.h =================================================================== --- sys/amd64/include/vmm.h +++ sys/amd64/include/vmm.h @@ -231,6 +231,7 @@ */ int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, size_t len, int prot, int flags); +int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); void vm_free_memseg(struct vm *vm, int ident); int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); Index: sys/amd64/include/vmm_dev.h =================================================================== --- sys/amd64/include/vmm_dev.h +++ sys/amd64/include/vmm_dev.h @@ -49,6 +49,11 @@ #define VM_MEMMAP_F_WIRED 0x01 #define VM_MEMMAP_F_IOMMU 0x02 +struct vm_munmap { + vm_paddr_t gpa; + size_t len; +}; + #define VM_MEMSEG_NAME(m) ((m)->name[0] != '\0' ? 
(m)->name : NULL) struct vm_memseg { int segid; @@ -270,6 +275,7 @@ IOCNUM_MMAP_MEMSEG = 16, IOCNUM_MMAP_GETNEXT = 17, IOCNUM_GLA2GPA_NOFAULT = 18, + IOCNUM_MUNMAP_MEMSEG = 19, /* register/state accessors */ IOCNUM_SET_REGISTER = 20, @@ -302,6 +308,7 @@ IOCNUM_PPTDEV_MSI = 43, IOCNUM_PPTDEV_MSIX = 44, IOCNUM_PPTDEV_DISABLE_MSIX = 45, + IOCNUM_UNMAP_PPTDEV_MMIO = 46, /* statistics */ IOCNUM_VM_STATS = 50, @@ -358,6 +365,8 @@ _IOW('v', IOCNUM_MMAP_MEMSEG, struct vm_memmap) #define VM_MMAP_GETNEXT \ _IOWR('v', IOCNUM_MMAP_GETNEXT, struct vm_memmap) +#define VM_MUNMAP_MEMSEG \ + _IOW('v', IOCNUM_MUNMAP_MEMSEG, struct vm_munmap) #define VM_SET_REGISTER \ _IOW('v', IOCNUM_SET_REGISTER, struct vm_register) #define VM_GET_REGISTER \ @@ -416,6 +425,8 @@ _IOW('v', IOCNUM_PPTDEV_MSIX, struct vm_pptdev_msix) #define VM_PPTDEV_DISABLE_MSIX \ _IOW('v', IOCNUM_PPTDEV_DISABLE_MSIX, struct vm_pptdev) +#define VM_UNMAP_PPTDEV_MMIO \ + _IOW('v', IOCNUM_UNMAP_PPTDEV_MMIO, struct vm_pptdev_mmio) #define VM_INJECT_NMI \ _IOW('v', IOCNUM_INJECT_NMI, struct vm_nmi) #define VM_STATS \ Index: sys/amd64/vmm/io/ppt.h =================================================================== --- sys/amd64/vmm/io/ppt.h +++ sys/amd64/vmm/io/ppt.h @@ -34,6 +34,8 @@ int ppt_unassign_all(struct vm *vm); int ppt_map_mmio(struct vm *vm, int bus, int slot, int func, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); +int ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, + vm_paddr_t gpa, size_t len); int ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec); int ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func, Index: sys/amd64/vmm/io/ppt.c =================================================================== --- sys/amd64/vmm/io/ppt.c +++ sys/amd64/vmm/io/ppt.c @@ -224,7 +224,7 @@ } static void -ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt) +ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt) { int i; struct pptseg *seg; @@ 
-412,7 +412,7 @@ pci_save_state(ppt->dev); ppt_pci_reset(ppt->dev); pci_restore_state(ppt->dev); - ppt_unmap_mmio(vm, ppt); + ppt_unmap_all_mmio(vm, ppt); ppt_teardown_msi(ppt); ppt_teardown_msix(ppt); iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev)); @@ -466,6 +466,32 @@ return (ENOSPC); } +int +ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, + vm_paddr_t gpa, size_t len) +{ + int i, error; + struct pptseg *seg; + struct pptdev *ppt; + + error = ppt_find(vm, bus, slot, func, &ppt); + if (error) + return (error); + + for (i = 0; i < MAX_MMIOSEGS; i++) { + seg = &ppt->mmio[i]; + if (seg->gpa == gpa && seg->len == len) { + error = vm_unmap_mmio(vm, seg->gpa, seg->len); + if (error == 0) { + seg->gpa = 0; + seg->len = 0; + } + return (error); + } + } + return (ENOENT); +} + static int pptintr(void *arg) { Index: sys/amd64/vmm/vmm.c =================================================================== --- sys/amd64/vmm/vmm.c +++ sys/amd64/vmm/vmm.c @@ -797,6 +797,24 @@ return (0); } +int +vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) +{ + struct mem_map *m; + int i; + + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + m = &vm->mem_maps[i]; + if (m->gpa == gpa && m->len == len && + (m->flags & VM_MEMMAP_F_IOMMU) == 0) { + vm_free_memmap(vm, i); + return (0); + } + } + + return (EINVAL); +} + int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) Index: sys/amd64/vmm/vmm_dev.c =================================================================== --- sys/amd64/vmm/vmm_dev.c +++ sys/amd64/vmm/vmm_dev.c @@ -381,6 +381,7 @@ struct vm_rtc_time *rtctime; struct vm_rtc_data *rtcdata; struct vm_memmap *mm; + struct vm_munmap *mu; struct vm_cpu_topology *topology; struct vm_readwrite_kernemu_device *kernemu; uint64_t *regvals; @@ -435,6 +436,7 @@ break; case VM_MAP_PPTDEV_MMIO: + case VM_UNMAP_PPTDEV_MMIO: case VM_BIND_PPTDEV: case VM_UNBIND_PPTDEV: #ifdef COMPAT_FREEBSD12 @@ -442,6 
+444,7 @@ #endif case VM_ALLOC_MEMSEG: case VM_MMAP_MEMSEG: + case VM_MUNMAP_MEMSEG: case VM_REINIT: /* * ioctls that operate on the entire virtual machine must @@ -525,6 +528,11 @@ pptmmio->func, pptmmio->gpa, pptmmio->len, pptmmio->hpa); break; + case VM_UNMAP_PPTDEV_MMIO: + pptmmio = (struct vm_pptdev_mmio *)data; + error = ppt_unmap_mmio(sc->vm, pptmmio->bus, pptmmio->slot, + pptmmio->func, pptmmio->gpa, pptmmio->len); + break; case VM_BIND_PPTDEV: pptdev = (struct vm_pptdev *)data; error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot, @@ -649,6 +657,10 @@ sizeof(((struct vm_memseg_fbsd12 *)0)->name)); break; #endif + case VM_MUNMAP_MEMSEG: + mu = (struct vm_munmap *)data; + error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len); + break; case VM_ALLOC_MEMSEG: error = alloc_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg *)0)->name)); Index: sys/dev/pci/pcireg.h =================================================================== --- sys/dev/pci/pcireg.h +++ sys/dev/pci/pcireg.h @@ -1098,3 +1098,9 @@ #define PCIM_OSC_CTL_PCIE_PME 0x04 /* PCIe Native Power Mgt Events */ #define PCIM_OSC_CTL_PCIE_AER 0x08 /* PCIe Advanced Error Reporting */ #define PCIM_OSC_CTL_PCIE_CAP_STRUCT 0x10 /* Various Capability Structures */ + +/* + * PCI Vendors + */ +#define PCI_VENDOR_INTEL 0x8086 +#define PCI_VENDOR_AMD 0x1022 Index: usr.sbin/bhyve/Makefile =================================================================== --- usr.sbin/bhyve/Makefile +++ usr.sbin/bhyve/Makefile @@ -43,6 +43,7 @@ pci_emul.c \ pci_hda.c \ pci_fbuf.c \ + pci_gvt-d.c \ pci_hostbridge.c \ pci_irq.c \ pci_lpc.c \ Index: usr.sbin/bhyve/pci_emul.h =================================================================== --- usr.sbin/bhyve/pci_emul.h +++ usr.sbin/bhyve/pci_emul.h @@ -40,6 +40,8 @@ #include +#include + #define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */ struct vmctx; @@ -73,6 +75,9 @@ struct pci_devinst *pi, int baridx, uint64_t offset, int size); + void
(*pe_baraddr)(struct vmctx *ctx, struct pci_devinst *pi, + int baridx, int enabled, uint64_t address); + /* Save/restore device state */ int (*pe_snapshot)(struct vm_snapshot_meta *meta); int (*pe_pause)(struct vmctx *ctx, struct pci_devinst *pi); @@ -92,6 +97,7 @@ enum pcibar_type type; /* io or memory */ uint64_t size; uint64_t addr; + uint8_t lobits; }; #define PI_NAMESZ 40 @@ -221,6 +227,7 @@ void pci_callback(void); int pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type, uint64_t size); +uint64_t pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask); int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum); int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type); void pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, Index: usr.sbin/bhyve/pci_emul.c =================================================================== --- usr.sbin/bhyve/pci_emul.c +++ usr.sbin/bhyve/pci_emul.c @@ -461,10 +461,12 @@ static void modify_bar_registration(struct pci_devinst *pi, int idx, int registration) { + struct pci_devemu *pe; int error; struct inout_port iop; struct mem_range mr; + pe = pi->pi_d; switch (pi->pi_bar[idx].type) { case PCIBAR_IO: bzero(&iop, sizeof(struct inout_port)); @@ -478,6 +480,9 @@ error = register_inout(&iop); } else error = unregister_inout(&iop); + if (pe->pe_baraddr != NULL) + (*pe->pe_baraddr)(pi->pi_vmctx, pi, idx, registration, + pi->pi_bar[idx].addr); break; case PCIBAR_MEM32: case PCIBAR_MEM64: @@ -493,6 +498,9 @@ error = register_mem(&mr); } else error = unregister_mem(&mr); + if (pe->pe_baraddr != NULL) + (*pe->pe_baraddr)(pi->pi_vmctx, pi, idx, registration, + pi->pi_bar[idx].addr); break; default: error = EINVAL; @@ -673,6 +681,40 @@ return (0); } +/* mask should be a power of 2 minus 1 (e.g. 
0x000FFFFF) */ +uint64_t +pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask) +{ + uint64_t *baseptr, limit; + + switch (type) { + case PCIBAR_IO: + baseptr = &pci_emul_iobase; + limit = PCI_EMUL_IOLIMIT; + break; + case PCIBAR_MEM32: + baseptr = &pci_emul_membase32; + limit = PCI_EMUL_MEMLIMIT32; + break; + case PCIBAR_MEM64: + baseptr = &pci_emul_membase64; + limit = pci_emul_memlim64; + break; + default: + return 0; + } + + /* align base */ + const uint64_t base = (*baseptr + mask) & ~mask; + + if (base + size > limit) + return 0; + + *baseptr = base + size; + + return base; +} + #define CAP_START_OFFSET 0x40 static int pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) Index: usr.sbin/bhyve/pci_fbuf.c =================================================================== --- usr.sbin/bhyve/pci_fbuf.c +++ usr.sbin/bhyve/pci_fbuf.c @@ -42,6 +42,7 @@ #include #include +#include #include #include @@ -224,6 +225,30 @@ return (value); } +static void +pci_fbuf_baraddr(struct vmctx *ctx, struct pci_devinst *pi, int baridx, + int enabled, uint64_t address) +{ + struct pci_fbuf_softc *sc; + int prot; + + if (baridx != 1) + return; + + sc = pi->pi_arg; + if (!enabled && sc->fbaddr != 0) { + if (vm_munmap_memseg(ctx, sc->fbaddr, FB_SIZE) != 0) + warnx("pci_fbuf: munmap_memseg failed"); + sc->fbaddr = 0; + } else if (sc->fb_base != NULL && sc->fbaddr == 0) { + prot = PROT_READ | PROT_WRITE; + if (vm_mmap_memseg(ctx, address, VM_FRAMEBUFFER, 0, FB_SIZE, prot) != 0) + warnx("pci_fbuf: mmap_memseg failed"); + sc->fbaddr = address; + } +} + + static int pci_fbuf_parse_opts(struct pci_fbuf_softc *sc, char *opts) { @@ -462,5 +487,6 @@ #ifdef BHYVE_SNAPSHOT .pe_snapshot = pci_fbuf_snapshot, #endif + .pe_baraddr = pci_fbuf_baraddr, }; PCI_EMUL_SET(pci_fbuf); Index: usr.sbin/bhyve/pci_gvt-d.c =================================================================== --- /dev/null +++ usr.sbin/bhyve/pci_gvt-d.c @@ -0,0 +1,387 @@ +/*- + * 
SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "inout.h" +#include "pci_passthru.h" + +#define MB (1024 * 1024UL) + +/* + * PCI definitions + */ +#define PCIR_GGC 0x50 /* GMCH Graphics Control register */ +#define PCIR_BDSM 0x5C /* Base Data of Stolen Memory register */ +#define PCIR_ASLS_CTL 0xFC /* Opregion start address register */ +#define PCIM_GEN5_75_GGC_GMS_MASK \ + 0x000000F0 /* Bits 7:4 contain Graphics Mode Select */ +#define PCIM_GEN6_GGC_GMS_MASK \ + 0x000000F8 /* Bits 7:3 contain Graphics Mode Select */ +#define PCIM_GEN8_GGC_GMS_MASK \ + 0x0000FF00 /* Bits 15:8 contain Graphics Mode Select */ +#define PCIM_BDSM_GSM_MASK \ + 0xFFF00000 /* Bits 31:20 contain base address of gsm */ +#define PCIM_ASLS_OPREGION_MASK 0xFFFFF000 /* Opregion is 4k aligned */ +#define GPU_OPREGION_LEN 0x00004000 /* Size of Opregion (16 KB) */ + +/* + * Known device ids for different generations of Intel graphics + * see https://www.graphics-drivers.eu/intel-pci-hardware-id-string.html for + * complete list + */ +/* Westmere & Ironlake */ +static const uint16_t igd_devid_gen5_75[] = { 0x0042, 0x0046 }; +/* Sandy Bridge */ +static const uint16_t igd_devid_gen6[] = { 0x0102, 0x0106, 0x010A, 0x0112, + 0x0116, 0x0122, 0x0126 }; +/* Ivy Bridge */ +static const uint16_t igd_devid_gen7[] = { 0x0152, 0x0156, 0x015A, 0x0162, + 0x0166, 0x016A }; +/* Haswell */ +static const uint16_t igd_devid_gen7_5[] = { 0x0402, 0x0406, 0x040A, 0x0412, + 0x0416, 0x041A, 0x041E, 0x0A06, 0x0A0E, 0x0A16, 0x0A1E, 0x0A26, 0x0A2E, + 0x0C02, 0x0C06, 0x0C12, 0x0C16, 0x0C22, 0x0C26, 0x0D06, 0x0D16, 0x0D22, + 0x0D26 }; +/* Broadwell */ +static const uint16_t igd_devid_gen8[] = { 0x1606, 0x160E, 0x1612, 0x1616, + 0x161A, 0x161E, 0x1622, 0x1626, 0x162A, 0x162B }; +/* Skylake */ +static const uint16_t igd_devid_gen9[] = { 0x1902, 0x1906, 0x190B, 0x190E, + 0x1912,
0x1913, 0x1916, 0x1917, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, + 0x1926, 0x1927, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D }; +/* Kaby Lake & Whiskey Lake & Amber Lake & Coffee Lake & Comet Lake */ +static const uint16_t igd_devid_gen9_5[] = { 0x3E90, 0x3E91, 0x3E92, 0x3E93, + 0x3E94, 0x3E96, 0x3E98, 0x3E99, 0x3E9A, 0x3E9B, 0x3E9C, 0x3EA0, 0x3EA1, + 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8, 0x3EA9, 0x5902, 0x5906, 0x590B, 0x5912, + 0x5916, 0x5917, 0x591B, 0x591C, 0x591D, 0x591E, 0x5921, 0x5926, 0x5927, + 0x87C0, 0x87CA, 0x9B21, 0x9B41, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, + 0x9BAC, 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCC, 0x9BE6, + 0x9BF6 }; + +static int +array_contains(const uint16_t *array, uint64_t elements, uint16_t item) +{ + for (uint64_t i = 0; i < elements; ++i) + if (array[i] == item) + return 1; + return 0; +} + +#define IGD_FUNC_IS_IGD_GEN(gen) \ + static int igd_gen##gen##_is_igd_gen(int devid) \ + { \ + return array_contains(igd_devid_gen##gen, \ + sizeof(igd_devid_gen##gen) / sizeof(uint16_t), devid); \ + } + +/* GVT-d definitions */ +#define GVT_D_MAP_OPREGION 0 +#define GVT_D_MAP_GSM 1 + +/* + * Handler for passthru of igd + * + * Keep it as struct instead of a single function pointer, since new + * generations of Intel graphics could need other funcs. + * e.g. 
Intel Elkhartlake and Intel Tigerlake: + * They will need different handling for GSM and Opregion (See ACRN-Hypervisor + * ) + */ +struct igd_funcs { + int (*is_igd_gen)(int devid); + uint64_t (*get_gsm_len)(struct vmctx *ctx, struct passthru_softc *sc); +}; + +/* Handler for igd of gen5.75 (Westmere & Ironlake) */ +IGD_FUNC_IS_IGD_GEN(5_75); + +static uint64_t +igd_gen5_75_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN5_75_GGC_GMS_MASK) >> + 4; /* Bits 7:4 contain Graphics Mode Select */ + switch (gms_val) { + case 0x05: + return 32 * MB; + case 0x06: + return 48 * MB; + case 0x07: + return 64 * MB; + case 0x08: + return 128 * MB; + case 0x09: + return 256 * MB; + case 0x0A: + return 96 * MB; + case 0x0B: + return 160 * MB; + case 0x0C: + return 224 * MB; + case 0x0D: + return 352 * MB; + } + + warnx("Unknown Graphic Mode (%x)", gms_val); + return 0; +} + +/* Handler for igd of gen6 (Sandy Bridge) */ +IGD_FUNC_IS_IGD_GEN(6); + +static uint64_t +igd_gen6_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN6_GGC_GMS_MASK) >> + 3; /* Bits 7:3 contain Graphics Mode Select */ + if (gms_val <= 0x10) + return gms_val * 32 * MB; + + warnx("Unknown Graphic Mode (%x)", gms_val); + return 0; +} + +/* Handler for igd of gen7 (Ivy Bridge) */ +IGD_FUNC_IS_IGD_GEN(7); + +/* Handler for igd of gen7.5 (Haswell) */ +IGD_FUNC_IS_IGD_GEN(7_5); + +/* Handler for igd of gen8 (Broadwell) */ +IGD_FUNC_IS_IGD_GEN(8); + +static uint64_t +igd_gen8_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >> + 8; /* Bits 15:8 contain Graphics Mode Select */ + if ((gms_val <= 0x10) || (gms_val == 0x20) || (gms_val == 0x30) || + (gms_val == 0x3F)) + return 
gms_val * 32 * MB; + + warnx("Unknown Graphic Mode (%x)", gms_val); + return 0; +} + +/* Handler for igd of gen9 (Skylake) */ +IGD_FUNC_IS_IGD_GEN(9); + +static uint64_t +igd_gen9_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >> + 8; /* Bits 15:8 contain Graphics Mode Select */ + if ((gms_val <= 0x10) || (gms_val == 0x20) || (gms_val == 0x30) || + (gms_val == 0x40)) + return gms_val * 32 * MB; + else if (gms_val >= 0xF0 && gms_val <= 0xFE) + return gms_val * 4 * MB; + + warnx("Unknown Graphic Mode (%x)", gms_val); + return 0; +} + +/* + * Handler for igd of gen9.5 (Kaby Lake & Whiskey Lake & Amber Lake & Coffee + * Lake & Comet Lake) + */ +IGD_FUNC_IS_IGD_GEN(9_5); + +/* Westmere & Ironlake */ +static const struct igd_funcs igd_gen5_75 = { + .is_igd_gen = igd_gen5_75_is_igd_gen, + .get_gsm_len = igd_gen5_75_get_gsm_len +}; +/* Sandy Bridge */ +static const struct igd_funcs igd_gen6 = { .is_igd_gen = igd_gen6_is_igd_gen, + .get_gsm_len = igd_gen6_get_gsm_len }; +/* Ivy Bridge */ +static const struct igd_funcs igd_gen7 = { .is_igd_gen = igd_gen7_is_igd_gen, + .get_gsm_len = igd_gen6_get_gsm_len }; +/* Haswell */ +static const struct igd_funcs igd_gen7_5 = { + .is_igd_gen = igd_gen7_5_is_igd_gen, + .get_gsm_len = igd_gen6_get_gsm_len +}; +/* Broadwell */ +static const struct igd_funcs igd_gen8 = { .is_igd_gen = igd_gen8_is_igd_gen, + .get_gsm_len = igd_gen8_get_gsm_len }; +/* Skylake */ +static const struct igd_funcs igd_gen9 = { .is_igd_gen = igd_gen9_is_igd_gen, + .get_gsm_len = igd_gen9_get_gsm_len }; +/* Kaby Lake & Whiskey Lake & Amber Lake & Coffee Lake & Comet Lake */ +static const struct igd_funcs igd_gen9_5 = { + .is_igd_gen = igd_gen9_5_is_igd_gen, + .get_gsm_len = igd_gen9_get_gsm_len +}; + +static const struct igd_funcs *igd_gen_map[] = { &igd_gen5_75, &igd_gen6, + &igd_gen7, &igd_gen7_5, &igd_gen8, &igd_gen9, &igd_gen9_5 }; + 
+static const struct igd_funcs * +get_igd_funcs(const uint16_t devid) +{ + for (int i = 0; i < sizeof(igd_gen_map) / sizeof(struct igd_funcs *); + ++i) { + if (igd_gen_map[i]->is_igd_gen(devid)) + return igd_gen_map[i]; + } + return NULL; +} + +int +gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) +{ + int error; + struct passthru_softc *sc; + + sc = pi->pi_arg; + + /* check vendor == Intel */ + const uint16_t dev_vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 2); + if (dev_vendor != 0x8086) { + warnx("Unknown vendor (%x) of igd", dev_vendor); + return -ENODEV; + } + + /* check if device is a display device */ + if (read_config(&sc->psc_sel, PCIR_CLASS, 1) != PCIC_DISPLAY) { + warnx("%s is no display device", pi->pi_name); + return -ENODEV; + } + + /* Get IGD funcs */ + const struct igd_funcs *igd = get_igd_funcs( + read_config(&sc->psc_sel, PCIR_DEVICE, 2)); + if (igd == NULL) { + warnx("Unsupported igd-device (%x)", + read_config(&sc->psc_sel, PCIR_DEVICE, 2)); + return -ENODEV; + } + + struct passthru_mmio_mapping *opregion = + &sc->psc_mmio_map[GVT_D_MAP_OPREGION]; + struct passthru_mmio_mapping *gsm = &sc->psc_mmio_map[GVT_D_MAP_GSM]; + + /* Get Opregion length */ + opregion->len = GPU_OPREGION_LEN; + /* Get Opregion HPA */ + opregion->hpa = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4) & + PCIM_ASLS_OPREGION_MASK; + /* Get Graphics Stolen Memory len */ + gsm->len = igd->get_gsm_len(ctx, sc); + /* Get Graphics Stolen Memory HPA */ + gsm->hpa = read_config(&sc->psc_sel, PCIR_BDSM, 4) & PCIM_BDSM_GSM_MASK; + + if (opregion->len == 0 || gsm->len == 0) { + warnx("Could not determine size of opregion or gsm"); + return -ENODEV; + } + + /* Allocate Opregion and GSM in guest space */ + opregion->gpa = pci_emul_alloc_mmio( + PCIBAR_MEM32, opregion->len, ~PCIM_ASLS_OPREGION_MASK); + gsm->gpa = pci_emul_alloc_mmio( + PCIBAR_MEM32, gsm->len, ~PCIM_BDSM_GSM_MASK); + if (opregion->gpa == 0 || gsm->gpa == 0) { + error = -ENOMEM; + goto failed_opregion; + } + 
+ /* Write address of Opregion and GSM into PCI register */ + /* Set Opregion GPA */ + uint32_t asls_val = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4); + pci_set_cfgdata32(sc->psc_pi, PCIR_ASLS_CTL, + opregion->gpa | (asls_val & ~PCIM_ASLS_OPREGION_MASK)); + /* Set Graphics Stolen Memory GPA */ + uint32_t bdsm_val = read_config(&sc->psc_sel, PCIR_BDSM, 4); + pci_set_cfgdata32( + sc->psc_pi, PCIR_BDSM, gsm->gpa | (bdsm_val & ~PCIM_BDSM_GSM_MASK)); + + /* Map Opregion and GSM into guest space */ + if ((error = passthru_modify_pptdev_mmio( + ctx, sc, opregion, PT_MAP_PPTDEV_MMIO)) != 0) + goto failed_opregion; + if ((error = passthru_modify_pptdev_mmio( + ctx, sc, gsm, PT_MAP_PPTDEV_MMIO)) != 0) + goto failed_gsm; + + /* Protect PCI register */ + set_pcir_prot(sc, PCIR_ASLS_CTL, 0x04, PPT_PCIR_PROT_NA); + set_pcir_prot(sc, PCIR_BDSM, 0x04, PPT_PCIR_PROT_NA); + + return (0); + +failed_opregion: + opregion->gpa = 0; +failed_gsm: + gsm->gpa = 0; + return error; +} + +void +gvt_d_deinit(struct vmctx *ctx, struct pci_devinst *pi) +{ + struct passthru_softc *sc; + + sc = pi->pi_arg; + + struct passthru_mmio_mapping *gsm = &sc->psc_mmio_map[GVT_D_MAP_GSM]; + struct passthru_mmio_mapping *opregion = + &sc->psc_mmio_map[GVT_D_MAP_OPREGION]; + + /* GPA is only set, if it's initialized */ + if (gsm->gpa) + passthru_modify_pptdev_mmio(ctx, sc, gsm, PT_UNMAP_PPTDEV_MMIO); + if (opregion->gpa) + passthru_modify_pptdev_mmio( + ctx, sc, opregion, PT_UNMAP_PPTDEV_MMIO); +} Index: usr.sbin/bhyve/pci_lpc.c =================================================================== --- usr.sbin/bhyve/pci_lpc.c +++ usr.sbin/bhyve/pci_lpc.c @@ -33,9 +33,13 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include +#include +#include +#include #include #include #include @@ -83,6 +87,29 @@ static bool pctestdev_present; +#ifndef _PATH_DEVPCI +#define _PATH_DEVPCI "/dev/pci" +#endif + +static int pcifd = -1; + +static uint32_t +read_config(struct pcisel *sel, long reg, int width) +{ + struct 
pci_io pi; + pi.pi_sel.pc_domain = sel->pc_domain; + pi.pi_sel.pc_bus = sel->pc_bus; + pi.pi_sel.pc_dev = sel->pc_dev; + pi.pi_sel.pc_func = sel->pc_func; + pi.pi_reg = reg; + pi.pi_width = width; + + if (ioctl(pcifd, PCIOCREAD, &pi) < 0) + return (0); + + return (pi.pi_data); +} + /* * LPC device configuration is in the following form: * [,] @@ -446,6 +473,35 @@ pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE); pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_ISA); + /* open host device */ + if (pcifd < 0) { + pcifd = open(_PATH_DEVPCI, O_RDWR, 0); + if (pcifd < 0) { + warn("failed to open %s", _PATH_DEVPCI); + return (-1); + } + } + + /* on Intel systems lpc is always connected to 0:1f.0 */ + struct pcisel sel; + sel.pc_domain = 0; + sel.pc_bus = 0; + sel.pc_dev = 0x1f; + sel.pc_func = 0; + + if (read_config(&sel, PCIR_VENDOR, 2) == PCI_VENDOR_INTEL) { + /* + * The VID, DID, REVID, SUBVID and SUBDID of igd-lpc need aligned with + * physical one. Without these physical values, GVT-d GOP driver + * couldn't work. + */ + pci_set_cfgdata16(pi, PCIR_DEVICE, read_config(&sel, PCIR_DEVICE, 2)); + pci_set_cfgdata16(pi, PCIR_VENDOR, read_config(&sel, PCIR_VENDOR, 2)); + pci_set_cfgdata8(pi, PCIR_REVID, read_config(&sel, PCIR_REVID, 1)); + pci_set_cfgdata16(pi, PCIR_SUBVEND_0, read_config(&sel, PCIR_SUBVEND_0, 2)); + pci_set_cfgdata16(pi, PCIR_SUBDEV_0, read_config(&sel, PCIR_SUBDEV_0, 2)); + } + lpc_bridge = pi; return (0); Index: usr.sbin/bhyve/pci_passthru.h =================================================================== --- /dev/null +++ usr.sbin/bhyve/pci_passthru.h @@ -0,0 +1,83 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef __PCI_PASSTHRU_H__ +#define __PCI_PASSTHRU_H__ + +#include + +#include + +#include "pci_emul.h" + +struct passthru_mmio_mapping { + uint64_t gpa; + uint64_t len; + uint64_t hpa; +}; + +struct passthru_softc { + struct pci_devinst *psc_pi; + struct pcibar psc_bar[PCI_BARMAX + 1]; + struct { + int capoff; + int msgctrl; + int emulated; + } psc_msi; + struct { + int capoff; + } psc_msix; + struct pcisel psc_sel; + + struct passthru_mmio_mapping psc_mmio_map[2]; + uint8_t psc_pcir_prot_map[(PCI_REGMAX + 1) / 4]; +}; + +#define PT_MAP_PPTDEV_MMIO 1 +#define PT_UNMAP_PPTDEV_MMIO 0 + +#define PPT_PCIR_PROT_NA 0 /* No Access to physical values */ +#define PPT_PCIR_PROT_RO 1 /* Read Only access to physical values */ +#define PPT_PCIR_PROT_WO 2 /* Write Only access to physical values */ +#define PPT_PCIR_PROT_RW \ + (PPT_PCIR_PROT_RO | \ + PPT_PCIR_PROT_WO) /* Read/Write access to physical values */ +#define PPT_PCIR_PROT_MASK 0x03 + +int passthru_modify_pptdev_mmio(struct vmctx *ctx, struct passthru_softc *sc, + struct passthru_mmio_mapping *map, int registration); +uint32_t read_config(const struct pcisel *sel, long reg, int width); +void write_config(const struct pcisel *sel, long reg, int width, uint32_t data); +int set_pcir_prot( + struct passthru_softc *sc, uint32_t reg, uint32_t len, uint8_t prot); +int gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts); +void gvt_d_deinit(struct vmctx *ctx, struct pci_devinst *pi); + +#endif Index: usr.sbin/bhyve/pci_passthru.c =================================================================== --- usr.sbin/bhyve/pci_passthru.c +++ usr.sbin/bhyve/pci_passthru.c @@ -58,9 +58,8 @@ #include #include -#include -#include "pci_emul.h" #include "mem.h" +#include "pci_passthru.h" #ifndef _PATH_DEVPCI #define _PATH_DEVPCI "/dev/pci" @@ -83,20 +82,6 @@ static int iofd = -1; static int memfd = -1; -struct passthru_softc { - struct pci_devinst *psc_pi; - struct pcibar psc_bar[PCI_BARMAX + 1]; - 
struct { - int capoff; - int msgctrl; - int emulated; - } psc_msi; - struct { - int capoff; - } psc_msix; - struct pcisel psc_sel; -}; - static int msi_caplen(int msgctrl) { @@ -119,7 +104,7 @@ return (len); } -static uint32_t +uint32_t read_config(const struct pcisel *sel, long reg, int width) { struct pci_io pi; @@ -135,7 +120,7 @@ return (pi.pi_data); } -static void +void write_config(const struct pcisel *sel, long reg, int width, uint32_t data) { struct pci_io pi; @@ -149,6 +134,70 @@ (void)ioctl(pcifd, PCIOCWRITE, &pi); /* XXX */ } +int +passthru_modify_pptdev_mmio(struct vmctx *ctx, struct passthru_softc *sc, struct passthru_mmio_mapping *map, int registration) +{ + if (registration == PT_MAP_PPTDEV_MMIO) + return vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, map->gpa, map->len, map->hpa); + else + return vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, map->gpa, map->len); +} + +static int +passthru_modify_bar_registration(struct pci_devinst *pi, int idx, int registration) +{ + int error; + struct passthru_softc *sc; + struct passthru_mmio_mapping map; + + sc = pi->pi_arg; + + /* + * If the guest writes a new value to a 64-bit BAR, two writes are neccessary. + * vm_map_pptdev_mmio can fail in that case due to an invalid address after the first write. + * To avoid it, skip registration in that case. 
+ */ + if ((registration == PT_MAP_PPTDEV_MMIO) && (pi->pi_bar[idx].type == PCIBAR_MEM64)) + if ((pci_get_cfgdata32(pi, PCIR_BAR(idx + 0)) == ~0U) || + (pci_get_cfgdata32(pi, PCIR_BAR(idx + 1)) == ~0U)) + return 0; + + if (idx != pci_msix_table_bar(pi)) { + map.gpa = pi->pi_bar[idx].addr; + map.len = pi->pi_bar[idx].size; + map.hpa = sc->psc_bar[idx].addr; + return passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map, registration); + } + + /* special handling for MSI-X table */ + uint32_t table_offset, table_size; + + table_offset = rounddown2(pi->pi_msix.table_offset, 4096); + table_size = pi->pi_msix.table_offset - table_offset; + table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE; + table_size = roundup2(table_size, 4096); + + map.gpa = pi->pi_bar[idx].addr; + map.len = table_offset; + map.hpa = sc->psc_bar[idx].addr; + + /* map/unmap everything before MSI-X table */ + if (map.len > 0) + if ((error = passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map, registration)) != 0) + return error; + + map.gpa += table_offset + table_size; + map.len = pi->pi_bar[idx].size - (table_offset + table_size); + map.hpa += table_offset + table_size; + + /* map/unmap everything behind MSI-X table */ + if (map.len > 0) + if ((error = passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map, registration)) != 0) + return error; + + return (0); +} + #ifdef LEGACY_SUPPORT static int passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr) @@ -438,8 +487,8 @@ init_msix_table(struct vmctx *ctx, struct passthru_softc *sc, uint64_t base) { int b, s, f; - int error, idx; - size_t len, remaining; + int idx; + size_t remaining; uint32_t table_size, table_offset; uint32_t pba_size, pba_offset; vm_paddr_t start; @@ -501,31 +550,6 @@ } } - /* Map everything before the MSI-X table */ - if (table_offset > 0) { - len = table_offset; - error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base); - if (error) - return (error); - - base += len; - start += len; - remaining -= len; - } - 
-	/* Skip the MSI-X table */
-	base += table_size;
-	start += table_size;
-	remaining -= table_size;
-
-	/* Map everything beyond the end of the MSI-X table */
-	if (remaining > 0) {
-		len = remaining;
-		error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
-		if (error)
-			return (error);
-	}
-
 	return (0);
 }
 
@@ -581,24 +605,36 @@
 		sc->psc_bar[i].type = bartype;
 		sc->psc_bar[i].size = size;
 		sc->psc_bar[i].addr = base;
+		sc->psc_bar[i].lobits = 0;
 
 		/* Allocate the BAR in the guest I/O or MMIO space */
 		error = pci_emul_alloc_bar(pi, i, bartype, size);
 		if (error)
 			return (-1);
 
+		/* Use same prefetchable property as physical bar */
+		uint8_t lobits = pci_get_cfgdata8(pi, PCIR_BAR(i));
+		if (bartype == PCIBAR_MEM32 || bartype == PCIBAR_MEM64) {
+			if (bar.pbi_base & PCIM_BAR_MEM_PREFETCH)
+				lobits |= PCIM_BAR_MEM_PREFETCH;
+			else
+				lobits &= ~PCIM_BAR_MEM_PREFETCH;
+			pci_set_cfgdata8(pi, PCIR_BAR(i), lobits);
+			lobits &= ~PCIM_BAR_MEM_BASE;
+		}
+		else {
+			lobits |= PCIM_BAR_IO_SPACE;
+			pci_set_cfgdata8(pi, PCIR_BAR(i), lobits);
+			lobits &= ~PCIM_BAR_IO_BASE;
+		}
+		sc->psc_bar[i].lobits = lobits;
+		pi->pi_bar[i].lobits = lobits;
+
 		/* The MSI-X table needs special handling */
 		if (i == pci_msix_table_bar(pi)) {
 			error = init_msix_table(ctx, sc, base);
 			if (error)
 				return (-1);
-		} else if (bartype != PCIBAR_IO) {
-			/* Map the physical BAR in the guest MMIO space */
-			error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
-			    sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
-			    pi->pi_bar[i].addr, pi->pi_bar[i].size, base);
-			if (error)
-				return (-1);
 		}
 
 		/*
@@ -639,14 +675,89 @@
 		goto done;
 	}
 
-	pci_set_cfgdata16(pi, PCIR_COMMAND, read_config(&sc->psc_sel,
-	    PCIR_COMMAND, 2));
+	/* sync command register */
+	write_config(&sc->psc_sel, PCIR_COMMAND, 0x02,
+	    pci_get_cfgdata16(pi, PCIR_COMMAND));
 
 	error = 0;	/* success */
 done:
 	return (error);
 }
 
+#define PPT_PCIR_PROT(reg) ((sc->psc_pcir_prot_map[(reg) / 4] >> (((reg) & 0x03) * 2)) & PPT_PCIR_PROT_MASK)
+
+int
+set_pcir_prot(struct passthru_softc *sc,
+    uint32_t reg, uint32_t len, uint8_t prot)
+{
+	if (reg > PCI_REGMAX || len > PCI_REGMAX + 1 - reg)
+		return (-1);
+
+	prot &= PPT_PCIR_PROT_MASK;
+
+	for (int i = reg; i < reg + len; ++i) {
+		/* delete old prot value */
+		sc->psc_pcir_prot_map[i / 4] &= ~(PPT_PCIR_PROT_MASK << ((i & 0x03) * 2));
+		/* set new prot value */
+		sc->psc_pcir_prot_map[i / 4] |= prot << ((i & 0x03) * 2);
+	}
+
+	return (0);
+}
+
+static int
+is_pcir_writable(struct passthru_softc *sc, uint32_t reg)
+{
+	if (reg > PCI_REGMAX)
+		return (0);
+
+	return ((PPT_PCIR_PROT(reg) & PPT_PCIR_PROT_WO) != 0);
+}
+
+static int
+is_pcir_readable(struct passthru_softc *sc, uint32_t reg)
+{
+	if (reg > PCI_REGMAX)
+		return (0);
+
+	return ((PPT_PCIR_PROT(reg) & PPT_PCIR_PROT_RO) != 0);
+}
+
+static int
+passthru_init_quirks(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	struct passthru_softc *sc = pi->pi_arg;
+
+	uint16_t vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	uint8_t class = read_config(&sc->psc_sel, PCIR_CLASS, 0x01);
+
+	/* currently only display devices have quirks */
+	if (class != PCIC_DISPLAY)
+		return (0);
+
+	if (vendor == PCI_VENDOR_INTEL)
+		return gvt_d_init(ctx, pi, opts);
+
+	return (0);
+}
+
+static void
+passthru_deinit_quirks(struct vmctx *ctx, struct pci_devinst *pi)
+{
+	struct passthru_softc *sc = pi->pi_arg;
+
+	uint16_t vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	uint8_t class = read_config(&sc->psc_sel, PCIR_CLASS, 0x01);
+
+	/* currently only display devices have quirks */
+	if (class != PCIC_DISPLAY)
+		return;
+
+	if (vendor == PCI_VENDOR_INTEL)
+		gvt_d_deinit(ctx, pi);
+
+	return;
+}
+
 static int
 passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 {
@@ -734,9 +845,20 @@
 	sc->psc_pi = pi;
 
 	/* initialize config space */
-	error = cfginit(ctx, pi, bus, slot, func);
+	if ((error = cfginit(ctx, pi, bus, slot, func)) != 0)
+		goto done;
+
+	/* allow access to all PCI registers */
+	if ((error = set_pcir_prot(sc, 0, PCI_REGMAX + 1,
PPT_PCIR_PROT_RW)) != 0) + goto done; + + if ((error = passthru_init_quirks(ctx, pi, opts)) != 0) + goto done; + + error = 0; /* success */ done: if (error) { + passthru_deinit_quirks(ctx, pi); free(sc); vm_unassign_pptdev(ctx, bus, slot, func); } @@ -778,6 +900,38 @@ coff < sc->psc_msix.capoff + MSIX_CAPLEN); } +static int +passthru_cfgread_bar(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, + int coff, int bytes, uint32_t *rv) +{ + const int idx = (coff - PCIR_BAR(0)) / 4; + int update_idx = idx; + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + --update_idx; + + if (pci_get_cfgdata32(pi, PCIR_BAR(idx)) != ~0U) { + /* return address of BAR */ + if (bytes == 1) + *rv = pci_get_cfgdata8(pi, coff); + else if (bytes == 2) + *rv = pci_get_cfgdata16(pi, coff); + else + *rv = pci_get_cfgdata32(pi, coff); + + return (0); + } + + /* return size of BAR */ + uint64_t size = ~(uint64_t)(pi->pi_bar[update_idx].size - 1); + size |= pi->pi_bar[update_idx].lobits; + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + size >>= 32; + assert(bytes == 4); + *rv = size; + + return (0); +} + static int passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t *rv) @@ -786,12 +940,19 @@ sc = pi->pi_arg; + /* skip for protected PCI registers */ + if (!is_pcir_readable(sc, coff)) + return (-1); + /* * PCI BARs and MSI capability is emulated. 
*/ - if (bar_access(coff) || msicap_access(sc, coff)) + if (msicap_access(sc, coff)) return (-1); + if (bar_access(coff)) + return passthru_cfgread_bar(ctx, vcpu, pi, coff, bytes, rv); + #ifdef LEGACY_SUPPORT /* * Emulate PCIR_CAP_PTR if this device does not support MSI capability @@ -822,6 +983,82 @@ return (0); } +static int +passthru_cfgwrite_bar(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, + int coff, int bytes, uint32_t val) +{ + const int idx = (coff - PCIR_BAR(0)) / 4; + int update_idx = idx; + + switch (pi->pi_bar[idx].type) { + case PCIBAR_MEMHI64: + --update_idx; + case PCIBAR_IO: + case PCIBAR_MEM32: + case PCIBAR_MEM64: + { + const uint16_t cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); + if ((cmd & PCIM_CMD_MEMEN && pi->pi_bar[idx].type != PCIBAR_IO) || + (cmd & PCIM_CMD_PORTEN && pi->pi_bar[idx].type == PCIBAR_IO)) { + passthru_modify_bar_registration(pi, update_idx, 0); + } + + if (val == ~0U) { + /* guest wants to read size of BAR */ + pci_set_cfgdata32(pi, coff, ~0U); + pi->pi_bar[update_idx].addr = 0; + break; + } + + /* guest sets address of BAR */ + uint64_t mask, bar; + mask = ~(pi->pi_bar[update_idx].size - 1); + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + mask >>= 32; + bar = val & mask; + if (pi->pi_bar[idx].type != PCIBAR_MEMHI64) + bar |= pi->pi_bar[update_idx].lobits; + pci_set_cfgdata32(pi, coff, bar); + + /* Only register BAR if it contains a valid address */ + uint32_t lo, hi; + + lo = pci_get_cfgdata32(pi, PCIR_BAR(update_idx)); + if (pi->pi_bar[update_idx].type == PCIBAR_IO) { + if ((lo & PCIM_BAR_IO_BASE) == PCIM_BAR_IO_BASE) + lo = ~0U; + else + lo &= PCIM_BAR_IO_BASE; + } else { + if ((lo & PCIM_BAR_MEM_BASE) == PCIM_BAR_MEM_BASE) + lo = ~0U; + else + lo &= PCIM_BAR_MEM_BASE; + } + + if (pi->pi_bar[update_idx].type == PCIBAR_MEM64) + hi = pci_get_cfgdata32(pi, PCIR_BAR(update_idx + 1)); + else + hi = 0; + + if (lo != ~0U && hi != ~0U) { + pi->pi_bar[update_idx].addr = (uint64_t)lo | ((uint64_t)hi << 32U); + if ((cmd & 
PCIM_CMD_MEMEN && pi->pi_bar[idx].type != PCIBAR_IO) || + (cmd & PCIM_CMD_PORTEN && pi->pi_bar[idx].type == PCIBAR_IO)) { + passthru_modify_bar_registration(pi, update_idx, 1); + } + } + else + pi->pi_bar[update_idx].addr = 0; + break; + } + default: + pi->pi_bar[idx].addr = 0; + break; + } + return (0); +} + static int passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t val) @@ -832,11 +1069,15 @@ sc = pi->pi_arg; + /* skip for protected PCI registers */ + if (!is_pcir_writable(sc, coff)) + return (-1); + /* * PCI BARs are emulated */ if (bar_access(coff)) - return (-1); + return passthru_cfgwrite_bar(ctx, vcpu, pi, coff, bytes, val); /* * MSI capability is emulated @@ -954,6 +1195,15 @@ return (val); } +static void +passthru_addr(struct vmctx *ctx, struct pci_devinst *pi, int baridx, + int enabled, uint64_t address) +{ + if (pi->pi_bar[baridx].type == PCIBAR_IO) + return; + passthru_modify_bar_registration(pi, baridx, enabled); +} + struct pci_devemu passthru = { .pe_emu = "passthru", .pe_init = passthru_init, @@ -961,5 +1211,6 @@ .pe_cfgread = passthru_cfgread, .pe_barwrite = passthru_write, .pe_barread = passthru_read, + .pe_baraddr = passthru_addr, }; PCI_EMUL_SET(passthru);