Index: lib/libvmmapi/vmmapi.h =================================================================== --- lib/libvmmapi/vmmapi.h +++ lib/libvmmapi/vmmapi.h @@ -176,6 +176,8 @@ int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func); int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); +int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len); int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec); int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, Index: lib/libvmmapi/vmmapi.c =================================================================== --- lib/libvmmapi/vmmapi.c +++ lib/libvmmapi/vmmapi.c @@ -980,6 +980,26 @@ return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio)); } +int +vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len) +{ + struct vm_pptdev_mmio pptmmio; + + bzero(&pptmmio, sizeof(pptmmio)); + pptmmio.bus = bus; + pptmmio.slot = slot; + pptmmio.func = func; + pptmmio.gpa = gpa; + pptmmio.len = len; + pptmmio.hpa = 0; + + if (gpa == 0) + return (0); + + return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio)); +} + int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec) @@ -1640,7 +1660,7 @@ VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ, VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER, VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV, - VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI, + VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_MSI, VM_PPTDEV_MSIX, VM_INJECT_NMI, VM_STATS, VM_STAT_DESC, VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE, VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA, Index: sys/amd64/include/vmm_dev.h 
=================================================================== --- sys/amd64/include/vmm_dev.h +++ sys/amd64/include/vmm_dev.h @@ -299,6 +299,7 @@ IOCNUM_BIND_PPTDEV = 40, IOCNUM_UNBIND_PPTDEV = 41, IOCNUM_MAP_PPTDEV_MMIO = 42, + IOCNUM_UNMAP_PPTDEV_MMIO = 45, IOCNUM_PPTDEV_MSI = 43, IOCNUM_PPTDEV_MSIX = 44, @@ -409,6 +410,8 @@ _IOW('v', IOCNUM_UNBIND_PPTDEV, struct vm_pptdev) #define VM_MAP_PPTDEV_MMIO \ _IOW('v', IOCNUM_MAP_PPTDEV_MMIO, struct vm_pptdev_mmio) +#define VM_UNMAP_PPTDEV_MMIO \ + _IOW('v', IOCNUM_UNMAP_PPTDEV_MMIO, struct vm_pptdev_mmio) #define VM_PPTDEV_MSI \ _IOW('v', IOCNUM_PPTDEV_MSI, struct vm_pptdev_msi) #define VM_PPTDEV_MSIX \ Index: sys/amd64/vmm/io/ppt.h =================================================================== --- sys/amd64/vmm/io/ppt.h +++ sys/amd64/vmm/io/ppt.h @@ -34,6 +34,8 @@ int ppt_unassign_all(struct vm *vm); int ppt_map_mmio(struct vm *vm, int bus, int slot, int func, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); +int ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, + vm_paddr_t gpa, size_t len); int ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec); int ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func, Index: sys/amd64/vmm/io/ppt.c =================================================================== --- sys/amd64/vmm/io/ppt.c +++ sys/amd64/vmm/io/ppt.c @@ -218,7 +218,7 @@ } static void -ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt) +ppt_unmap_mmio_all(struct vm *vm, struct pptdev *ppt) { int i; struct pptseg *seg; @@ -414,7 +414,7 @@ pci_save_state(ppt->dev); ppt_pci_reset(ppt->dev); pci_restore_state(ppt->dev); - ppt_unmap_mmio(vm, ppt); + ppt_unmap_mmio_all(vm, ppt); ppt_teardown_msi(ppt); ppt_teardown_msix(ppt); iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev)); @@ -473,6 +473,35 @@ return (ENOENT); } +int +ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, + vm_paddr_t gpa, size_t len) +{ + int i, 
error; + struct pptseg *seg; + struct pptdev *ppt; + + ppt = ppt_find(bus, slot, func); + if (ppt != NULL) { + if (ppt->vm != vm) + return (EBUSY); + + for (i = 0; i < MAX_MMIOSEGS; i++) { + seg = &ppt->mmio[i]; + if (seg->gpa == gpa && seg->len == len) { + error = vm_unmap_mmio(vm, gpa, len); + if (error == 0) { + seg->gpa = 0; + seg->len = 0; + } + return (error); + } + } + return (ENOSPC); + } + return (ENOENT); +} + static int pptintr(void *arg) { Index: sys/amd64/vmm/vmm_dev.c =================================================================== --- sys/amd64/vmm/vmm_dev.c +++ sys/amd64/vmm/vmm_dev.c @@ -435,6 +435,7 @@ break; case VM_MAP_PPTDEV_MMIO: + case VM_UNMAP_PPTDEV_MMIO: case VM_BIND_PPTDEV: case VM_UNBIND_PPTDEV: #ifdef COMPAT_FREEBSD12 @@ -520,6 +521,11 @@ pptmmio->func, pptmmio->gpa, pptmmio->len, pptmmio->hpa); break; + case VM_UNMAP_PPTDEV_MMIO: + pptmmio = (struct vm_pptdev_mmio *)data; + error = ppt_unmap_mmio(sc->vm, pptmmio->bus, pptmmio->slot, + pptmmio->func, pptmmio->gpa, pptmmio->len); + break; case VM_BIND_PPTDEV: pptdev = (struct vm_pptdev *)data; error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot, Index: sys/dev/pci/pcireg.h =================================================================== --- sys/dev/pci/pcireg.h +++ sys/dev/pci/pcireg.h @@ -1098,3 +1098,28 @@ #define PCIM_OSC_CTL_PCIE_PME 0x04 /* PCIe Native Power Mgt Events */ #define PCIM_OSC_CTL_PCIE_AER 0x08 /* PCIe Advanced Error Reporting */ #define PCIM_OSC_CTL_PCIE_CAP_STRUCT 0x10 /* Various Capability Structures */ + +/* + * GVT-d definitions + */ +#define IGD_DEVID_COFFEELAKE 0x3E00 /* 0x3Exx */ +#define IGD_DEVID_KABYLAKE 0x5900 /* 0x59xx */ +#define IGD_DEVID_SKYLAKE 0x1900 /* 0x19xx */ +#define IGD_DEVID_BROADWELL 0x1600 /* 0x16xx */ +#define IGD_DEVID_HASWELL 0x0400 /* 0x04xx */ +#define IGD_DEVID_IVYBRIDGE_1 0x0160 /* 0x016x */ +#define IGD_DEVID_IVYBRIDGE_0 0x0150 /* 0x015x */ +#define IGD_DEVID_SANDYBRIDGE_2 0x0120 /* 0x012x */ +#define 
IGD_DEVID_SANDYBRIDGE_1 0x0110 /* 0x011x */ +#define IGD_DEVID_SANDYBRIDGE_0 0x0100 /* 0x010x */ +#define IGD_DEVID_WESTMERE 0x0040 /* 0x004x */ +#define PCIR_GGC 0x50 /* GMCH Graphics Control register */ +#define PCIR_BDSM 0x5C /* Base Data of Stolen Memory register */ +#define PCIR_ASLS_CTL 0xFC /* Opregion start address register */ +#define PCIM_GEN5_75_GGC_GMS_MASK 0x000000F0 /* Bits 7:4 contain Graphics Mode Select */ +#define PCIM_GEN6_GGC_GMS_MASK 0x000000F8 /* Bits 7:3 contain Graphics Mode Select */ +#define PCIM_GEN8_GGC_GMS_MASK 0x0000FF00 /* Bits 15:8 contain Graphics Mode Select */ +#define PCIM_BDSM_GSM_MASK 0xFFF00000 /* Bits 31:20 contain base address of gsm */ +#define PCIM_ASLS_OPREGION_MASK 0xFFFFF000 /* Opregion is 4k aligned */ +#define GPU_GSM_SIZE 0x04000000 /* Size of Graphics Stolen Memory (fallback if detection fails) */ +#define GPU_OPREGION_SIZE 0x00004000 /* Size of Opregion */ Index: usr.sbin/bhyve/Makefile =================================================================== --- usr.sbin/bhyve/Makefile +++ usr.sbin/bhyve/Makefile @@ -42,7 +42,9 @@ pci_emul.c \ pci_hda.c \ pci_fbuf.c \ + pci_gvt-d.c \ pci_hostbridge.c \ + pci_igd_lpc.c \ pci_irq.c \ pci_lpc.c \ pci_nvme.c \ Index: usr.sbin/bhyve/pci_emul.h =================================================================== --- usr.sbin/bhyve/pci_emul.h +++ usr.sbin/bhyve/pci_emul.h @@ -40,6 +40,8 @@ #include +#include + #define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */ struct vmctx; @@ -92,6 +94,7 @@ enum pcibar_type type; /* io or memory */ uint64_t size; uint64_t addr; + uint8_t lobits; }; #define PI_NAMESZ 40 @@ -223,6 +226,9 @@ enum pcibar_type type, uint64_t size); int pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase, enum pcibar_type type, uint64_t size); +uint64_t pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask); +void unregister_bar_gvt_d(struct pci_devinst *pi, int idx); +void register_bar_gvt_d(struct 
pci_devinst *pi, int idx); int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum); int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type); void pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, @@ -298,4 +304,13 @@ return (*(uint32_t *)(pi->pi_cfgdata + offset)); } +static __inline int +is_gvt_d(struct pci_devinst *pi) +{ + if (strcmp(pi->pi_d->pe_emu, "gvt-d") == 0) + return (1); + else + return (0); +} + #endif /* _PCI_EMUL_H_ */ Index: usr.sbin/bhyve/pci_emul.c =================================================================== --- usr.sbin/bhyve/pci_emul.c +++ usr.sbin/bhyve/pci_emul.c @@ -509,15 +509,19 @@ static void unregister_bar(struct pci_devinst *pi, int idx) { - - modify_bar_registration(pi, idx, 0); + if (!is_gvt_d(pi)) + modify_bar_registration(pi, idx, 0); + else + unregister_bar_gvt_d(pi, idx); } static void register_bar(struct pci_devinst *pi, int idx) { - - modify_bar_registration(pi, idx, 1); + if (!is_gvt_d(pi)) + modify_bar_registration(pi, idx, 1); + else + register_bar_gvt_d(pi, idx); } /* Are we decoding i/o port accesses for the emulated pci device? */ @@ -684,6 +688,44 @@ return (0); } +// mask should be a power of 2 minus 1 (e.g. 
0x000FFFFF) +uint64_t +pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask) +{ + int error; + + error = 1; + + uint64_t *baseptr, limit, base; + + switch (type) { + case PCIBAR_IO: + baseptr = &pci_emul_iobase; + limit = PCI_EMUL_IOLIMIT; + break; + case PCIBAR_MEM32: + baseptr = &pci_emul_membase32; + limit = PCI_EMUL_MEMLIMIT32; + break; + case PCIBAR_MEM64: + baseptr = &pci_emul_membase64; + limit = PCI_EMUL_MEMLIMIT64; + break; + default: + return 0; + } + + // align base + base = (*baseptr + mask) & ~mask; + + if (base + size > limit) + return 0; + + *baseptr = base + size; + + return base; +} + #define CAP_START_OFFSET 0x40 static int pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) Index: usr.sbin/bhyve/pci_gvt-d.c =================================================================== --- /dev/null +++ usr.sbin/bhyve/pci_gvt-d.c @@ -0,0 +1,761 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "inout.h"
+#include "pci_passthru.h"
+
+#define KB (1024UL)
+#define MB (1024 * 1024UL)
+#define GB (1024 * 1024 * 1024UL)
+
+static int
+gvt_d_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, uint32_t *eax, void *arg)
+{
+	struct pci_devinst *pdi = arg;
+	struct pci_devemu *pe = pdi->pi_d;
+	uint64_t offset;
+	int i;
+
+	for (i = 0; i <= PCI_BARMAX; i++) {
+		if (pdi->pi_bar[i].type == PCIBAR_IO &&
+		    port >= pdi->pi_bar[i].addr &&
+		    port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) {
+			offset = port - pdi->pi_bar[i].addr;
+			if (in)
+				*eax = (*pe->pe_barread)(ctx, vcpu, pdi, i,
+				    offset, bytes);
+			else
+				(*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset,
+				    bytes, *eax);
+			return (0);
+		}
+	}
+	return (-1);
+}
+
+void
+unregister_bar_gvt_d(struct pci_devinst *pi, int idx)
+{
+	int error = 0;	/* init: PCIBAR_NONE/MEMHI64 reach done: without assigning */
+	struct passthru_softc *sc;
+	struct inout_port iop;
+
+	if (pi->pi_bar[idx].addr == 0)
+		return;
+
+	sc = pi->pi_arg;
+
+	switch (pi->pi_bar[idx].type) {
+	case PCIBAR_NONE:
+	case PCIBAR_MEMHI64:
+		break;
+	case PCIBAR_IO:
+		/*
+		 * ToDo: Passthrough IO
+		 *
+		 * Use IO-Bitmap to emulate access to IO ports
+		 * This would prevent VM_EXIT on access to specified IO ports
+		 */
+		bzero(&iop, sizeof(struct inout_port));
+		iop.name = pi->pi_name;
+		iop.port = pi->pi_bar[idx].addr;
+		iop.size = pi->pi_bar[idx].size;
+		error = unregister_inout(&iop);
+		break;
+	case PCIBAR_MEM32:
+	case PCIBAR_MEM64:
+		if (idx != pci_msix_table_bar(pi)) {
+			error = vm_unmap_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, pi->pi_bar[idx].addr, pi->pi_bar[idx].size);
+		}
+		// special handling for msix table
+		else {
+			uint32_t table_offset, table_size;
+			uint32_t gpa, len;
+
+			table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
+			table_size = pi->pi_msix.table_offset - table_offset;
+			table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
+			table_size = roundup2(table_size, 4096);
+
+			gpa = pi->pi_bar[idx].addr;
+			len = table_offset;
+
+			// unmap everything before MSI-X table
+			if (len > 0) {
+				if ((error = vm_unmap_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len)) != 0)
+					goto done;
+			}
+
+			gpa += table_offset + table_size;
+			len = pi->pi_bar[idx].size - (table_offset + table_size);
+
+			// unmap everything behind MSI-X table
+			if (len > 0) {
+				if ((error = vm_unmap_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len)) != 0)
+					goto done;
+			}
+		}
+		break;
+	}
+
+done:
+	if (error != 0)
+		err(1, __func__);
+}
+
+void
+register_bar_gvt_d(struct pci_devinst *pi, int idx)
+{
+	int error = 0;	/* init: PCIBAR_NONE/MEMHI64 reach done: without assigning */
+	struct passthru_softc *sc;
+	struct inout_port iop;
+
+	sc = pi->pi_arg;
+
+	switch (pi->pi_bar[idx].type) {
+	case PCIBAR_NONE:
+	case PCIBAR_MEMHI64:
+		break;
+	case PCIBAR_IO:
+		/*
+		 * ToDo: Passthrough IO
+		 *
+		 * Use IO-Bitmap to emulate access to IO ports
+		 * Prevent VM_EXIT on access to specified IO ports
+		 */
+		bzero(&iop, sizeof(struct inout_port));
+		iop.name = pi->pi_name;
+		iop.port = pi->pi_bar[idx].addr;
+		iop.size = pi->pi_bar[idx].size;
+		iop.flags = IOPORT_F_INOUT;
+		iop.handler = gvt_d_io_handler;
+		iop.arg = pi;
+		error = register_inout(&iop);
+		break;
+	case PCIBAR_MEM32:
+	case PCIBAR_MEM64:
+		if (idx != pci_msix_table_bar(pi)) {
+			error = vm_map_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, pi->pi_bar[idx].addr, pi->pi_bar[idx].size, sc->psc_bar[idx].addr);
+			/*
+			 * If the guest writes a new value to a 64-bit BAR, two writes are necessary.
+			 * vm_map_pptdev_mmio can fail in that case due to an invalid address after the first write.
+			 */
+			if (error != 0) {
+				pi->pi_bar[idx].addr = 0;
+				error = 0;
+			}
+		}
+		// special handling for msix table
+		else {
+			uint32_t table_offset, table_size;
+			uint32_t gpa, len, hpa;
+
+			table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
+			table_size = pi->pi_msix.table_offset - table_offset;
+			table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
+			table_size = roundup2(table_size, 4096);
+
+			hpa = sc->psc_bar[idx].addr;
+			gpa = pi->pi_bar[idx].addr;
+			len = table_offset;
+
+			// map everything before MSI-X table
+			if (len > 0) {
+				if ((error = vm_map_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len, hpa)) != 0)
+					goto done;
+			}
+
+			hpa += table_offset + table_size;
+			gpa += table_offset + table_size;
+			len = pi->pi_bar[idx].size - (table_offset + table_size);
+
+			// map everything behind MSI-X table
+			if (len > 0) {
+				if ((error = vm_map_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len, hpa)) != 0)
+					goto done;
+			}
+		}
+		break;
+	}
+
+done:
+	if (error != 0)
+		err(1, __func__);
+}
+/*
+ * GVT-d: Handler for passthru of igd
+ */
+struct igd_funcs {
+	uint64_t (*get_opregion_hpa)(struct vmctx *ctx, struct passthru_softc *sc);
+	uint64_t (*get_gsm_hpa)(struct vmctx *ctx, struct passthru_softc *sc);
+	uint64_t (*get_opregion_size)(struct vmctx *ctx, struct passthru_softc *sc);
+	uint64_t (*get_gsm_size)(struct vmctx *ctx, struct passthru_softc *sc);
+	void (*set_opregion_gpa)(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa);
+	void (*set_gsm_gpa)(struct
vmctx *ctx, struct passthru_softc *sc, uint64_t gpa); +}; + +/* + * GVT-d: Handler for igd of gen5.75 (Westmere) + */ + +static uint64_t +igd_gen5_75_get_opregion_hpa(struct vmctx *ctx, struct passthru_softc *sc) +{ + return read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4) & PCIM_ASLS_OPREGION_MASK; +} + +static uint64_t +igd_gen5_75_get_gsm_hpa(struct vmctx *ctx, struct passthru_softc *sc) +{ + return read_config(&sc->psc_sel, PCIR_BDSM, 4) & PCIM_BDSM_GSM_MASK; +} + +static uint64_t +igd_gen5_75_get_opregion_size(struct vmctx *ctx, struct passthru_softc *sc) +{ + return GPU_OPREGION_SIZE; +} + +static uint64_t +igd_gen5_75_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint64_t gsm_size; + + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN5_75_GGC_GMS_MASK) >> 4; /* Bits 7:4 contain Graphics Mode Select */ + switch (gms_val) { + case 0x05: + gsm_size = 32*MB; + break; + case 0x06: + gsm_size = 48*MB; + break; + case 0x07: + gsm_size = 64*MB; + break; + case 0x08: + gsm_size = 128*MB; + break; + case 0x09: + gsm_size = 256*MB; + break; + case 0x0A: + gsm_size = 96*MB; + break; + case 0x0B: + gsm_size = 160*MB; + break; + case 0x0C: + gsm_size = 224*MB; + break; + case 0x0D: + gsm_size = 352*MB; + break; + default: + gsm_size = GPU_GSM_SIZE; + warnx("Unknown Graphic Mode (%x): Fallback to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB); + break; + } + + return gsm_size; +} + +static void +igd_gen5_75_set_opregion_gpa(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa) +{ + uint32_t asls_val = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4); + pci_set_cfgdata32(sc->psc_pi, PCIR_ASLS_CTL, gpa | (asls_val & ~PCIM_ASLS_OPREGION_MASK)); +} + +static void +igd_gen5_75_set_gsm_gpa(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa) +{ + uint32_t bdsm_val = read_config(&sc->psc_sel, PCIR_BDSM, 4); + pci_set_cfgdata32(sc->psc_pi, PCIR_BDSM, gpa | (bdsm_val & ~PCIM_BDSM_GSM_MASK)); +} + +/* 
+ * GVT-d: Handler for igd of gen6 (Sandy Bridge) + */ +static uint64_t +igd_gen6_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint64_t gsm_size; + + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN6_GGC_GMS_MASK) >> 3; /* Bits 7:3 contain Graphics Mode Select */ + if (gms_val <= 0x10) + gsm_size = gms_val * 32*MB; + else { + gsm_size = GPU_GSM_SIZE; + warnx("Unknown Graphic Mode (%x): Fallback to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB); + } + + return gsm_size; +} + +/* + * GVT-d: Handler for igd of gen8 (Broadwell) + */ +static uint64_t +igd_gen8_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint64_t gsm_size; + + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >> 8; /* Bits 15:8 contain Graphics Mode Select */ + if (gms_val <= 0x10) + gsm_size = gms_val * 32*MB; + else if (gms_val == 0x20) + gsm_size = 1024*MB; + else if (gms_val == 0x30) + gsm_size = 1536*MB; + else if (gms_val == 0x3F) + gsm_size = 2016*MB; + else { + gsm_size = GPU_GSM_SIZE; + warnx("Unknown Graphic Mode (%x): Fallback to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB); + } + + return gsm_size; +} + +/* + * GVT-d: Handler for igd of gen9 (Skylake) + */ +static uint64_t +igd_gen9_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc) +{ + uint64_t gsm_size; + + uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2); + uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >> 8; /* Bits 15:8 contain Graphics Mode Select */ + if (gms_val <= 0x10) + gsm_size = gms_val * 32*MB; + else if (gms_val == 0x20) + gsm_size = 1024*MB; + else if (gms_val == 0x30) + gsm_size = 1536*MB; + else if (gms_val == 0x40) + gsm_size = 2048*MB; + else if (gms_val >= 0xF0 && gms_val <= 0xFE) + gsm_size = gms_val * 4*MB; + else { + gsm_size = GPU_GSM_SIZE; + warnx("Unknown Graphic Mode (%x): Fallback to %lu MB of Graphics Stolen 
Memory.", gms_val, gsm_size / MB); + } + + return gsm_size; +} + +// Westmere +struct igd_funcs igd_gen5_75 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen5_75_get_gsm_size, + .set_opregion_gpa = igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; +// Sandy Bridge +struct igd_funcs igd_gen6 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen6_get_gsm_size, + .set_opregion_gpa = igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; +// Ivy Bridge +struct igd_funcs igd_gen7 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen6_get_gsm_size, + .set_opregion_gpa = igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; +// Haswell +struct igd_funcs igd_gen7_5 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen6_get_gsm_size, + .set_opregion_gpa = igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; +// Broadwell +struct igd_funcs igd_gen8 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen8_get_gsm_size, + .set_opregion_gpa = igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; +// Skylake +struct igd_funcs igd_gen9 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen9_get_gsm_size, + .set_opregion_gpa = 
igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; +// Kabylake & Coffeelake +struct igd_funcs igd_gen9_5 = { + .get_opregion_hpa = igd_gen5_75_get_opregion_hpa, + .get_gsm_hpa = igd_gen5_75_get_gsm_hpa, + .get_opregion_size = igd_gen5_75_get_opregion_size, + .get_gsm_size = igd_gen9_get_gsm_size, + .set_opregion_gpa = igd_gen5_75_set_opregion_gpa, + .set_gsm_gpa = igd_gen5_75_set_gsm_gpa +}; + +static int +gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) +{ + int error; + uint32_t opregion_hpa, opregion_gpa, opregion_size, gsm_hpa, gsm_gpa, gsm_size; + struct passthru_softc *sc; + + error = 1; + + if ((error = passthru_init(ctx, pi, opts)) != 0) + goto done; + + sc = pi->pi_arg; + + /* Enable Memory and IO Space for device */ + uint16_t cmd = read_config(&sc->psc_sel, PCIR_COMMAND, 0x02); + cmd |= PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; + write_config(&sc->psc_sel, PCIR_COMMAND, 0x02, cmd); + pci_set_cfgdata16(pi, PCIR_COMMAND, cmd); + /* Use same prefetchable property as physical BAR */ + for (int i = 0; i < PCI_BARMAX; ++i) { + uint32_t bar = pci_get_cfgdata32(pi, PCIR_BAR(i)); + switch (pi->pi_bar[i].type) { + case PCIBAR_IO: + bar &= PCIM_BAR_IO_BASE; + bar |= sc->psc_bar[i].lobits; + break; + case PCIBAR_MEM32: + case PCIBAR_MEM64: + bar &= PCIM_BAR_MEM_BASE; + bar |= sc->psc_bar[i].lobits; + break; + } + pci_set_cfgdata32(pi, PCIR_BAR(i), bar); + } + + sc = pi->pi_arg; + + uint16_t dev_vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 2); + uint16_t dev_id = read_config(&sc->psc_sel, PCIR_DEVICE, 2); + + if (dev_vendor != 0x8086) { + error = -ENODEV; + warnx("Unknown vendor (%x) of igd", dev_vendor); + goto done; + } + + /* + * GVT-d: Create LPC-Device at 0:1f.0 + * + * Otherwise GOP-Driver wouldn't work for Windows + */ + printf("Add igd-lpc at slot 0:1f.0 to enable GVT-d for igd\n"); + if ((error = pci_parse_slot("0:31:0,igd-lpc")) != 0) { + warnx("Failed to add igd-lpc"); + goto done; + } + + /* + * GVT-d: Get IGD funcs + 
*/ + struct igd_funcs *igd; + + switch (dev_id & 0xFFF0) { + case IGD_DEVID_WESTMERE: + igd = &igd_gen5_75; + break; + case IGD_DEVID_SANDYBRIDGE_0: + case IGD_DEVID_SANDYBRIDGE_1: + case IGD_DEVID_SANDYBRIDGE_2: + igd = &igd_gen6; + break; + case IGD_DEVID_IVYBRIDGE_0: + case IGD_DEVID_IVYBRIDGE_1: + igd = &igd_gen7; + break; + default: + switch (dev_id & 0xFF00) { + case IGD_DEVID_HASWELL: + igd = &igd_gen7_5; + break; + case IGD_DEVID_BROADWELL: + igd = &igd_gen8; + break; + case IGD_DEVID_SKYLAKE: + igd = &igd_gen9; + break; + case IGD_DEVID_KABYLAKE: + case IGD_DEVID_COFFEELAKE: + igd = &igd_gen9_5; + break; + default: + warnx("Unsupported igd-device (%x): Try using gen9 graphics code path.", dev_id); + igd = &igd_gen9; + break; + } + break; + } + + /* + * GVT-d: Get hpa and size of Opregion and GSM + */ + opregion_hpa = igd->get_opregion_hpa(ctx, sc); + gsm_hpa = igd->get_gsm_hpa(ctx, sc); + opregion_size = igd->get_opregion_size(ctx, sc); + gsm_size = igd->get_gsm_size(ctx, sc); + + /* + * GVT-d: Allocate Opregion and GSM in guest space + */ + if ((opregion_gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, opregion_size, ~PCIM_ASLS_OPREGION_MASK)) == 0) { + error = -ENOMEM; + goto done; + } + if ((gsm_gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, gsm_size, ~PCIM_BDSM_GSM_MASK)) == 0) { + error = -ENOMEM; + goto done; + } + + /* + * GVT-d: Write address of Opregion and GSM into PCI register + */ + igd->set_opregion_gpa(ctx, sc, opregion_gpa); + igd->set_gsm_gpa(ctx, sc, gsm_gpa); + + /* + * GVT-d: Map Opregion and GSM into guest space + */ + if ((error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, opregion_gpa, opregion_size, opregion_hpa)) != 0) + goto done; + if ((error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gsm_gpa, gsm_size, gsm_hpa)) != 0) + goto done; + + +done: + if (error) { + vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, opregion_gpa, 
opregion_size); + vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gsm_gpa, gsm_size); + int bus, slot, func; + if (sc) { + bus = sc->psc_sel.pc_bus; + slot = sc->psc_sel.pc_dev; + func = sc->psc_sel.pc_func; + free(sc); + } + if (bus != 0 || slot != 0 || func != 0) + vm_unassign_pptdev(ctx, bus, slot, func); + } + return error; +} + +static int +gvt_d_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t val) +{ + /* + * Prevent write to BDSM and ASLS_CTL + * + * BDSM: contains Base of Data Stolen Memory + * ASLS_CTL: contains address of Opregion + */ + if (coff == PCIR_BDSM || coff == PCIR_ASLS_CTL) { + return (0); + } else if (bar_access(coff)) { + struct passthru_softc *sc; + int idx, update_idx; + + sc = pi->pi_arg; + idx = (coff - PCIR_BAR(0)) / 4; + + switch (pi->pi_bar[idx].type) + { + case PCIBAR_NONE: + pi->pi_bar[idx].addr = 0; + break; + case PCIBAR_IO: + case PCIBAR_MEM32: + case PCIBAR_MEM64: + case PCIBAR_MEMHI64: + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + update_idx = idx - 1; + else + update_idx = idx; + + uint16_t cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); + if ((cmd & PCIM_CMD_MEMEN && pi->pi_bar[idx].type != PCIBAR_IO) || + (cmd & PCIM_CMD_PORTEN && pi->pi_bar[idx].type == PCIBAR_IO)) { + unregister_bar_gvt_d(pi, update_idx); + } + + if (val != ~0U) { + uint64_t mask, bar; + mask = ~(pi->pi_bar[update_idx].size - 1); + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + mask >>= 32; + bar = val & mask; + if (pi->pi_bar[idx].type != PCIBAR_MEMHI64) + bar |= sc->psc_bar[update_idx].lobits; + pci_set_cfgdata32(pi, coff, bar); + + uint32_t lo, hi; + lo = pci_get_cfgdata32(pi, PCIR_BAR(update_idx)) & ~0x0F; + if (pi->pi_bar[update_idx].type == PCIBAR_MEM64) + hi = pci_get_cfgdata32(pi, PCIR_BAR(update_idx + 1)); + else + hi = 0; + if (lo != ~0U && hi != ~0U) { + pi->pi_bar[update_idx].addr = (uint64_t)lo | ((uint64_t)hi << 32U); + if ((cmd & PCIM_CMD_MEMEN && 
pi->pi_bar[idx].type != PCIBAR_IO) || + (cmd & PCIM_CMD_PORTEN && pi->pi_bar[idx].type == PCIBAR_IO)) { + register_bar_gvt_d(pi, update_idx); + } + } + else + pi->pi_bar[update_idx].addr = 0; + } + else { + pci_set_cfgdata32(pi, coff, ~0U); + pi->pi_bar[update_idx].addr = 0; + } + break; + default: + break; + } + return (0); + } else { + return passthru_cfgwrite(ctx, vcpu, pi, coff, bytes, val); + } +} + +static int +gvt_d_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t *rv) +{ + /* + * Emulate BDSM and ASLS_CTL + * + * BDSM: contains Base of Data Stolen Memory + * ASLS_CTL: contains address of Opregion + */ + if ((coff >= PCIR_BDSM && coff < PCIR_BDSM + 4) || (coff >= PCIR_ASLS_CTL && coff < PCIR_ASLS_CTL + 4)) { + return (-1); + } else if (bar_access(coff)) { + struct passthru_softc *sc; + int idx, update_idx; + + sc = pi->pi_arg; + idx = (coff - PCIR_BAR(0)) / 4; + + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + update_idx = idx - 1; + else + update_idx = idx; + + if (pci_get_cfgdata32(pi, 0x10 + idx * 0x04) == ~0U) { + uint64_t size = ~(uint64_t)(pi->pi_bar[update_idx].size - 1); + size |= sc->psc_bar[update_idx].lobits; + if (pi->pi_bar[idx].type == PCIBAR_MEMHI64) + *rv = size >> 32; + else + *rv = size; + if (bytes == 1) + *rv = *rv >> (coff & 0x3); + else if (bytes == 2) + *rv = *rv >> (coff & 0x1); + else + *rv = *rv; + } + else { + if (bytes == 1) + *rv = pci_get_cfgdata8(pi, coff); + else if (bytes == 2) + *rv = pci_get_cfgdata16(pi, coff); + else + *rv = pci_get_cfgdata32(pi, coff); + } + return (0); + } else { + return passthru_cfgread(ctx, vcpu, pi, coff, bytes, rv); + } +} + +static void +gvt_d_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size, uint64_t value) +{ + passthru_write(ctx, vcpu, pi, baridx, offset, size, value); +} + +static uint64_t +gvt_d_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size) +{ + return 
passthru_read(ctx, vcpu, pi, baridx, offset, size); +} + +struct pci_devemu gvt_d = { + .pe_emu = "gvt-d", + .pe_init = gvt_d_init, + .pe_cfgwrite = gvt_d_cfgwrite, + .pe_cfgread = gvt_d_cfgread, + .pe_barwrite = gvt_d_write, + .pe_barread = gvt_d_read +}; +PCI_EMUL_SET(gvt_d); Index: usr.sbin/bhyve/pci_igd_lpc.c =================================================================== --- /dev/null +++ usr.sbin/bhyve/pci_igd_lpc.c @@ -0,0 +1,110 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "pci_emul.h" + +#ifndef _PATH_DEVPCI +#define _PATH_DEVPCI "/dev/pci" +#endif + +int pcifd = -1; + +static uint32_t +read_config(long reg, int width) +{ + struct pci_io pi; + + bzero(&pi, sizeof(pi)); + // igd-lpc is always connected to 0:1f.0 + pi.pi_sel.pc_domain = 0; + pi.pi_sel.pc_bus = 0; + pi.pi_sel.pc_dev = 0x1f; + pi.pi_sel.pc_func = 0; + pi.pi_reg = reg; + pi.pi_width = width; + + if (ioctl(pcifd, PCIOCREAD, &pi) < 0) + return (0); + else + return (pi.pi_data); +} + +static int +pci_igd_lpc_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) +{ + // only allow igd-lpc on 0:1f.0 + if (pi->pi_bus != 0 || pi->pi_slot != 0x1f || pi->pi_func != 0x00) { + warn("igd-lpc only allowed on 0:1f.0"); + return (-1); + } + + // open host device + if (pcifd < 0) { + pcifd = open(_PATH_DEVPCI, O_RDWR, 0); + if (pcifd < 0) { + warn("failed to open %s", _PATH_DEVPCI); + return (-1); + } + } + + /* + * The VID, DID, REVID, SUBVID and SUBDID of igd-lpc need aligned with physical one. + * Without these physical values, GVT-d GOP driver couldn't work. 
+ */ + pci_set_cfgdata16(pi, PCIR_DEVICE, read_config(PCIR_DEVICE, 2)); + pci_set_cfgdata16(pi, PCIR_VENDOR, read_config(PCIR_VENDOR, 2)); + pci_set_cfgdata8(pi, PCIR_REVID, read_config(PCIR_REVID, 1)); + pci_set_cfgdata16(pi, PCIR_SUBVEND_0, read_config(PCIR_SUBVEND_0, 2)); + pci_set_cfgdata16(pi, PCIR_SUBDEV_0, read_config(PCIR_SUBDEV_0, 2)); + + return (0); +} + +struct pci_devemu pci_de_igd_lpc = { + .pe_emu = "igd-lpc", + .pe_init = pci_igd_lpc_init +}; +PCI_EMUL_SET(pci_de_igd_lpc); Index: usr.sbin/bhyve/pci_passthru.h =================================================================== --- /dev/null +++ usr.sbin/bhyve/pci_passthru.h @@ -0,0 +1,63 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __PCI_PASSTHRU_H__ +#define __PCI_PASSTHRU_H__ + +#include + +#include + +#include "pci_emul.h" + +struct passthru_softc { + struct pci_devinst *psc_pi; + struct pcibar psc_bar[PCI_BARMAX + 1]; + struct { + int capoff; + int msgctrl; + int emulated; + } psc_msi; + struct { + int capoff; + } psc_msix; + struct pcisel psc_sel; +}; + +uint32_t read_config(const struct pcisel *sel, long reg, int width); +void write_config(const struct pcisel *sel, long reg, int width, uint32_t data); +int bar_access(int coff); +int passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts); +int passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t val); +int passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t *rv); +void passthru_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size, uint64_t value); +uint64_t passthru_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size); + +#endif Index: usr.sbin/bhyve/pci_passthru.c =================================================================== --- usr.sbin/bhyve/pci_passthru.c +++ usr.sbin/bhyve/pci_passthru.c @@ -58,9 +58,8 @@ #include #include -#include -#include "pci_emul.h" #include "mem.h" +#include "pci_passthru.h" #ifndef _PATH_DEVPCI #define _PATH_DEVPCI "/dev/pci" @@ -83,20 +82,6 @@ static 
int iofd = -1; static int memfd = -1; -struct passthru_softc { - struct pci_devinst *psc_pi; - struct pcibar psc_bar[PCI_BARMAX + 1]; - struct { - int capoff; - int msgctrl; - int emulated; - } psc_msi; - struct { - int capoff; - } psc_msix; - struct pcisel psc_sel; -}; - static int msi_caplen(int msgctrl) { @@ -119,7 +104,7 @@ return (len); } -static uint32_t +uint32_t read_config(const struct pcisel *sel, long reg, int width) { struct pci_io pi; @@ -135,7 +120,7 @@ return (pi.pi_data); } -static void +void write_config(const struct pcisel *sel, long reg, int width, uint32_t data) { struct pci_io pi; @@ -581,18 +566,33 @@ sc->psc_bar[i].type = bartype; sc->psc_bar[i].size = size; sc->psc_bar[i].addr = base; + sc->psc_bar[i].lobits = 0; /* Allocate the BAR in the guest I/O or MMIO space */ error = pci_emul_alloc_pbar(pi, i, base, bartype, size); if (error) return (-1); + /* Save prefetchable property of physical bar */ + uint8_t lobits = pci_get_cfgdata8(pi, PCIR_BAR(i)); + if (bartype == PCIBAR_MEM32 || bartype == PCIBAR_MEM64) { + if (bar.pbi_base & PCIM_BAR_MEM_PREFETCH) + lobits |= PCIM_BAR_MEM_PREFETCH; + else + lobits &= ~PCIM_BAR_MEM_PREFETCH; + sc->psc_bar[i].lobits = lobits & ~PCIM_BAR_MEM_BASE; + } + else { + lobits |= PCIM_BAR_IO_SPACE; + sc->psc_bar[i].lobits = lobits & ~PCIM_BAR_IO_BASE; + } + /* The MSI-X table needs special handling */ if (i == pci_msix_table_bar(pi)) { error = init_msix_table(ctx, sc, base); if (error) return (-1); - } else if (bartype != PCIBAR_IO) { + } else if (bartype != PCIBAR_IO && !is_gvt_d(pi)) { /* Map the physical BAR in the guest MMIO space */ error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, @@ -647,7 +647,7 @@ return (error); } -static int +int passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) { int bus, slot, func, error, memflags; @@ -743,7 +743,7 @@ return (error); } -static int +int bar_access(int coff) { if (coff >= PCIR_BAR(0) && coff < 
PCIR_BAR(PCI_BARMAX + 1)) @@ -778,7 +778,7 @@ coff < sc->psc_msix.capoff + MSIX_CAPLEN); } -static int +int passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t *rv) { @@ -822,7 +822,7 @@ return (0); } -static int +int passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int coff, int bytes, uint32_t val) { @@ -898,7 +898,7 @@ return (0); } -static void +void passthru_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size, uint64_t value) { @@ -921,7 +921,7 @@ } } -static uint64_t +uint64_t passthru_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size) {