D26209: GVT-d support for bhyve
D26209.id77351.diff
Index: lib/libvmmapi/vmmapi.h
===================================================================
--- lib/libvmmapi/vmmapi.h
+++ lib/libvmmapi/vmmapi.h
@@ -176,6 +176,8 @@
int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
+int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len);
int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot,
int func, uint64_t addr, uint64_t msg, int numvec);
int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot,
Index: lib/libvmmapi/vmmapi.c
===================================================================
--- lib/libvmmapi/vmmapi.c
+++ lib/libvmmapi/vmmapi.c
@@ -980,6 +980,26 @@
return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}
+int
+vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len)
+{
+ struct vm_pptdev_mmio pptmmio;
+
+ bzero(&pptmmio, sizeof(pptmmio));
+ pptmmio.bus = bus;
+ pptmmio.slot = slot;
+ pptmmio.func = func;
+ pptmmio.gpa = gpa;
+ pptmmio.len = len;
+ pptmmio.hpa = 0;
+
+ if (gpa == 0)
+ return (0);
+
+ return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
+}
+
int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
uint64_t addr, uint64_t msg, int numvec)
@@ -1640,7 +1660,7 @@
VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
- VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
+ VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
VM_PPTDEV_MSIX, VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
Index: sys/amd64/include/vmm_dev.h
===================================================================
--- sys/amd64/include/vmm_dev.h
+++ sys/amd64/include/vmm_dev.h
@@ -299,6 +299,7 @@
IOCNUM_BIND_PPTDEV = 40,
IOCNUM_UNBIND_PPTDEV = 41,
IOCNUM_MAP_PPTDEV_MMIO = 42,
+ IOCNUM_UNMAP_PPTDEV_MMIO = 45,
IOCNUM_PPTDEV_MSI = 43,
IOCNUM_PPTDEV_MSIX = 44,
@@ -409,6 +410,8 @@
_IOW('v', IOCNUM_UNBIND_PPTDEV, struct vm_pptdev)
#define VM_MAP_PPTDEV_MMIO \
_IOW('v', IOCNUM_MAP_PPTDEV_MMIO, struct vm_pptdev_mmio)
+#define VM_UNMAP_PPTDEV_MMIO \
+ _IOW('v', IOCNUM_UNMAP_PPTDEV_MMIO, struct vm_pptdev_mmio)
#define VM_PPTDEV_MSI \
_IOW('v', IOCNUM_PPTDEV_MSI, struct vm_pptdev_msi)
#define VM_PPTDEV_MSIX \
Index: sys/amd64/vmm/io/ppt.h
===================================================================
--- sys/amd64/vmm/io/ppt.h
+++ sys/amd64/vmm/io/ppt.h
@@ -34,6 +34,8 @@
int ppt_unassign_all(struct vm *vm);
int ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
+int ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len);
int ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
uint64_t addr, uint64_t msg, int numvec);
int ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
Index: sys/amd64/vmm/io/ppt.c
===================================================================
--- sys/amd64/vmm/io/ppt.c
+++ sys/amd64/vmm/io/ppt.c
@@ -218,7 +218,7 @@
}
static void
-ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt)
+ppt_unmap_mmio_all(struct vm *vm, struct pptdev *ppt)
{
int i;
struct pptseg *seg;
@@ -414,7 +414,7 @@
pci_save_state(ppt->dev);
ppt_pci_reset(ppt->dev);
pci_restore_state(ppt->dev);
- ppt_unmap_mmio(vm, ppt);
+ ppt_unmap_mmio_all(vm, ppt);
ppt_teardown_msi(ppt);
ppt_teardown_msix(ppt);
iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
@@ -473,6 +473,35 @@
return (ENOENT);
}
+int
+ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len)
+{
+ int i, error;
+ struct pptseg *seg;
+ struct pptdev *ppt;
+
+ ppt = ppt_find(bus, slot, func);
+ if (ppt != NULL) {
+ if (ppt->vm != vm)
+ return (EBUSY);
+
+ for (i = 0; i < MAX_MMIOSEGS; i++) {
+ seg = &ppt->mmio[i];
+ if (seg->gpa == gpa && seg->len == len) {
+ error = vm_unmap_mmio(vm, gpa, len);
+ if (error == 0) {
+ seg->gpa = 0;
+ seg->len = 0;
+ }
+ return (error);
+ }
+ }
+ return (ENOSPC);
+ }
+ return (ENOENT);
+}
+
static int
pptintr(void *arg)
{
Index: sys/amd64/vmm/vmm_dev.c
===================================================================
--- sys/amd64/vmm/vmm_dev.c
+++ sys/amd64/vmm/vmm_dev.c
@@ -435,6 +435,7 @@
break;
case VM_MAP_PPTDEV_MMIO:
+ case VM_UNMAP_PPTDEV_MMIO:
case VM_BIND_PPTDEV:
case VM_UNBIND_PPTDEV:
#ifdef COMPAT_FREEBSD12
@@ -520,6 +521,11 @@
pptmmio->func, pptmmio->gpa, pptmmio->len,
pptmmio->hpa);
break;
+ case VM_UNMAP_PPTDEV_MMIO:
+ pptmmio = (struct vm_pptdev_mmio *)data;
+ error = ppt_unmap_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
+ pptmmio->func, pptmmio->gpa, pptmmio->len);
+ break;
case VM_BIND_PPTDEV:
pptdev = (struct vm_pptdev *)data;
error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
Index: sys/dev/pci/pcireg.h
===================================================================
--- sys/dev/pci/pcireg.h
+++ sys/dev/pci/pcireg.h
@@ -1098,3 +1098,28 @@
#define PCIM_OSC_CTL_PCIE_PME 0x04 /* PCIe Native Power Mgt Events */
#define PCIM_OSC_CTL_PCIE_AER 0x08 /* PCIe Advanced Error Reporting */
#define PCIM_OSC_CTL_PCIE_CAP_STRUCT 0x10 /* Various Capability Structures */
+
+/*
+ * GVT-d definitions
+ */
+#define IGD_DEVID_COFFEELAKE 0x3E00 /* 0x3Exx */
+#define IGD_DEVID_KABYLAKE 0x5900 /* 0x59xx */
+#define IGD_DEVID_SKYLAKE 0x1900 /* 0x19xx */
+#define IGD_DEVID_BROADWELL 0x1600 /* 0x16xx */
+#define IGD_DEVID_HASWELL 0x0400 /* 0x04xx */
+#define IGD_DEVID_IVYBRIDGE_1 0x0160 /* 0x016x */
+#define IGD_DEVID_IVYBRIDGE_0 0x0150 /* 0x015x */
+#define IGD_DEVID_SANDYBRIDGE_2 0x0120 /* 0x012x */
+#define IGD_DEVID_SANDYBRIDGE_1 0x0110 /* 0x011x */
+#define IGD_DEVID_SANDYBRIDGE_0 0x0100 /* 0x010x */
+#define IGD_DEVID_WESTMERE 0x0040 /* 0x004x */
+#define PCIR_GGC 0x50 /* GMCH Graphics Control register */
+#define PCIR_BDSM 0x5C /* Base Data of Stolen Memory register */
+#define PCIR_ASLS_CTL 0xFC /* Opregion start address register */
+#define PCIM_GEN5_75_GGC_GMS_MASK 0x000000F0 /* Bits 7:4 contain Graphics Mode Select */
+#define PCIM_GEN6_GGC_GMS_MASK 0x000000F8 /* Bits 7:3 contain Graphics Mode Select */
+#define PCIM_GEN8_GGC_GMS_MASK 0x0000FF00 /* Bits 15:8 contain Graphics Mode Select */
+#define PCIM_BDSM_GSM_MASK 0xFFF00000 /* Bits 31:20 contain base address of gsm */
+#define PCIM_ASLS_OPREGION_MASK 0xFFFFF000 /* Opregion is 4k aligned */
+#define GPU_GSM_SIZE 0x04000000 /* Size of Graphics Stolen Memory (fallback if detection fails) */
+#define GPU_OPREGION_SIZE 0x00004000 /* Size of Opregion */
Index: usr.sbin/bhyve/Makefile
===================================================================
--- usr.sbin/bhyve/Makefile
+++ usr.sbin/bhyve/Makefile
@@ -43,6 +43,7 @@
pci_hda.c \
pci_fbuf.c \
pci_hostbridge.c \
+ pci_igd_lpc.c \
pci_irq.c \
pci_lpc.c \
pci_nvme.c \
Index: usr.sbin/bhyve/pci_emul.h
===================================================================
--- usr.sbin/bhyve/pci_emul.h
+++ usr.sbin/bhyve/pci_emul.h
@@ -92,6 +92,7 @@
enum pcibar_type type; /* io or memory */
uint64_t size;
uint64_t addr;
+ uint8_t lobits;
};
#define PI_NAMESZ 40
@@ -223,6 +224,9 @@
enum pcibar_type type, uint64_t size);
int pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx,
uint64_t hostbase, enum pcibar_type type, uint64_t size);
+uint64_t pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask);
+void unregister_bar_passthru(struct pci_devinst *pi, int idx);
+void register_bar_passthru(struct pci_devinst *pi, int idx);
int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type);
void pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes,
Index: usr.sbin/bhyve/pci_emul.c
===================================================================
--- usr.sbin/bhyve/pci_emul.c
+++ usr.sbin/bhyve/pci_emul.c
@@ -459,6 +459,15 @@
return (pci_emul_alloc_pbar(pdi, idx, 0, type, size));
}
+static bool
+is_passthru(struct pci_devinst *pi)
+{
+ if (strcmp(pi->pi_d->pe_emu, "passthru") == 0)
+ return true;
+ else
+ return false;
+}
+
/*
* Register (or unregister) the MMIO or I/O region associated with the BAR
* register 'idx' of an emulated pci device.
@@ -509,15 +518,19 @@
static void
unregister_bar(struct pci_devinst *pi, int idx)
{
-
- modify_bar_registration(pi, idx, 0);
+ if (!is_passthru(pi))
+ modify_bar_registration(pi, idx, 0);
+ else
+ unregister_bar_passthru(pi, idx);
}
static void
register_bar(struct pci_devinst *pi, int idx)
{
-
- modify_bar_registration(pi, idx, 1);
+ if (!is_passthru(pi))
+ modify_bar_registration(pi, idx, 1);
+ else
+ register_bar_passthru(pi, idx);
}
/* Are we decoding i/o port accesses for the emulated pci device? */
@@ -677,13 +690,57 @@
}
cmd = pci_get_cfgdata16(pdi, PCIR_COMMAND);
- if ((cmd & enbit) != enbit)
- pci_set_cfgdata16(pdi, PCIR_COMMAND, cmd | enbit);
- register_bar(pdi, idx);
+ if (is_passthru(pdi)) {
+ if ((cmd & enbit) == enbit && idx != pci_msix_table_bar(pdi))
+ register_bar(pdi, idx);
+ }
+ else {
+ if ((cmd & enbit) != enbit)
+ pci_set_cfgdata16(pdi, PCIR_COMMAND, cmd | enbit);
+ register_bar(pdi, idx);
+ }
return (0);
}
+// mask should be a power of 2 minus 1 (e.g. 0x000FFFFF)
+uint64_t
+pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask)
+{
+ int error;
+
+ error = 1;
+
+ uint64_t *baseptr, limit, base;
+
+ switch (type) {
+ case PCIBAR_IO:
+ baseptr = &pci_emul_iobase;
+ limit = PCI_EMUL_IOLIMIT;
+ break;
+ case PCIBAR_MEM32:
+ baseptr = &pci_emul_membase32;
+ limit = PCI_EMUL_MEMLIMIT32;
+ break;
+ case PCIBAR_MEM64:
+ baseptr = &pci_emul_membase64;
+ limit = PCI_EMUL_MEMLIMIT64;
+ break;
+ default:
+ return 0;
+ }
+
+ // align base
+ base = (*baseptr + mask) & ~mask;
+
+ if (base + size > limit)
+ return 0;
+
+ *baseptr = base + size;
+
+ return base;
+}
+
#define CAP_START_OFFSET 0x40
static int
pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen)
@@ -1799,9 +1856,9 @@
/*
* Ignore all writes beyond the standard config space and return all
- * ones on reads.
+	 * ones on reads for non-passthru devices.
*/
- if (coff >= PCI_REGMAX + 1) {
+ if (coff >= PCI_REGMAX + 1 && !is_passthru(pi)) {
if (in) {
*eax = 0xffffffff;
/*
@@ -1830,8 +1887,14 @@
needcfg = 1;
}
- if (needcfg)
- *eax = CFGREAD(pi, coff, bytes);
+ if (needcfg) {
+ if (coff <= PCI_REGMAX)
+ *eax = CFGREAD(pi, coff, bytes);
+ else if (coff <= PCI_REGMAX + 4)
+ *eax = 0x00000000;
+ else
+ *eax = 0xFFFFFFFF;
+ }
pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax);
} else {
@@ -1903,7 +1966,7 @@
pci_emul_capwrite(pi, coff, bytes, *eax, 0, 0);
} else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) {
pci_emul_cmdsts_write(pi, coff, *eax, bytes);
- } else {
+ } else if (coff <= PCI_REGMAX) {
CFGWRITE(pi, coff, *eax, bytes);
}
}
Index: usr.sbin/bhyve/pci_igd_lpc.c
===================================================================
--- /dev/null
+++ usr.sbin/bhyve/pci_igd_lpc.c
@@ -0,0 +1,110 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/pciio.h>
+#include <machine/vmm.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <vmmapi.h>
+
+#include "pci_emul.h"
+
+#ifndef _PATH_DEVPCI
+#define _PATH_DEVPCI "/dev/pci"
+#endif
+
+static int pcifd = -1;
+
+static uint32_t
+read_config(long reg, int width)
+{
+ struct pci_io pi;
+
+ bzero(&pi, sizeof(pi));
+ // igd-lpc is always connected to 0:1f.0
+ pi.pi_sel.pc_domain = 0;
+ pi.pi_sel.pc_bus = 0;
+ pi.pi_sel.pc_dev = 0x1f;
+ pi.pi_sel.pc_func = 0;
+ pi.pi_reg = reg;
+ pi.pi_width = width;
+
+ if (ioctl(pcifd, PCIOCREAD, &pi) < 0)
+ return (0);
+ else
+ return (pi.pi_data);
+}
+
+static int
+pci_igd_lpc_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+ // only allow igd-lpc on 0:1f.0
+ if (pi->pi_bus != 0 || pi->pi_slot != 0x1f || pi->pi_func != 0x00) {
+		warnx("igd-lpc is only allowed at 0:1f.0");
+ return (-1);
+ }
+
+ // open host device
+ if (pcifd < 0) {
+ pcifd = open(_PATH_DEVPCI, O_RDWR, 0);
+ if (pcifd < 0) {
+ warn("failed to open %s", _PATH_DEVPCI);
+ return (-1);
+ }
+ }
+
+ /*
+	 * The VID, DID, REVID, SUBVID and SUBDID of igd-lpc must match the physical LPC bridge.
+	 * Without these physical values the GVT-d GOP driver does not work.
+ */
+ pci_set_cfgdata16(pi, PCIR_DEVICE, read_config(PCIR_DEVICE, 2));
+ pci_set_cfgdata16(pi, PCIR_VENDOR, read_config(PCIR_VENDOR, 2));
+ pci_set_cfgdata8(pi, PCIR_REVID, read_config(PCIR_REVID, 1));
+ pci_set_cfgdata16(pi, PCIR_SUBVEND_0, read_config(PCIR_SUBVEND_0, 2));
+ pci_set_cfgdata16(pi, PCIR_SUBDEV_0, read_config(PCIR_SUBDEV_0, 2));
+
+ return (0);
+}
+
+struct pci_devemu pci_de_igd_lpc = {
+ .pe_emu = "igd-lpc",
+ .pe_init = pci_igd_lpc_init
+};
+PCI_EMUL_SET(pci_de_igd_lpc);
Index: usr.sbin/bhyve/pci_passthru.c
===================================================================
--- usr.sbin/bhyve/pci_passthru.c
+++ usr.sbin/bhyve/pci_passthru.c
@@ -60,6 +60,7 @@
#include <machine/vmm.h>
#include <vmmapi.h>
#include "pci_emul.h"
+#include "inout.h"
#include "mem.h"
#ifndef _PATH_DEVPCI
@@ -79,10 +80,19 @@
#define MSIX_TABLE_COUNT(ctrl) (((ctrl) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
#define MSIX_CAPLEN 12
+#define KB (1024UL)
+#define MB (1024 * 1024UL)
+#define GB (1024 * 1024 * 1024UL)
+
static int pcifd = -1;
static int iofd = -1;
static int memfd = -1;
+enum passthru_type {
+ PASSTHRU_DEFAULT = 0,
+ PASSTHRU_IGD
+};
+
struct passthru_softc {
struct pci_devinst *psc_pi;
struct pcibar psc_bar[PCI_BARMAX + 1];
@@ -95,6 +105,7 @@
int capoff;
} psc_msix;
struct pcisel psc_sel;
+ enum passthru_type psc_type;
};
static int
@@ -149,6 +160,180 @@
(void)ioctl(pcifd, PCIOCWRITE, &pi); /* XXX */
}
+static int
+pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
+ uint32_t *eax, void *arg)
+{
+ struct pci_devinst *pdi = arg;
+ struct pci_devemu *pe = pdi->pi_d;
+ uint64_t offset;
+ int i;
+
+ for (i = 0; i <= PCI_BARMAX; i++) {
+ if (pdi->pi_bar[i].type == PCIBAR_IO &&
+ port >= pdi->pi_bar[i].addr &&
+ port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) {
+ offset = port - pdi->pi_bar[i].addr;
+ if (in)
+ *eax = (*pe->pe_barread)(ctx, vcpu, pdi, i,
+ offset, bytes);
+ else
+ (*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset,
+ bytes, *eax);
+ return (0);
+ }
+ }
+ return (-1);
+}
+
+void unregister_bar_passthru(struct pci_devinst *pi, int idx)
+{
+	int error = 0;
+ struct passthru_softc *sc;
+ struct inout_port iop;
+
+ if (pi->pi_bar[idx].addr == 0)
+ return;
+
+ sc = pi->pi_arg;
+
+ switch (pi->pi_bar[idx].type) {
+ case PCIBAR_NONE:
+ case PCIBAR_MEMHI64:
+ break;
+ case PCIBAR_IO:
+ /*
+ * ToDo: Passthrough IO
+ *
+ * Use IO-Bitmap to emulate access to IO ports
+ * Prevent VM_EXIT on access to specified IO ports
+ */
+ bzero(&iop, sizeof(struct inout_port));
+ iop.name = pi->pi_name;
+ iop.port = pi->pi_bar[idx].addr;
+ iop.size = pi->pi_bar[idx].size;
+ error = unregister_inout(&iop);
+ break;
+ case PCIBAR_MEM32:
+ case PCIBAR_MEM64:
+ if (idx != pci_msix_table_bar(pi)) {
+ error = vm_unmap_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, pi->pi_bar[idx].addr, pi->pi_bar[idx].size);
+ }
+ // special handling for msix table
+ else {
+ uint32_t table_offset, table_size;
+			uint64_t gpa, len;
+
+ table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
+ table_size = pi->pi_msix.table_offset - table_offset;
+ table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
+ table_size = roundup2(table_size, 4096);
+
+ gpa = pi->pi_bar[idx].addr;
+ len = table_offset;
+
+			// unmap everything before the MSI-X table
+ if (len > 0) {
+ if ((error = vm_unmap_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len)) != 0)
+ goto done;
+ }
+
+ gpa += table_offset + table_size;
+ len = pi->pi_bar[idx].size - (table_offset + table_size);
+
+			// unmap everything after the MSI-X table
+ if (len > 0) {
+ if ((error = vm_unmap_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len)) != 0)
+ goto done;
+ }
+ }
+ break;
+ }
+
+done:
+ if (error != 0)
+ err(1, __func__);
+}
+
+void register_bar_passthru(struct pci_devinst *pi, int idx)
+{
+	int error = 0;
+ struct passthru_softc *sc;
+ struct inout_port iop;
+
+ sc = pi->pi_arg;
+
+ switch (pi->pi_bar[idx].type) {
+ case PCIBAR_NONE:
+ case PCIBAR_MEMHI64:
+ break;
+ case PCIBAR_IO:
+ /*
+ * ToDo: Passthrough IO
+ *
+ * Use IO-Bitmap to emulate access to IO ports
+ * Prevent VM_EXIT on access to specified IO ports
+ */
+ bzero(&iop, sizeof(struct inout_port));
+ iop.name = pi->pi_name;
+ iop.port = pi->pi_bar[idx].addr;
+ iop.size = pi->pi_bar[idx].size;
+ iop.flags = IOPORT_F_INOUT;
+ iop.handler = pci_emul_io_handler;
+ iop.arg = pi;
+ error = register_inout(&iop);
+ break;
+ case PCIBAR_MEM32:
+ case PCIBAR_MEM64:
+ if (idx != pci_msix_table_bar(pi)) {
+ error = vm_map_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, pi->pi_bar[idx].addr, pi->pi_bar[idx].size, sc->psc_bar[idx].addr);
+ /*
+			 * If the guest writes a new value to a 64-bit BAR, two writes are necessary.
+ * vm_map_pptdev_mmio can fail in that case due to an invalid address after the first write.
+ */
+ if (error != 0) {
+ pi->pi_bar[idx].addr = 0;
+ error = 0;
+ }
+ }
+ // special handling for msix table
+ else {
+ uint32_t table_offset, table_size;
+			uint64_t gpa, len, hpa;
+
+ table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
+ table_size = pi->pi_msix.table_offset - table_offset;
+ table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
+ table_size = roundup2(table_size, 4096);
+
+ hpa = sc->psc_bar[idx].addr;
+ gpa = pi->pi_bar[idx].addr;
+ len = table_offset;
+
+			// map everything before the MSI-X table
+ if (len > 0) {
+ if ((error = vm_map_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len, hpa)) != 0)
+ goto done;
+ }
+
+ hpa += table_offset + table_size;
+ gpa += table_offset + table_size;
+ len = pi->pi_bar[idx].size - (table_offset + table_size);
+
+			// map everything after the MSI-X table
+ if (len > 0) {
+ if ((error = vm_map_pptdev_mmio(pi->pi_vmctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gpa, len, hpa)) != 0)
+ goto done;
+ }
+ }
+ break;
+ }
+
+done:
+ if (error != 0)
+ err(1, __func__);
+}
+
#ifdef LEGACY_SUPPORT
static int
passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr)
@@ -581,24 +766,34 @@
sc->psc_bar[i].type = bartype;
sc->psc_bar[i].size = size;
sc->psc_bar[i].addr = base;
+ sc->psc_bar[i].lobits = 0;
/* Allocate the BAR in the guest I/O or MMIO space */
error = pci_emul_alloc_pbar(pi, i, base, bartype, size);
if (error)
return (-1);
+ /*
+ * For passthru devices use same prefetchable property as physical bar
+ */
+ if (bartype == PCIBAR_MEM32 || bartype == PCIBAR_MEM64)
+ {
+		uint8_t lobits = pci_get_cfgdata8(pi, PCIR_BAR(i));
+ if (bar.pbi_base & PCIM_BAR_MEM_PREFETCH)
+ lobits |= PCIM_BAR_MEM_PREFETCH;
+ else
+ lobits &= ~PCIM_BAR_MEM_PREFETCH;
+ sc->psc_bar[i].lobits = lobits & 0xF;
+		pci_set_cfgdata8(pi, PCIR_BAR(i), lobits);
+ }
+ else
+ sc->psc_bar[i].lobits = PCIM_BAR_IO_SPACE;
+
/* The MSI-X table needs special handling */
if (i == pci_msix_table_bar(pi)) {
error = init_msix_table(ctx, sc, base);
if (error)
return (-1);
- } else if (bartype != PCIBAR_IO) {
- /* Map the physical BAR in the guest MMIO space */
- error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
- sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
- pi->pi_bar[i].addr, pi->pi_bar[i].size, base);
- if (error)
- return (-1);
}
/*
@@ -633,23 +828,36 @@
goto done;
}
+ /*
+ * Set command register before init of BARs
+ *
+ * cfginitbar checks command register to decide whether to register a new BAR or not
+ */
+ pci_set_cfgdata16(pi, PCIR_COMMAND, read_config(&sc->psc_sel,
+ PCIR_COMMAND, 2));
+
if (cfginitbar(ctx, sc) != 0) {
warnx("failed to initialize BARs for PCI %d/%d/%d",
bus, slot, func);
goto done;
}
- pci_set_cfgdata16(pi, PCIR_COMMAND, read_config(&sc->psc_sel,
- PCIR_COMMAND, 2));
-
error = 0; /* success */
done:
return (error);
}
+
+/*
+ * GVT-d: Declare modified funcs for passthrough of igd-device
+ */
+static int
+passthru_init_igd(struct vmctx *ctx, struct passthru_softc *sc);
+
static int
passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
+	char *opt = NULL;
int bus, slot, func, error, memflags;
struct passthru_softc *sc;
#ifndef WITHOUT_CAPSICUM
@@ -721,6 +929,12 @@
warnx("invalid passthru options");
return (error);
}
+
+ if ((opt = strchr(opts, ',')) != NULL)
+ {
+ *opt = '\0';
+ opt = opt + 1;
+ }
if (vm_assign_pptdev(ctx, bus, slot, func) != 0) {
warnx("PCI device at %d/%d/%d is not using the ppt(4) driver",
@@ -734,7 +948,18 @@
sc->psc_pi = pi;
/* initialize config space */
- error = cfginit(ctx, pi, bus, slot, func);
+ if ((error = cfginit(ctx, pi, bus, slot, func)) != 0)
+ goto done;
+
+ // init igd (integrated graphics device)
+ if (opt != NULL && strcmp(opt, "igd") == 0) {
+ if ((error = passthru_init_igd(ctx, sc)) != 0) {
+ warnx("Failed to init igd");
+ goto done;
+ }
+ }
+
+ error = 0; /* success */
done:
if (error) {
free(sc);
@@ -743,6 +968,349 @@
return (error);
}
+/*
+ * GVT-d: Handler for passthru of igd
+ */
+struct igd_funcs {
+ uint64_t (*get_opregion_hpa)(struct vmctx *ctx, struct passthru_softc *sc);
+ uint64_t (*get_gsm_hpa)(struct vmctx *ctx, struct passthru_softc *sc);
+ uint64_t (*get_opregion_size)(struct vmctx *ctx, struct passthru_softc *sc);
+ uint64_t (*get_gsm_size)(struct vmctx *ctx, struct passthru_softc *sc);
+ void (*set_opregion_gpa)(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa);
+ void (*set_gsm_gpa)(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa);
+};
+
+/*
+ * GVT-d: Handler for igd of gen5.75 (Westmere)
+ */
+
+static uint64_t
+igd_gen5_75_get_opregion_hpa(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ return read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4) & PCIM_ASLS_OPREGION_MASK;
+}
+
+static uint64_t
+igd_gen5_75_get_gsm_hpa(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ return read_config(&sc->psc_sel, PCIR_BDSM, 4) & PCIM_BDSM_GSM_MASK;
+}
+
+static uint64_t
+igd_gen5_75_get_opregion_size(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ return GPU_OPREGION_SIZE;
+}
+
+static uint64_t
+igd_gen5_75_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint64_t gsm_size;
+
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN5_75_GGC_GMS_MASK) >> 4; /* Bits 7:4 contain Graphics Mode Select */
+ switch (gms_val) {
+ case 0x05:
+ gsm_size = 32*MB;
+ break;
+ case 0x06:
+ gsm_size = 48*MB;
+ break;
+ case 0x07:
+ gsm_size = 64*MB;
+ break;
+ case 0x08:
+ gsm_size = 128*MB;
+ break;
+ case 0x09:
+ gsm_size = 256*MB;
+ break;
+ case 0x0A:
+ gsm_size = 96*MB;
+ break;
+ case 0x0B:
+ gsm_size = 160*MB;
+ break;
+ case 0x0C:
+ gsm_size = 224*MB;
+ break;
+ case 0x0D:
+ gsm_size = 352*MB;
+ break;
+ default:
+ gsm_size = GPU_GSM_SIZE;
+		warnx("Unknown Graphics Mode (%x): falling back to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB);
+ break;
+ }
+
+ return gsm_size;
+}
+
+static void
+igd_gen5_75_set_opregion_gpa(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa)
+{
+ uint32_t asls_val = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4);
+ pci_set_cfgdata32(sc->psc_pi, PCIR_ASLS_CTL, gpa | (asls_val & ~PCIM_ASLS_OPREGION_MASK));
+}
+
+static void
+igd_gen5_75_set_gsm_gpa(struct vmctx *ctx, struct passthru_softc *sc, uint64_t gpa)
+{
+ uint32_t bdsm_val = read_config(&sc->psc_sel, PCIR_BDSM, 4);
+ pci_set_cfgdata32(sc->psc_pi, PCIR_BDSM, gpa | (bdsm_val & ~PCIM_BDSM_GSM_MASK));
+}
+
+/*
+ * GVT-d: Handler for igd of gen6 (Sandy Bridge)
+ */
+static uint64_t
+igd_gen6_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint64_t gsm_size;
+
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN6_GGC_GMS_MASK) >> 3; /* Bits 7:3 contain Graphics Mode Select */
+ if (gms_val <= 0x10)
+ gsm_size = gms_val * 32*MB;
+ else {
+ gsm_size = GPU_GSM_SIZE;
+		warnx("Unknown Graphics Mode (%x): falling back to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB);
+ }
+
+ return gsm_size;
+}
+
+/*
+ * GVT-d: Handler for igd of gen8 (Broadwell)
+ */
+static uint64_t
+igd_gen8_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint64_t gsm_size;
+
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >> 8; /* Bits 15:8 contain Graphics Mode Select */
+ if (gms_val <= 0x10)
+ gsm_size = gms_val * 32*MB;
+ else if (gms_val == 0x20)
+ gsm_size = 1024*MB;
+ else if (gms_val == 0x30)
+ gsm_size = 1536*MB;
+ else if (gms_val == 0x3F)
+ gsm_size = 2016*MB;
+ else {
+ gsm_size = GPU_GSM_SIZE;
+		warnx("Unknown Graphics Mode (%x): falling back to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB);
+ }
+
+ return gsm_size;
+}
+
+/*
+ * GVT-d: Handler for igd of gen9 (Skylake)
+ */
+static uint64_t
+igd_gen9_get_gsm_size(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint64_t gsm_size;
+
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >> 8; /* Bits 15:8 contain Graphics Mode Select */
+ if (gms_val <= 0x10)
+ gsm_size = gms_val * 32*MB;
+ else if (gms_val == 0x20)
+ gsm_size = 1024*MB;
+ else if (gms_val == 0x30)
+ gsm_size = 1536*MB;
+ else if (gms_val == 0x40)
+ gsm_size = 2048*MB;
+ else if (gms_val >= 0xF0 && gms_val <= 0xFE)
+		gsm_size = (gms_val - 0xF0 + 1) * 4*MB;	/* 0xF0-0xFE: 4 MB increments starting at 4 MB */
+ else {
+ gsm_size = GPU_GSM_SIZE;
+		warnx("Unknown Graphics Mode (%x): falling back to %lu MB of Graphics Stolen Memory.", gms_val, gsm_size / MB);
+ }
+
+ return gsm_size;
+}
+
+// Westmere
+struct igd_funcs igd_gen5_75 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen5_75_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+// Sandy Bridge
+struct igd_funcs igd_gen6 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen6_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+// Ivy Bridge
+struct igd_funcs igd_gen7 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen6_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+// Haswell
+struct igd_funcs igd_gen7_5 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen6_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+// Broadwell
+struct igd_funcs igd_gen8 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen8_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+// Skylake
+struct igd_funcs igd_gen9 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen9_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+// Kabylake & Coffeelake
+struct igd_funcs igd_gen9_5 = {
+ .get_opregion_hpa = igd_gen5_75_get_opregion_hpa,
+ .get_gsm_hpa = igd_gen5_75_get_gsm_hpa,
+ .get_opregion_size = igd_gen5_75_get_opregion_size,
+ .get_gsm_size = igd_gen9_get_gsm_size,
+ .set_opregion_gpa = igd_gen5_75_set_opregion_gpa,
+ .set_gsm_gpa = igd_gen5_75_set_gsm_gpa
+};
+
+static int
+passthru_init_igd(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ int error;
+	uint32_t opregion_hpa = 0, opregion_gpa = 0, opregion_size = 0, gsm_hpa = 0, gsm_gpa = 0, gsm_size = 0;
+
+ error = 1;
+
+ sc->psc_type = PASSTHRU_IGD;
+
+ uint16_t dev_vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 2);
+ uint16_t dev_id = read_config(&sc->psc_sel, PCIR_DEVICE, 2);
+
+ if (dev_vendor != 0x8086) {
+ warnx("Unknown vendor (%x) of igd", dev_vendor);
+ goto done;
+ }
+
+ /*
+	 * GVT-d: Create an LPC device at 0:1f.0
+	 *
+	 * Without it the GOP driver does not work for Windows guests.
+ */
+ printf("Add igd-lpc at slot 0:1f.0 to enable GVT-d for igd\n");
+ if ((error = pci_parse_slot("0:31:0,igd-lpc")) != 0) {
+ warnx("Failed to add igd-lpc");
+ goto done;
+ }
+
+ /*
+ * GVT-d: Get IGD funcs
+ */
+ struct igd_funcs *igd;
+
+ switch (dev_id & 0xFFF0) {
+ case IGD_DEVID_WESTMERE:
+ igd = &igd_gen5_75;
+ break;
+ case IGD_DEVID_SANDYBRIDGE_0:
+ case IGD_DEVID_SANDYBRIDGE_1:
+ case IGD_DEVID_SANDYBRIDGE_2:
+ igd = &igd_gen6;
+ break;
+ case IGD_DEVID_IVYBRIDGE_0:
+ case IGD_DEVID_IVYBRIDGE_1:
+ igd = &igd_gen7;
+ break;
+ default:
+ switch (dev_id & 0xFF00) {
+ case IGD_DEVID_HASWELL:
+ igd = &igd_gen7_5;
+ break;
+ case IGD_DEVID_BROADWELL:
+ igd = &igd_gen8;
+ break;
+ case IGD_DEVID_SKYLAKE:
+ igd = &igd_gen9;
+ break;
+ case IGD_DEVID_KABYLAKE:
+ case IGD_DEVID_COFFEELAKE:
+ igd = &igd_gen9_5;
+ break;
+ default:
+			warnx("Unsupported igd device (%x): trying the gen9 graphics code path.", dev_id);
+ igd = &igd_gen9;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * GVT-d: Get hpa and size of Opregion and GSM
+ */
+ opregion_hpa = igd->get_opregion_hpa(ctx, sc);
+ gsm_hpa = igd->get_gsm_hpa(ctx, sc);
+ opregion_size = igd->get_opregion_size(ctx, sc);
+ gsm_size = igd->get_gsm_size(ctx, sc);
+
+ /*
+ * GVT-d: Allocate Opregion and GSM in guest space
+ */
+ if ((opregion_gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, opregion_size, ~PCIM_ASLS_OPREGION_MASK)) == 0) {
+ error = -ENOMEM;
+ goto done;
+ }
+ if ((gsm_gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, gsm_size, ~PCIM_BDSM_GSM_MASK)) == 0) {
+ error = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * GVT-d: Write address of Opregion and GSM into PCI register and protect their PCI register
+ */
+ igd->set_opregion_gpa(ctx, sc, opregion_gpa);
+ igd->set_gsm_gpa(ctx, sc, gsm_gpa);
+
+ /*
+ * GVT-d: Map Opregion and GSM into guest space
+ */
+ if ((error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, opregion_gpa, opregion_size, opregion_hpa)) != 0)
+ goto done;
+ if ((error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gsm_gpa, gsm_size, gsm_hpa)) != 0)
+ goto done;
+
+
+done:
+ if (error) {
+ vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, opregion_gpa, opregion_size);
+ vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev, sc->psc_sel.pc_func, gsm_gpa, gsm_size);
+ }
+ return (error);
+}
+
static int
bar_access(int coff)
{
@@ -779,7 +1347,7 @@
}
static int
-passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+passthru_cfgread_default(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int coff, int bytes, uint32_t *rv)
{
struct passthru_softc *sc;
@@ -787,11 +1355,47 @@
sc = pi->pi_arg;
/*
- * PCI BARs and MSI capability is emulated.
+ * MSI capability is emulated.
*/
- if (bar_access(coff) || msicap_access(sc, coff))
+ if (msicap_access(sc, coff))
return (-1);
+ if (bar_access(coff))
+ {
+ int idx, update_idx;
+
+ idx = (coff - PCIR_BAR(0)) / 4;
+
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ update_idx = idx - 1;
+ else
+ update_idx = idx;
+
+		if (pci_get_cfgdata32(pi, PCIR_BAR(idx)) == ~0U) {
+ uint64_t size = ~(uint64_t)(pi->pi_bar[update_idx].size - 1);
+ size |= sc->psc_bar[update_idx].lobits;
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ *rv = size >> 32;
+ else
+ *rv = size;
+ if (bytes == 1)
+				*rv = *rv >> ((coff & 0x3) * 8);
+ else if (bytes == 2)
+				*rv = *rv >> ((coff & 0x2) * 8);
+ else
+ *rv = *rv;
+ }
+ else {
+ if (bytes == 1)
+ *rv = pci_get_cfgdata8(pi, coff);
+ else if (bytes == 2)
+ *rv = pci_get_cfgdata16(pi, coff);
+ else
+ *rv = pci_get_cfgdata32(pi, coff);
+ }
+ return (0);
+ }
+
#ifdef LEGACY_SUPPORT
/*
* Emulate PCIR_CAP_PTR if this device does not support MSI capability
@@ -803,19 +1407,6 @@
}
#endif
- /*
- * Emulate the command register. If a single read reads both the
- * command and status registers, read the status register from the
- * device's config space.
- */
- if (coff == PCIR_COMMAND) {
- if (bytes <= 2)
- return (-1);
- *rv = read_config(&sc->psc_sel, PCIR_STATUS, 2) << 16 |
- pci_get_cfgdata16(pi, PCIR_COMMAND);
- return (0);
- }
-
/* Everything else just read from the device's config space */
*rv = read_config(&sc->psc_sel, coff, bytes);
@@ -823,7 +1414,35 @@
}
static int
-passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+passthru_cfgread_igd(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+ int coff, int bytes, uint32_t *rv)
+{
+ /*
+ * GVT-d: Emulate BDSM and ASLS_CTL
+ *
+ * BDSM: contains Base of Data Stolen Memory
+ * ASLS_CTL: contains address of Opregion
+ */
+ if ((coff >= PCIR_BDSM && coff < PCIR_BDSM + 4) || (coff >= PCIR_ASLS_CTL && coff < PCIR_ASLS_CTL + 4))
+ return (-1);
+ else
+ return passthru_cfgread_default(ctx, vcpu, pi, coff, bytes, rv);
+}
+
+static int
+passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+ int coff, int bytes, uint32_t *rv)
+{
+ switch (((struct passthru_softc *)pi->pi_arg)->psc_type) {
+ case PASSTHRU_IGD:
+ return passthru_cfgread_igd(ctx, vcpu, pi, coff, bytes, rv);
+ default:
+ return passthru_cfgread_default(ctx, vcpu, pi, coff, bytes, rv);
+ }
+}
+
+static int
+passthru_cfgwrite_default(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
int coff, int bytes, uint32_t val)
{
int error, msix_table_entries, i;
@@ -832,11 +1451,66 @@
sc = pi->pi_arg;
- /*
- * PCI BARs are emulated
- */
if (bar_access(coff))
- return (-1);
+ {
+ int idx, update_idx;
+ idx = (coff - PCIR_BAR(0)) / 4;
+ switch (pi->pi_bar[idx].type)
+ {
+ case PCIBAR_NONE:
+ pi->pi_bar[idx].addr = 0;
+ break;
+ case PCIBAR_IO:
+ case PCIBAR_MEM32:
+ case PCIBAR_MEM64:
+ case PCIBAR_MEMHI64:
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ update_idx = idx - 1;
+ else
+ update_idx = idx;
+
+ uint16_t cmd = read_config(&sc->psc_sel, PCIR_COMMAND, 2);
+ if ((cmd & PCIM_CMD_MEMEN && pi->pi_bar[idx].type != PCIBAR_IO) ||
+ (cmd & PCIM_CMD_PORTEN && pi->pi_bar[idx].type == PCIBAR_IO)) {
+ unregister_bar_passthru(pi, update_idx);
+ }
+
+ if (val != ~0U) {
+ uint64_t mask, bar;
+ mask = ~(pi->pi_bar[update_idx].size - 1);
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ mask >>= 32;
+ bar = val & mask;
+ if (pi->pi_bar[idx].type != PCIBAR_MEMHI64)
+ bar |= sc->psc_bar[update_idx].lobits;
+ pci_set_cfgdata32(pi, coff, bar);
+
+ uint32_t lo, hi;
+				lo = pci_get_cfgdata32(pi, PCIR_BAR(update_idx)) & ~0x0F;
+ if (pi->pi_bar[update_idx].type == PCIBAR_MEM64)
+					hi = pci_get_cfgdata32(pi, PCIR_BAR(update_idx + 1));
+ else
+ hi = 0;
+ if (lo != ~0U && hi != ~0U) {
+ pi->pi_bar[update_idx].addr = (uint64_t)lo | ((uint64_t)hi << 32U);
+ if ((cmd & PCIM_CMD_MEMEN && pi->pi_bar[idx].type != PCIBAR_IO) ||
+ (cmd & PCIM_CMD_PORTEN && pi->pi_bar[idx].type == PCIBAR_IO)) {
+ register_bar_passthru(pi, update_idx);
+ }
+ }
+ else
+ pi->pi_bar[update_idx].addr = 0;
+ }
+ else {
+ pci_set_cfgdata32(pi, coff, ~0U);
+ pi->pi_bar[update_idx].addr = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return (0);
+ }
/*
* MSI capability is emulated
@@ -892,12 +1566,42 @@
pci_set_cfgdata8(pi, PCIR_COMMAND, val);
else if (bytes == 2)
pci_set_cfgdata16(pi, PCIR_COMMAND, val);
+ else
+ pci_set_cfgdata32(pi, PCIR_COMMAND, val);
pci_emul_cmd_changed(pi, cmd_old);
}
return (0);
}
+static int
+passthru_cfgwrite_igd(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+ int coff, int bytes, uint32_t val)
+{
+ /*
+ * GVT-d: Prevent write to BDSM and ASLS_CTL
+ *
+ * BDSM: contains Base of Data Stolen Memory
+ * ASLS_CTL: contains address of Opregion
+ */
+ if (coff == PCIR_BDSM || coff == PCIR_ASLS_CTL)
+ return (0);
+ else
+ return passthru_cfgwrite_default(ctx, vcpu, pi, coff, bytes, val);
+}
+
+static int
+passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+ int coff, int bytes, uint32_t val)
+{
+ switch (((struct passthru_softc *)pi->pi_arg)->psc_type) {
+ case PASSTHRU_IGD:
+ return passthru_cfgwrite_igd(ctx, vcpu, pi, coff, bytes, val);
+ default:
+ return passthru_cfgwrite_default(ctx, vcpu, pi, coff, bytes, val);
+ }
+}
+
static void
passthru_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
uint64_t offset, int size, uint64_t value)
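
For illustration only (not part of the diff): a minimal sketch of how a libvmmapi consumer could pair the existing vm_map_pptdev_mmio() with the vm_unmap_pptdev_mmio() routine added above. The VM name, the 0/2/0 BDF and the guest/host addresses are made-up placeholders, and the device is assumed to already be bound to ppt(4) and assigned to the VM.

#include <sys/types.h>

#include <machine/vmm.h>

#include <err.h>
#include <vmmapi.h>

int
main(void)
{
	struct vmctx *ctx;

	/* Placeholder VM name; the VM must already exist. */
	ctx = vm_open("testvm");
	if (ctx == NULL)
		err(1, "vm_open");

	/* Map a 16 KB MMIO window of passthru device 0/2/0 at guest address 0xC0000000. */
	if (vm_map_pptdev_mmio(ctx, 0, 2, 0, 0xC0000000, 0x4000, 0xD0000000) != 0)
		err(1, "vm_map_pptdev_mmio");

	/* Tear the same mapping down again; this call is what the diff introduces. */
	if (vm_unmap_pptdev_mmio(ctx, 0, 2, 0, 0xC0000000, 0x4000) != 0)
		err(1, "vm_unmap_pptdev_mmio");

	return (0);
}

On the bhyve command line, the option parsing added to passthru_init() suggests an IGD would be passed through with a slot string along the lines of "-s 2,passthru,0/2/0,igd", where the ",igd" suffix selects passthru_init_igd(); this syntax is inferred from the diff, not from existing documentation.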