Page MenuHomeFreeBSD

D26209.id82836.diff
No OneTemporary

D26209.id82836.diff

diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -111,6 +111,8 @@
int vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid,
vm_ooffset_t segoff, size_t len, int prot);
+int vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len);
+
int vm_create(const char *name);
int vm_get_device_fd(struct vmctx *ctx);
struct vmctx *vm_open(const char *name);
@@ -176,6 +178,8 @@
int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
+int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len);
int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot,
int func, uint64_t addr, uint64_t msg, int numvec);
int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot,
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -251,6 +251,19 @@
return (0);
}
+int
+vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
+{
+ struct vm_munmap munmap;
+ int error;
+
+ munmap.gpa = gpa;
+ munmap.len = len;
+
+ error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
+ return (error);
+}
+
int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
@@ -980,6 +993,22 @@
return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}
+int
+vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len)
+{
+ struct vm_pptdev_mmio pptmmio;
+
+ bzero(&pptmmio, sizeof(pptmmio));
+ pptmmio.bus = bus;
+ pptmmio.slot = slot;
+ pptmmio.func = func;
+ pptmmio.gpa = gpa;
+ pptmmio.len = len;
+
+ return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
+}
+
int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
uint64_t addr, uint64_t msg, int numvec)
@@ -1644,7 +1673,7 @@
/* keep in sync with machine/vmm_dev.h */
static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG,
- VM_MMAP_GETNEXT, VM_SET_REGISTER, VM_GET_REGISTER,
+ VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
@@ -1654,7 +1683,7 @@
VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
- VM_PPTDEV_MSIX, VM_PPTDEV_DISABLE_MSIX,
+ VM_PPTDEV_MSIX, VM_PPTDEV_DISABLE_MSIX, VM_UNMAP_PPTDEV_MMIO,
VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -231,6 +231,7 @@
*/
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
size_t len, int prot, int flags);
+int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -49,6 +49,11 @@
#define VM_MEMMAP_F_WIRED 0x01
#define VM_MEMMAP_F_IOMMU 0x02
+struct vm_munmap {
+ vm_paddr_t gpa;
+ size_t len;
+};
+
#define VM_MEMSEG_NAME(m) ((m)->name[0] != '\0' ? (m)->name : NULL)
struct vm_memseg {
int segid;
@@ -270,6 +275,7 @@
IOCNUM_MMAP_MEMSEG = 16,
IOCNUM_MMAP_GETNEXT = 17,
IOCNUM_GLA2GPA_NOFAULT = 18,
+ IOCNUM_MUNMAP_MEMSEG = 19,
/* register/state accessors */
IOCNUM_SET_REGISTER = 20,
@@ -302,6 +308,7 @@
IOCNUM_PPTDEV_MSI = 43,
IOCNUM_PPTDEV_MSIX = 44,
IOCNUM_PPTDEV_DISABLE_MSIX = 45,
+ IOCNUM_UNMAP_PPTDEV_MMIO = 46,
/* statistics */
IOCNUM_VM_STATS = 50,
@@ -358,6 +365,8 @@
_IOW('v', IOCNUM_MMAP_MEMSEG, struct vm_memmap)
#define VM_MMAP_GETNEXT \
_IOWR('v', IOCNUM_MMAP_GETNEXT, struct vm_memmap)
+#define VM_MUNMAP_MEMSEG \
+ _IOW('v', IOCNUM_MUNMAP_MEMSEG, struct vm_munmap)
#define VM_SET_REGISTER \
_IOW('v', IOCNUM_SET_REGISTER, struct vm_register)
#define VM_GET_REGISTER \
@@ -416,6 +425,8 @@
_IOW('v', IOCNUM_PPTDEV_MSIX, struct vm_pptdev_msix)
#define VM_PPTDEV_DISABLE_MSIX \
_IOW('v', IOCNUM_PPTDEV_DISABLE_MSIX, struct vm_pptdev)
+#define VM_UNMAP_PPTDEV_MMIO \
+ _IOW('v', IOCNUM_UNMAP_PPTDEV_MMIO, struct vm_pptdev_mmio)
#define VM_INJECT_NMI \
_IOW('v', IOCNUM_INJECT_NMI, struct vm_nmi)
#define VM_STATS \
diff --git a/sys/amd64/vmm/io/ppt.h b/sys/amd64/vmm/io/ppt.h
--- a/sys/amd64/vmm/io/ppt.h
+++ b/sys/amd64/vmm/io/ppt.h
@@ -34,6 +34,8 @@
int ppt_unassign_all(struct vm *vm);
int ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
+int ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len);
int ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
uint64_t addr, uint64_t msg, int numvec);
int ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
diff --git a/sys/amd64/vmm/io/ppt.c b/sys/amd64/vmm/io/ppt.c
--- a/sys/amd64/vmm/io/ppt.c
+++ b/sys/amd64/vmm/io/ppt.c
@@ -224,7 +224,7 @@
}
static void
-ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt)
+ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt)
{
int i;
struct pptseg *seg;
@@ -412,7 +412,7 @@
pci_save_state(ppt->dev);
ppt_pci_reset(ppt->dev);
pci_restore_state(ppt->dev);
- ppt_unmap_mmio(vm, ppt);
+ ppt_unmap_all_mmio(vm, ppt);
ppt_teardown_msi(ppt);
ppt_teardown_msix(ppt);
iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
@@ -466,6 +466,35 @@
return (ENOSPC);
}
+int
+ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
+ vm_paddr_t gpa, size_t len)
+{
+ int i, error;
+ struct pptseg *seg;
+ struct pptdev *ppt;
+
+ ppt = ppt_find(bus, slot, func);
+ if (ppt != NULL) {
+ if (ppt->vm != vm)
+ return (EBUSY);
+
+ for (i = 0; i < MAX_MMIOSEGS; i++) {
+ seg = &ppt->mmio[i];
+ if (seg->gpa == gpa && seg->len == len && len != 0) {
+ error = vm_unmap_mmio(vm, seg->gpa, seg->len);
+ if (error == 0) {
+ seg->gpa = 0;
+ seg->len = 0;
+ }
+ return (error);
+ }
+ }
+ return (ENOENT);
+ }
+ return (ENOENT);
+}
+
static int
pptintr(void *arg)
{
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -797,6 +797,24 @@
return (0);
}
+int
+vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
+{
+ struct mem_map *m;
+ int i;
+
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ m = &vm->mem_maps[i];
+ if (m->gpa == gpa && m->len == len &&
+ (m->flags & VM_MEMMAP_F_IOMMU) == 0) {
+ vm_free_memmap(vm, i);
+ return (0);
+ }
+ }
+
+ return (EINVAL);
+}
+
int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -381,6 +381,7 @@
struct vm_rtc_time *rtctime;
struct vm_rtc_data *rtcdata;
struct vm_memmap *mm;
+ struct vm_munmap *mu;
struct vm_cpu_topology *topology;
struct vm_readwrite_kernemu_device *kernemu;
uint64_t *regvals;
@@ -435,6 +436,7 @@
break;
case VM_MAP_PPTDEV_MMIO:
+ case VM_UNMAP_PPTDEV_MMIO:
case VM_BIND_PPTDEV:
case VM_UNBIND_PPTDEV:
#ifdef COMPAT_FREEBSD12
@@ -442,6 +444,7 @@
#endif
case VM_ALLOC_MEMSEG:
case VM_MMAP_MEMSEG:
+ case VM_MUNMAP_MEMSEG:
case VM_REINIT:
/*
* ioctls that operate on the entire virtual machine must
@@ -525,6 +528,11 @@
pptmmio->func, pptmmio->gpa, pptmmio->len,
pptmmio->hpa);
break;
+ case VM_UNMAP_PPTDEV_MMIO:
+ pptmmio = (struct vm_pptdev_mmio *)data;
+ error = ppt_unmap_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
+ pptmmio->func, pptmmio->gpa, pptmmio->len);
+ break;
case VM_BIND_PPTDEV:
pptdev = (struct vm_pptdev *)data;
error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
@@ -643,6 +651,10 @@
error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
mm->len, mm->prot, mm->flags);
break;
+ case VM_MUNMAP_MEMSEG:
+ mu = (struct vm_munmap *)data;
+ error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len);
+ break;
#ifdef COMPAT_FREEBSD12
case VM_ALLOC_MEMSEG_FBSD12:
error = alloc_memseg(sc, (struct vm_memseg *)data,
diff --git a/sys/dev/pci/pcireg.h b/sys/dev/pci/pcireg.h
--- a/sys/dev/pci/pcireg.h
+++ b/sys/dev/pci/pcireg.h
@@ -1098,3 +1098,9 @@
#define PCIM_OSC_CTL_PCIE_PME 0x04 /* PCIe Native Power Mgt Events */
#define PCIM_OSC_CTL_PCIE_AER 0x08 /* PCIe Advanced Error Reporting */
#define PCIM_OSC_CTL_PCIE_CAP_STRUCT 0x10 /* Various Capability Structures */
+
+/*
+ * PCI Vendors
+ */
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_VENDOR_AMD 0x1022 /* note: AMD/ATI GPUs enumerate as ATI, 0x1002 */
diff --git a/usr.sbin/bhyve/Makefile b/usr.sbin/bhyve/Makefile
--- a/usr.sbin/bhyve/Makefile
+++ b/usr.sbin/bhyve/Makefile
@@ -41,6 +41,7 @@
pci_emul.c \
pci_hda.c \
pci_fbuf.c \
+ pci_gvt-d.c \
pci_hostbridge.c \
pci_irq.c \
pci_lpc.c \
diff --git a/usr.sbin/bhyve/pci_emul.h b/usr.sbin/bhyve/pci_emul.h
--- a/usr.sbin/bhyve/pci_emul.h
+++ b/usr.sbin/bhyve/pci_emul.h
@@ -40,6 +40,8 @@
#include <assert.h>
+#include <string.h>
+
#define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */
struct vmctx;
@@ -53,6 +55,9 @@
/* instance creation */
int (*pe_init)(struct vmctx *, struct pci_devinst *,
char *opts);
+ int (*pe_quirks_init)(struct vmctx *ctx, struct pci_devinst *pi,
+ char *opts);
+ void (*pe_quirks_deinit)(struct vmctx *ctx, struct pci_devinst *pi);
/* ACPI DSDT enumeration */
void (*pe_write_dsdt)(struct pci_devinst *);
@@ -73,6 +78,9 @@
struct pci_devinst *pi, int baridx,
uint64_t offset, int size);
+ int (*pe_baraddr)(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
+ int enabled, uint64_t address);
+
/* Save/restore device state */
int (*pe_snapshot)(struct vm_snapshot_meta *meta);
int (*pe_pause)(struct vmctx *ctx, struct pci_devinst *pi);
@@ -92,6 +100,7 @@
enum pcibar_type type; /* io or memory */
uint64_t size;
uint64_t addr;
+ uint8_t lobits;
};
#define PI_NAMESZ 40
@@ -221,6 +230,7 @@
void pci_callback(void);
int pci_emul_alloc_bar(struct pci_devinst *pdi, int idx,
enum pcibar_type type, uint64_t size);
+uint64_t pci_emul_alloc_gsm(uint64_t size);
int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type);
void pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes,
diff --git a/usr.sbin/bhyve/pci_emul.c b/usr.sbin/bhyve/pci_emul.c
--- a/usr.sbin/bhyve/pci_emul.c
+++ b/usr.sbin/bhyve/pci_emul.c
@@ -72,6 +72,8 @@
#define MAXSLOTS (PCI_SLOTMAX + 1)
#define MAXFUNCS (PCI_FUNCMAX + 1)
+#define GB (1024 * 1024 * 1024UL)
+
struct funcinfo {
char *fi_name;
char *fi_param;
@@ -101,10 +103,21 @@
SET_DECLARE(pci_devemu_set, struct pci_devemu);
static uint64_t pci_emul_iobase;
+static uint64_t pci_emul_iolim;
static uint64_t pci_emul_membase32;
+static uint64_t pci_emul_memlim32;
static uint64_t pci_emul_membase64;
static uint64_t pci_emul_memlim64;
+struct pcibarlist {
+ struct pci_devinst *pdi;
+ int idx;
+ enum pcibar_type type;
+ uint64_t size;
+ struct pcibarlist *next;
+};
+struct pcibarlist *pci_bars;
+
#define PCI_EMUL_IOBASE 0x2000
#define PCI_EMUL_IOLIMIT 0x10000
@@ -114,6 +127,8 @@
#define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE
+#define PCI_EMUL_MEMSIZE64 (32*GB)
+
static struct pci_devemu *pci_emul_finddev(char *name);
static void pci_lintr_route(struct pci_devinst *pi);
static void pci_lintr_update(struct pci_devinst *pi);
@@ -461,10 +476,16 @@
static void
modify_bar_registration(struct pci_devinst *pi, int idx, int registration)
{
+ struct pci_devemu *pe;
int error;
struct inout_port iop;
struct mem_range mr;
+ pe = pi->pi_d;
+ if (pe->pe_baraddr != NULL &&
+ (*pe->pe_baraddr)(pi->pi_vmctx, pi, idx, registration, pi->pi_bar[idx].addr) == 0)
+ return;
+
switch (pi->pi_bar[idx].type) {
case PCIBAR_IO:
bzero(&iop, sizeof(struct inout_port));
@@ -544,8 +565,9 @@
* the address range decoded by the BAR register.
*/
static void
-update_bar_address(struct pci_devinst *pi, uint64_t addr, int idx, int type)
+update_bar_address(struct pci_devinst *pi, int idx, uint32_t val)
{
+ int update_idx = idx;
int decode;
if (pi->pi_bar[idx].type == PCIBAR_IO)
@@ -553,38 +575,110 @@
else
decode = memen(pi);
- if (decode)
- unregister_bar(pi, idx);
-
- switch (type) {
+ switch (pi->pi_bar[idx].type) {
+ case PCIBAR_MEMHI64:
+ --update_idx;
case PCIBAR_IO:
case PCIBAR_MEM32:
- pi->pi_bar[idx].addr = addr;
- break;
case PCIBAR_MEM64:
- pi->pi_bar[idx].addr &= ~0xffffffffUL;
- pi->pi_bar[idx].addr |= addr;
+ {
+ struct pcibar *bar = &pi->pi_bar[update_idx];
+
+ if (decode && (bar->addr != 0))
+ unregister_bar(pi, update_idx);
+
+ if (val == ~0U) {
+ /* guest wants to read size of BAR */
+ pci_set_cfgdata32(pi, PCIR_BAR(idx), ~0U);
+ bar->addr = 0;
+ break;
+ }
+
+ /* guest sets address of BAR */
+ uint64_t mask;
+ uint32_t bar_val;
+ mask = ~(bar->size - 1UL);
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ mask >>= 32UL;
+ bar_val = val & mask;
+ bar_val |= pi->pi_bar[idx].lobits;
+ pci_set_cfgdata32(pi, PCIR_BAR(idx), bar_val);
+
+ /* Only register BAR if it contains a valid address */
+ uint32_t lo, hi;
+ lo = pci_get_cfgdata32(pi, PCIR_BAR(update_idx));
+ hi = 0;
+ if (bar->type == PCIBAR_MEM64)
+ hi = pci_get_cfgdata32(pi, PCIR_BAR(update_idx + 1));
+ if (lo == ~0U || hi == ~0U) {
+ bar->addr = 0;
+ break;
+ }
+
+ if (bar->type == PCIBAR_IO)
+ lo &= PCIM_BAR_IO_BASE;
+ else
+ lo &= PCIM_BAR_MEM_BASE;
+ bar->addr = (uint64_t)lo | ((uint64_t)hi << 32UL);
+ if (decode)
+ register_bar(pi, update_idx);
+
break;
- case PCIBAR_MEMHI64:
- pi->pi_bar[idx].addr &= 0xffffffff;
- pi->pi_bar[idx].addr |= addr;
+ }
+ case PCIBAR_NONE:
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static uint32_t
+read_bar_value(struct pci_devinst *pi, int coff, int bytes)
+{
+ uint8_t idx;
+ idx = (coff - PCIR_BAR(0)) / 4;
+ assert(idx <= PCI_BARMAX);
+
+ uint8_t update_idx = idx;
+ uint64_t val;
+
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ --update_idx;
+
+ val = pci_get_cfgdata32(pi, PCIR_BAR(idx));
+
+ /* return size of BAR */
+ if (val == ~0U) {
+ val = ~(pi->pi_bar[update_idx].size - 1);
+ val |= pi->pi_bar[update_idx].lobits;
+ if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+ val >>= 32;
+ }
+
+ switch (bytes) {
+ case 1:
+ val = (val >> (8 * (coff & 0x03))) & 0xFF;
+ break;
+ case 2:
+ assert((coff & 0x01) == 0);
+ val = (val >> (8 * (coff & 0x02))) & 0xFFFF;
+ break;
+ case 4:
+ assert((coff & 0x03) == 0);
+ val = (uint32_t)val;
break;
default:
assert(0);
}
- if (decode)
- register_bar(pi, idx);
+ return val;
}
+/* add BAR to BAR-List */
int
pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type,
uint64_t size)
{
- int error;
- uint64_t *baseptr, limit, addr, mask, lobits, bar;
- uint16_t cmd, enbit;
-
assert(idx >= 0 && idx <= PCI_BARMAX);
if ((size & (size - 1)) != 0)
@@ -599,6 +693,45 @@
size = 16;
}
+ struct pcibarlist *newBar = calloc(1, sizeof(struct pcibarlist));
+ assert(newBar != NULL);
+ newBar->pdi = pdi;
+ newBar->idx = idx;
+ newBar->type = type;
+ newBar->size = size;
+ if (pci_bars == NULL) {
+ /* first BAR */
+ pci_bars = newBar;
+ } else {
+ struct pcibarlist *bar = pci_bars;
+ struct pcibarlist *lastBar = NULL;
+ do {
+ if (bar->size < size)
+ break;
+ lastBar = bar;
+ bar = bar->next;
+ } while (bar != NULL);
+ newBar->next = bar;
+ if (lastBar != NULL)
+ lastBar->next = newBar;
+ else
+ pci_bars = newBar;
+ }
+ return (0);
+}
+
+static int
+pci_emul_assign_bar(struct pcibarlist *pci_bar)
+{
+ struct pci_devinst *pdi = pci_bar->pdi;
+ int idx = pci_bar->idx;
+ enum pcibar_type type = pci_bar->type;
+ uint64_t size = pci_bar->size;
+
+ int error;
+ uint64_t *baseptr, limit, addr, mask, lobits, bar;
+ uint16_t cmd, enbit;
+
switch (type) {
case PCIBAR_NONE:
baseptr = NULL;
@@ -606,36 +739,39 @@
break;
case PCIBAR_IO:
baseptr = &pci_emul_iobase;
- limit = PCI_EMUL_IOLIMIT;
+ limit = pci_emul_iolim;
mask = PCIM_BAR_IO_BASE;
lobits = PCIM_BAR_IO_SPACE;
enbit = PCIM_CMD_PORTEN;
break;
case PCIBAR_MEM64:
+ assert(idx + 1 <= PCI_BARMAX);
/*
* XXX
* Some drivers do not work well if the 64-bit BAR is allocated
* above 4GB. Allow for this by allocating small requests under
* 4GB unless then allocation size is larger than some arbitrary
- * number (128MB currently).
+ * number (256MB currently).
*/
- if (size > 128 * 1024 * 1024) {
+ if (size > 256 * 1024 * 1024) {
baseptr = &pci_emul_membase64;
limit = pci_emul_memlim64;
mask = PCIM_BAR_MEM_BASE;
lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 |
PCIM_BAR_MEM_PREFETCH;
- } else {
- baseptr = &pci_emul_membase32;
- limit = PCI_EMUL_MEMLIMIT32;
- mask = PCIM_BAR_MEM_BASE;
- lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64;
+ enbit = PCIM_CMD_MEMEN;
+ break;
}
- enbit = PCIM_CMD_MEMEN;
- break;
+ /*
+ * Use 32 bit BARs for small requests:
+ * Fallthrough into MEM32 case
+ */
+ type = PCIBAR_MEM32;
+ pdi->pi_bar[idx + 1].type = PCIBAR_NONE;
+ pdi->pi_bar[idx].lobits &= ~PCIM_BAR_MEM_64;
case PCIBAR_MEM32:
baseptr = &pci_emul_membase32;
- limit = PCI_EMUL_MEMLIMIT32;
+ limit = pci_emul_memlim32;
mask = PCIM_BAR_MEM_BASE;
lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
enbit = PCIM_CMD_MEMEN;
@@ -654,13 +790,19 @@
pdi->pi_bar[idx].type = type;
pdi->pi_bar[idx].addr = addr;
pdi->pi_bar[idx].size = size;
+ /* passthru devices are using same lobits as physical device
+ * they set this property
+ */
+ if (pdi->pi_bar[idx].lobits != 0)
+ lobits = pdi->pi_bar[idx].lobits;
+ else
+ pdi->pi_bar[idx].lobits = lobits;
/* Initialize the BAR register in config space */
bar = (addr & mask) | lobits;
pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar);
if (type == PCIBAR_MEM64) {
- assert(idx + 1 <= PCI_BARMAX);
pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64;
pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32);
}
@@ -673,6 +815,24 @@
return (0);
}
+uint64_t
+pci_emul_alloc_gsm(uint64_t size)
+{
+ uint64_t *baseptr = &pci_emul_membase32;
+ uint64_t *limptr = &pci_emul_memlim32;
+
+ /* align addr */
+ const uint64_t addr = ((*limptr) - size) & ~(size - 1);
+
+ /* if carved-out addr < base ==> ENOMEM */
+ if (addr < (*baseptr))
+ return 0;
+
+ *limptr = addr;
+
+ return addr;
+}
+
#define CAP_START_OFFSET 0x40
static int
pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen)
@@ -1081,6 +1241,8 @@
#define BUSIO_ROUNDUP 32
#define BUSMEM_ROUNDUP (1024 * 1024)
+#define ALIGN_VALUE(Value, Alignment) ((Value) + (((Alignment) - (Value)) & ((Alignment) - 1)))
+
int
init_pci(struct vmctx *ctx)
{
@@ -1090,25 +1252,17 @@
struct slotinfo *si;
struct funcinfo *fi;
size_t lowmem;
- uint64_t cpu_maxphysaddr, pci_emul_memresv64;
- u_int regs[4];
int bus, slot, func, error;
pci_emul_iobase = PCI_EMUL_IOBASE;
+ pci_emul_iolim = PCI_EMUL_IOLIMIT;
+
pci_emul_membase32 = vm_get_lowmem_limit(ctx);
+ pci_emul_memlim32 = PCI_EMUL_MEMLIMIT32;
- do_cpuid(0x80000008, regs);
- cpu_maxphysaddr = 1ULL << (regs[0] & 0xff);
- if (cpu_maxphysaddr > VM_MAXUSER_ADDRESS_LA48)
- cpu_maxphysaddr = VM_MAXUSER_ADDRESS_LA48;
- pci_emul_memresv64 = cpu_maxphysaddr / 4;
- /*
- * Max power of 2 that is less then
- * cpu_maxphysaddr - pci_emul_memresv64.
- */
- pci_emul_membase64 = 1ULL << (flsl(cpu_maxphysaddr -
- pci_emul_memresv64) - 1);
- pci_emul_memlim64 = cpu_maxphysaddr;
+ pci_emul_membase64 = 4*GB + vm_get_highmem_size(ctx);
+ pci_emul_membase64 = ALIGN_VALUE(pci_emul_membase64, PCI_EMUL_MEMSIZE64);
+ pci_emul_memlim64 = pci_emul_membase64 + PCI_EMUL_MEMSIZE64;
for (bus = 0; bus < MAXBUSES; bus++) {
if ((bi = pci_businfo[bus]) == NULL)
@@ -1121,6 +1275,7 @@
bi->membase32 = pci_emul_membase32;
bi->membase64 = pci_emul_membase64;
+ // first run: init devices
for (slot = 0; slot < MAXSLOTS; slot++) {
si = &bi->slotinfo[slot];
for (func = 0; func < MAXFUNCS; func++) {
@@ -1136,6 +1291,15 @@
}
}
+ // second run: assign BARs and free BAR list
+ struct pcibarlist *bar = pci_bars;
+ while (bar != NULL) {
+ pci_emul_assign_bar(bar);
+ struct pcibarlist *old = bar;
+ bar = bar->next;
+ free(old);
+ }
+
/*
* Add some slop to the I/O and memory resources decoded by
* this bus to give a guest some flexibility if it wants to
@@ -1780,7 +1944,6 @@
struct pci_devinst *pi;
struct pci_devemu *pe;
int idx, needcfg;
- uint64_t addr, bar, mask;
if ((bi = pci_businfo[bus]) != NULL) {
si = &bi->slotinfo[slot];
@@ -1832,8 +1995,13 @@
needcfg = 1;
}
- if (needcfg)
- *eax = CFGREAD(pi, coff, bytes);
+ if (needcfg) {
+ if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) {
+ *eax = read_bar_value(pi, coff, bytes);
+ } else {
+ *eax = CFGREAD(pi, coff, bytes);
+ }
+ }
pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax);
} else {
@@ -1852,55 +2020,10 @@
*/
if (bytes != 4 || (coff & 0x3) != 0)
return;
+
idx = (coff - PCIR_BAR(0)) / 4;
- mask = ~(pi->pi_bar[idx].size - 1);
- switch (pi->pi_bar[idx].type) {
- case PCIBAR_NONE:
- pi->pi_bar[idx].addr = bar = 0;
- break;
- case PCIBAR_IO:
- addr = *eax & mask;
- addr &= 0xffff;
- bar = addr | PCIM_BAR_IO_SPACE;
- /*
- * Register the new BAR value for interception
- */
- if (addr != pi->pi_bar[idx].addr) {
- update_bar_address(pi, addr, idx,
- PCIBAR_IO);
- }
- break;
- case PCIBAR_MEM32:
- addr = bar = *eax & mask;
- bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
- if (addr != pi->pi_bar[idx].addr) {
- update_bar_address(pi, addr, idx,
- PCIBAR_MEM32);
- }
- break;
- case PCIBAR_MEM64:
- addr = bar = *eax & mask;
- bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 |
- PCIM_BAR_MEM_PREFETCH;
- if (addr != (uint32_t)pi->pi_bar[idx].addr) {
- update_bar_address(pi, addr, idx,
- PCIBAR_MEM64);
- }
- break;
- case PCIBAR_MEMHI64:
- mask = ~(pi->pi_bar[idx - 1].size - 1);
- addr = ((uint64_t)*eax << 32) & mask;
- bar = addr >> 32;
- if (bar != pi->pi_bar[idx - 1].addr >> 32) {
- update_bar_address(pi, addr, idx - 1,
- PCIBAR_MEMHI64);
- }
- break;
- default:
- assert(0);
- }
- pci_set_cfgdata32(pi, coff, bar);
-
+
+ update_bar_address(pi, idx, *eax);
} else if (pci_emul_iscap(pi, coff)) {
pci_emul_capwrite(pi, coff, bytes, *eax, 0, 0);
} else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) {
diff --git a/usr.sbin/bhyve/pci_fbuf.c b/usr.sbin/bhyve/pci_fbuf.c
--- a/usr.sbin/bhyve/pci_fbuf.c
+++ b/usr.sbin/bhyve/pci_fbuf.c
@@ -42,6 +42,7 @@
#include <stdlib.h>
#include <string.h>
+#include <err.h>
#include <errno.h>
#include <unistd.h>
@@ -224,6 +225,32 @@
return (value);
}
+static int
+pci_fbuf_baraddr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
+ int enabled, uint64_t address)
+{
+ struct pci_fbuf_softc *sc;
+ int prot;
+
+ if (baridx != 1)
+ return (-1);
+
+ sc = pi->pi_arg;
+ if (!enabled && sc->fbaddr != 0) {
+ if (vm_munmap_memseg(ctx, sc->fbaddr, FB_SIZE) != 0)
+ warnx("pci_fbuf: munmap_memseg failed");
+ sc->fbaddr = 0;
+ } else if (enabled && sc->fb_base != NULL && sc->fbaddr == 0) {
+ prot = PROT_READ | PROT_WRITE;
+ if (vm_mmap_memseg(ctx, address, VM_FRAMEBUFFER, 0, FB_SIZE, prot) != 0)
+ warnx("pci_fbuf: mmap_memseg failed");
+ sc->fbaddr = address;
+ }
+
+ return (0);
+}
+
+
static int
pci_fbuf_parse_opts(struct pci_fbuf_softc *sc, char *opts)
{
@@ -353,7 +380,7 @@
static int
pci_fbuf_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
- int error, prot;
+ int error;
struct pci_fbuf_softc *sc;
if (fbuf_sc != NULL) {
@@ -409,19 +436,6 @@
DPRINTF(DEBUG_INFO, ("fbuf frame buffer base: %p [sz %lu]",
sc->fb_base, FB_SIZE));
- /*
- * Map the framebuffer into the guest address space.
- * XXX This may fail if the BAR is different than a prior
- * run. In this case flag the error. This will be fixed
- * when a change_memseg api is available.
- */
- prot = PROT_READ | PROT_WRITE;
- if (vm_mmap_memseg(ctx, sc->fbaddr, VM_FRAMEBUFFER, 0, FB_SIZE, prot) != 0) {
- EPRINTLN("pci_fbuf: mapseg failed - try deleting VM and restarting");
- error = -1;
- goto done;
- }
-
console_init(sc->memregs.width, sc->memregs.height, sc->fb_base);
console_fb_register(pci_fbuf_render, sc);
@@ -459,6 +473,7 @@
.pe_init = pci_fbuf_init,
.pe_barwrite = pci_fbuf_write,
.pe_barread = pci_fbuf_read,
+ .pe_baraddr = pci_fbuf_baraddr,
#ifdef BHYVE_SNAPSHOT
.pe_snapshot = pci_fbuf_snapshot,
#endif
diff --git a/usr.sbin/bhyve/pci_gvt-d.c b/usr.sbin/bhyve/pci_gvt-d.c
new file mode 100644
--- /dev/null
+++ b/usr.sbin/bhyve/pci_gvt-d.c
@@ -0,0 +1,385 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include <machine/vmm.h>
+
+#include <dev/pci/pcireg.h>
+
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#include "inout.h"
+#include "pci_passthru.h"
+
+#define MB (1024 * 1024UL)
+
+/*
+ * PCI definitions
+ */
+#define PCIR_GGC 0x50 /* GMCH Graphics Control register */
+#define PCIR_BDSM 0x5C /* Base Data of Stolen Memory register */
+#define PCIR_ASLS_CTL 0xFC /* Opregion start address register */
+#define PCIM_GEN5_75_GGC_GMS_MASK \
+ 0x000000F0 /* Bits 7:4 contain Graphics Mode Select */
+#define PCIM_GEN6_GGC_GMS_MASK \
+ 0x000000F8 /* Bits 7:3 contain Graphics Mode Select */
+#define PCIM_GEN8_GGC_GMS_MASK \
+ 0x0000FF00 /* Bits 15:8 contain Graphics Mode Select */
+#define PCIM_BDSM_GSM_MASK \
+ 0xFFF00000 /* Bits 31:20 contain base address of gsm */
+#define PCIM_ASLS_OPREGION_MASK 0xFFFFF000 /* Opregion is 4k aligned */
+#define GPU_OPREGION_LEN 0x00004000 /* Size of Opregion (16 KB) */
+
+/*
+ * Known device ids for different generations of Intel graphics
+ * see https://www.graphics-drivers.eu/intel-pci-hardware-id-string.html for
+ * complete list
+ */
+/* Westmere & Ironlake */
+static const uint16_t igd_devid_gen5_75[] = { 0x0042, 0x0046 };
+/* Sandy Bridge */
+static const uint16_t igd_devid_gen6[] = { 0x0102, 0x0106, 0x010A, 0x0112,
+ 0x0116, 0x0122, 0x0126 };
+/* Ivy Bridge */
+static const uint16_t igd_devid_gen7[] = { 0x0152, 0x0156, 0x015A, 0x0162,
+ 0x0166, 0x016A };
+/* Haswsell */
+static const uint16_t igd_devid_gen7_5[] = { 0x0402, 0x0406, 0x040A, 0x0412,
+ 0x0416, 0x041A, 0x041E, 0x0A06, 0x0A0E, 0x0A16, 0x0A1E, 0x0A26, 0x0A2E,
+ 0x0C02, 0x0C06, 0x0C12, 0x0C16, 0x0C22, 0x0C26, 0x0D06, 0x0D16, 0x0D22,
+ 0x0D26 };
+/* Broadwell */
+static const uint16_t igd_devid_gen8[] = { 0x1606, 0x160E, 0x1612, 0x1616,
+ 0x161A, 0x161E, 0x1622, 0x1626, 0x162A, 0x162B };
+/* Skylake */
+static const uint16_t igd_devid_gen9[] = { 0x1902, 0x1906, 0x190B, 0x190E,
+ 0x1912, 0x1913, 0x1916, 0x1917, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923,
+ 0x1926, 0x1927, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D };
+/* Kaby Lake & Whiskey Lake & Amber Lake & Coffee Lake & Comet Lake */
+static const uint16_t igd_devid_gen9_5[] = { 0x3E90, 0x3E91, 0x3E92, 0x3E93,
+ 0x3E94, 0x3E96, 0x3E98, 0x3E99, 0x3E9A, 0x3E9B, 0x3E9C, 0x3EA0, 0x3EA1,
+ 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8, 0x3EA9, 0x5902, 0x5906, 0x590B, 0x5912,
+ 0x5916, 0x5917, 0x591B, 0x591C, 0x591D, 0x591E, 0x5921, 0x5926, 0x5927,
+ 0x87C0, 0x87CA, 0x9B21, 0x9B41, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA,
+ 0x9BAC, 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCC, 0x9BE6,
+ 0x9BF6 };
+
+static int
+array_contains(const uint16_t *array, uint64_t elements, uint16_t item)
+{
+ for (uint64_t i = 0; i < elements; ++i)
+ if (array[i] == item)
+ return 1;
+ return 0;
+}
+
+#define IGD_FUNC_IS_IGD_GEN(gen) \
+ static int igd_gen##gen##_is_igd_gen(int devid) \
+ { \
+ return array_contains(igd_devid_gen##gen, \
+ sizeof(igd_devid_gen##gen) / sizeof(uint16_t), devid); \
+ }
+
+/* GVT-d definitions */
+#define GVT_D_MAP_OPREGION 0
+#define GVT_D_MAP_GSM 1
+
+/*
+ * Handler for passthru of igd
+ *
+ * Keep it as struct instead of a single function pointer, since new
+ * generations of Intel graphics could need other funcs.
+ * e.g. Intel Elkhartlake and Intel Tigerlake:
+ * They will need different handling for GSM and Opregion (See ACRN-Hypervisor
+ * <https://github.com/projectacrn/acrn-hypervisor/blob/master/devicemodel/hw/pci/passthrough.c>)
+ */
+struct igd_funcs {
+ int (*is_igd_gen)(int devid);
+ uint64_t (*get_gsm_len)(struct vmctx *ctx, struct passthru_softc *sc);
+};
+
+/* Handler for igd of gen5.75 (Westmere & Ironlake) */
+IGD_FUNC_IS_IGD_GEN(5_75);
+
+static uint64_t
+igd_gen5_75_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN5_75_GGC_GMS_MASK) >>
+ 4; /* Bits 7:4 contain Graphics Mode Select */
+ switch (gms_val) {
+ case 0x05:
+ return 32 * MB;
+ case 0x06:
+ return 48 * MB;
+ case 0x07:
+ return 64 * MB;
+ case 0x08:
+ return 128 * MB;
+ case 0x09:
+ return 256 * MB;
+ case 0x0A:
+ return 96 * MB;
+ case 0x0B:
+ return 160 * MB;
+ case 0x0C:
+ return 224 * MB;
+ case 0x0D:
+ return 352 * MB;
+ }
+
+ warnx("Unknown Graphic Mode (%x)", gms_val);
+ return 0;
+}
+
+/* Handler for igd of gen6 (Sandy Bridge) */
+IGD_FUNC_IS_IGD_GEN(6);
+
+static uint64_t
+igd_gen6_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN6_GGC_GMS_MASK) >>
+ 3; /* Bits 7:3 contain Graphics Mode Select */
+ if (gms_val <= 0x10)
+ return gms_val * 32 * MB;
+
+ warnx("Unknown Graphic Mode (%x)", gms_val);
+ return 0;
+}
+
+/* Handler for igd of gen7 (Ivy Bridge) */
+IGD_FUNC_IS_IGD_GEN(7);
+
+/* Handler for igd of gen7.5 (Haswell) */
+IGD_FUNC_IS_IGD_GEN(7_5);
+
+/* Handler for igd of gen8 (Broadwell) */
+IGD_FUNC_IS_IGD_GEN(8);
+
+static uint64_t
+igd_gen8_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+ uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+ uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >>
+ 8; /* Bits 15:8 contain Graphics Mode Select */
+ if ((gms_val <= 0x10) || (gms_val == 0x20) || (gms_val == 0x30) ||
+ (gms_val == 0x3F))
+ return gms_val * 32 * MB;
+
+ warnx("Unknown Graphic Mode (%x)", gms_val);
+ return 0;
+}
+
+/* Handler for igd of gen9 (Skylake) */
+IGD_FUNC_IS_IGD_GEN(9);
+
+/*
+ * Determine the Graphics Stolen Memory (GSM) size of a gen9 (Skylake)
+ * IGD from the GMS field of the GGC config register.  Values below
+ * 0xF0 are in 32 MB units; values 0xF0..0xFE select 4 MB..60 MB in
+ * 4 MB steps (size = (gms - 0xF0 + 1) * 4 MB, as decoded by the Linux
+ * i915 early-quirks code).  Returns 0 for unknown encodings.
+ */
+static uint64_t
+igd_gen9_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+	uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+	uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >>
+	    8; /* Bits 15:8 contain Graphics Mode Select */
+	if ((gms_val <= 0x10) || (gms_val == 0x20) || (gms_val == 0x30) ||
+	    (gms_val == 0x40))
+		return gms_val * 32 * MB;
+	else if (gms_val >= 0xF0 && gms_val <= 0xFE)
+		/*
+		 * 0xF0..0xFE are 4 MB units relative to 0xF0, not an
+		 * absolute multiplier: 0xF0 -> 4 MB, 0xFE -> 60 MB.
+		 */
+		return ((uint64_t)gms_val - 0xF0 + 1) * 4 * MB;
+
+	warnx("Unknown Graphic Mode (%x)", gms_val);
+	return 0;
+}
+
+/*
+ * Handler for igd of gen9.5 (Kaby Lake & Whiskey Lake & Amber Lake & Coffee
+ * Lake & Comet Lake)
+ */
+IGD_FUNC_IS_IGD_GEN(9_5);
+
+/*
+ * Per-generation dispatch tables: each entry pairs a device-id matcher
+ * (is_igd_gen) with the matching GSM-size decoder (get_gsm_len).
+ * Gen7 and gen7.5 reuse the gen6 decoder; gen9.5 reuses the gen9
+ * decoder.
+ */
+/* Westmere & Ironlake */
+static const struct igd_funcs igd_gen5_75 = {
+	.is_igd_gen = igd_gen5_75_is_igd_gen,
+	.get_gsm_len = igd_gen5_75_get_gsm_len
+};
+/* Sandy Bridge */
+static const struct igd_funcs igd_gen6 = { .is_igd_gen = igd_gen6_is_igd_gen,
+	.get_gsm_len = igd_gen6_get_gsm_len };
+/* Ivy Bridge */
+static const struct igd_funcs igd_gen7 = { .is_igd_gen = igd_gen7_is_igd_gen,
+	.get_gsm_len = igd_gen6_get_gsm_len };
+/* Haswell */
+static const struct igd_funcs igd_gen7_5 = {
+	.is_igd_gen = igd_gen7_5_is_igd_gen,
+	.get_gsm_len = igd_gen6_get_gsm_len
+};
+/* Broadwell */
+static const struct igd_funcs igd_gen8 = { .is_igd_gen = igd_gen8_is_igd_gen,
+	.get_gsm_len = igd_gen8_get_gsm_len };
+/* Skylake */
+static const struct igd_funcs igd_gen9 = { .is_igd_gen = igd_gen9_is_igd_gen,
+	.get_gsm_len = igd_gen9_get_gsm_len };
+/* Kaby Lake & Whiskey Lake & Amber Lake & Coffee Lake & Comet Lake */
+static const struct igd_funcs igd_gen9_5 = {
+	.is_igd_gen = igd_gen9_5_is_igd_gen,
+	.get_gsm_len = igd_gen9_get_gsm_len
+};
+
+/* Search order for get_igd_funcs(); first matcher that accepts wins. */
+static const struct igd_funcs *igd_gen_map[] = { &igd_gen5_75, &igd_gen6,
+	&igd_gen7, &igd_gen7_5, &igd_gen8, &igd_gen9, &igd_gen9_5 };
+
+/*
+ * Look up the generation-specific handler table for the given Intel
+ * device id.  Returns NULL when the device is not a supported IGD.
+ */
+static const struct igd_funcs *
+get_igd_funcs(const uint16_t devid)
+{
+	/* size_t avoids a signed/unsigned comparison against sizeof. */
+	for (size_t i = 0; i < sizeof(igd_gen_map) / sizeof(igd_gen_map[0]);
+	    ++i) {
+		if (igd_gen_map[i]->is_igd_gen(devid))
+			return (igd_gen_map[i]);
+	}
+	return (NULL);
+}
+
+/*
+ * Initialize GVT-d passthrough for an Intel integrated graphics device:
+ * locate the host OpRegion and Graphics Stolen Memory (GSM), allocate
+ * guest-physical space for both, expose the new guest addresses through
+ * the ASLS and BDSM config registers, map both ranges into the guest and
+ * finally block guest access to those two registers.
+ *
+ * Returns 0 on success or a negative errno value on failure.
+ */
+int
+gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	int error;
+	struct passthru_softc *sc;
+
+	sc = pi->pi_arg;
+
+	/* check vendor == Intel (0x8086 is the Intel PCI vendor id) */
+	const uint16_t dev_vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 2);
+	if (dev_vendor != 0x8086) {
+		warnx("Unknown vendor (%x) of igd", dev_vendor);
+		return -ENODEV;
+	}
+
+	/* check if device is a display device */
+	if (read_config(&sc->psc_sel, PCIR_CLASS, 1) != PCIC_DISPLAY) {
+		warnx("%s is no display device", pi->pi_name);
+		return -ENODEV;
+	}
+
+	/* Get IGD funcs for this device generation */
+	const struct igd_funcs *igd = get_igd_funcs(
+	    read_config(&sc->psc_sel, PCIR_DEVICE, 2));
+	if (igd == NULL) {
+		warnx("Unsupported igd-device (%x)",
+		    read_config(&sc->psc_sel, PCIR_DEVICE, 2));
+		return -ENODEV;
+	}
+
+	struct passthru_mmio_mapping *opregion =
+	    &sc->psc_mmio_map[GVT_D_MAP_OPREGION];
+	struct passthru_mmio_mapping *gsm = &sc->psc_mmio_map[GVT_D_MAP_GSM];
+
+	/* Get Opregion length (fixed size) */
+	opregion->len = GPU_OPREGION_LEN;
+	/* Get Opregion HPA from the ASLS register */
+	opregion->hpa = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4) &
+	    PCIM_ASLS_OPREGION_MASK;
+	/* Get Graphics Stolen Memory len (generation-specific decode) */
+	gsm->len = igd->get_gsm_len(ctx, sc);
+	/* Get Graphics Stolen Memory HPA from the BDSM register */
+	gsm->hpa = read_config(&sc->psc_sel, PCIR_BDSM, 4) & PCIM_BDSM_GSM_MASK;
+
+	if (opregion->len == 0 || gsm->len == 0) {
+		warnx("Could not determine size of opregion or gsm");
+		return -ENODEV;
+	}
+
+	/* Allocate Opregion and GSM in guest space */
+	gsm->gpa = pci_emul_alloc_gsm(gsm->len);
+	opregion->gpa = pci_emul_alloc_gsm(opregion->len);
+	if (opregion->gpa == 0 || gsm->gpa == 0) {
+		error = -ENOMEM;
+		goto failed_opregion;
+	}
+
+	/* Write address of Opregion and GSM into PCI register */
+	/* Set Opregion GPA (keep non-address bits of ASLS) */
+	uint32_t asls_val = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4);
+	pci_set_cfgdata32(sc->psc_pi, PCIR_ASLS_CTL,
+	    opregion->gpa | (asls_val & ~PCIM_ASLS_OPREGION_MASK));
+	/* Set Graphics Stolen Memory GPA (keep non-address bits of BDSM) */
+	uint32_t bdsm_val = read_config(&sc->psc_sel, PCIR_BDSM, 4);
+	pci_set_cfgdata32(
+	    sc->psc_pi, PCIR_BDSM, gsm->gpa | (bdsm_val & ~PCIM_BDSM_GSM_MASK));
+
+	/* Map Opregion and GSM into guest space (opregion first) */
+	if ((error = passthru_modify_pptdev_mmio(
+		 ctx, sc, opregion, PT_MAP_PPTDEV_MMIO)) != 0)
+		goto failed_opregion;
+	if ((error = passthru_modify_pptdev_mmio(
+		 ctx, sc, gsm, PT_MAP_PPTDEV_MMIO)) != 0)
+		goto failed_gsm;
+
+	/* Protect PCI register so the guest cannot relocate the ranges */
+	set_pcir_prot(sc, PCIR_ASLS_CTL, 0x04, PPT_PCIR_PROT_NA);
+	set_pcir_prot(sc, PCIR_BDSM, 0x04, PPT_PCIR_PROT_NA);
+
+	return (0);
+
+	/*
+	 * Fallthrough from failed_opregion into failed_gsm is
+	 * intentional: a failure before the gsm mapping clears both
+	 * GPAs so gvt_d_deinit() skips the unmap of either range.
+	 */
+failed_opregion:
+	opregion->gpa = 0;
+failed_gsm:
+	gsm->gpa = 0;
+	return error;
+}
+
+/*
+ * Tear down the GVT-d mappings created by gvt_d_init().  Only ranges
+ * whose GPA was set (i.e. that were successfully mapped) are unmapped;
+ * errors returned by the unmap calls are ignored.
+ */
+void
+gvt_d_deinit(struct vmctx *ctx, struct pci_devinst *pi)
+{
+	struct passthru_softc *sc;
+
+	sc = pi->pi_arg;
+
+	struct passthru_mmio_mapping *gsm = &sc->psc_mmio_map[GVT_D_MAP_GSM];
+	struct passthru_mmio_mapping *opregion =
+	    &sc->psc_mmio_map[GVT_D_MAP_OPREGION];
+
+	/* GPA is only set, if it's initialized */
+	if (gsm->gpa)
+		passthru_modify_pptdev_mmio(ctx, sc, gsm, PT_UNMAP_PPTDEV_MMIO);
+	if (opregion->gpa)
+		passthru_modify_pptdev_mmio(
+		    ctx, sc, opregion, PT_UNMAP_PPTDEV_MMIO);
+}
diff --git a/usr.sbin/bhyve/pci_lpc.c b/usr.sbin/bhyve/pci_lpc.c
--- a/usr.sbin/bhyve/pci_lpc.c
+++ b/usr.sbin/bhyve/pci_lpc.c
@@ -33,9 +33,13 @@
__FBSDID("$FreeBSD$");
#include <sys/types.h>
+#include <sys/pciio.h>
#include <machine/vmm.h>
#include <machine/vmm_snapshot.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -83,6 +87,29 @@
static bool pctestdev_present;
+#ifndef _PATH_DEVPCI
+#define _PATH_DEVPCI "/dev/pci"
+#endif
+
+static int pcifd = -1;
+
+/*
+ * Read a register of a host PCI device via /dev/pci (PCIOCREAD).
+ * Returns the register value, or 0 when the ioctl fails.  NOTE: a
+ * genuine register value of 0 and an ioctl failure are
+ * indistinguishable to the caller.
+ */
+static uint32_t
+read_config(struct pcisel *sel, long reg, int width)
+{
+	struct pci_io pi;
+	pi.pi_sel.pc_domain = sel->pc_domain;
+	pi.pi_sel.pc_bus = sel->pc_bus;
+	pi.pi_sel.pc_dev = sel->pc_dev;
+	pi.pi_sel.pc_func = sel->pc_func;
+	pi.pi_reg = reg;
+	pi.pi_width = width;
+
+	if (ioctl(pcifd, PCIOCREAD, &pi) < 0)
+		return (0);
+
+	return (pi.pi_data);
+}
+
/*
* LPC device configuration is in the following form:
* <lpc_device_name>[,<options>]
@@ -446,6 +473,35 @@
pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_ISA);
+ /* open host device */
+ if (pcifd < 0) {
+ pcifd = open(_PATH_DEVPCI, O_RDWR, 0);
+ if (pcifd < 0) {
+ warn("failed to open %s", _PATH_DEVPCI);
+ return (-1);
+ }
+ }
+
+ /* on Intel systems lpc is always connected to 0:1f.0 */
+ struct pcisel sel;
+ sel.pc_domain = 0;
+ sel.pc_bus = 0;
+ sel.pc_dev = 0x1f;
+ sel.pc_func = 0;
+
+ if (read_config(&sel, PCIR_VENDOR, 2) == PCI_VENDOR_INTEL) {
+ /*
+		 * The VID, DID, REVID, SUBVID and SUBDID of igd-lpc need to be
+		 * aligned with the physical LPC bridge. Without these physical
+		 * values, the GVT-d GOP driver does not work.
+ */
+ pci_set_cfgdata16(pi, PCIR_DEVICE, read_config(&sel, PCIR_DEVICE, 2));
+ pci_set_cfgdata16(pi, PCIR_VENDOR, read_config(&sel, PCIR_VENDOR, 2));
+ pci_set_cfgdata8(pi, PCIR_REVID, read_config(&sel, PCIR_REVID, 1));
+ pci_set_cfgdata16(pi, PCIR_SUBVEND_0, read_config(&sel, PCIR_SUBVEND_0, 2));
+ pci_set_cfgdata16(pi, PCIR_SUBDEV_0, read_config(&sel, PCIR_SUBDEV_0, 2));
+ }
+
lpc_bridge = pi;
return (0);
diff --git a/usr.sbin/bhyve/pci_passthru.h b/usr.sbin/bhyve/pci_passthru.h
new file mode 100644
--- /dev/null
+++ b/usr.sbin/bhyve/pci_passthru.h
@@ -0,0 +1,83 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __PCI_PASSTHRU_H__
+#define __PCI_PASSTHRU_H__
+
+#include <sys/pciio.h>
+
+#include <vmmapi.h>
+
+#include "pci_emul.h"
+
+/* One host MMIO range mirrored into the guest. */
+struct passthru_mmio_mapping {
+	uint64_t gpa;	/* guest-physical address */
+	uint64_t len;	/* length of the range in bytes */
+	uint64_t hpa;	/* host-physical address */
+};
+
+/* Per-device state of a passed-through PCI function. */
+struct passthru_softc {
+	struct pci_devinst *psc_pi;
+	struct pcibar psc_bar[PCI_BARMAX + 1];
+	struct {
+		int capoff;
+		int msgctrl;
+		int emulated;
+	} psc_msi;
+	struct {
+		int capoff;
+	} psc_msix;
+	struct pcisel psc_sel;
+
+	/* extra MMIO ranges (e.g. GVT-d OpRegion/GSM) beyond the BARs;
+	 * indices presumably GVT_D_MAP_* — defined elsewhere, verify */
+	struct passthru_mmio_mapping psc_mmio_map[2];
+	/* 2-bit PPT_PCIR_PROT_* value per config register, 4 per byte */
+	uint8_t psc_pcir_prot_map[(PCI_REGMAX + 1) / 4];
+};
+
+/* 'registration' argument of passthru_modify_pptdev_mmio() */
+#define PT_MAP_PPTDEV_MMIO 1
+#define PT_UNMAP_PPTDEV_MMIO 0
+
+#define PPT_PCIR_PROT_NA 0 /* No Access to physical values */
+#define PPT_PCIR_PROT_RO 1 /* Read Only access to physical values */
+#define PPT_PCIR_PROT_WO 2 /* Write Only access to physical values */
+#define PPT_PCIR_PROT_RW \
+	(PPT_PCIR_PROT_RO | \
+	 PPT_PCIR_PROT_WO) /* Read/Write access to physical values */
+#define PPT_PCIR_PROT_MASK 0x03
+
+int passthru_modify_pptdev_mmio(struct vmctx *ctx, struct passthru_softc *sc,
+    struct passthru_mmio_mapping *map, int registration);
+uint32_t read_config(const struct pcisel *sel, long reg, int width);
+void write_config(const struct pcisel *sel, long reg, int width, uint32_t data);
+int set_pcir_prot(
+    struct passthru_softc *sc, uint32_t reg, uint32_t len, uint8_t prot);
+int gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts);
+void gvt_d_deinit(struct vmctx *ctx, struct pci_devinst *pi);
+
+#endif
diff --git a/usr.sbin/bhyve/pci_passthru.c b/usr.sbin/bhyve/pci_passthru.c
--- a/usr.sbin/bhyve/pci_passthru.c
+++ b/usr.sbin/bhyve/pci_passthru.c
@@ -58,9 +58,8 @@
#include <unistd.h>
#include <machine/vmm.h>
-#include <vmmapi.h>
-#include "pci_emul.h"
#include "mem.h"
+#include "pci_passthru.h"
#ifndef _PATH_DEVPCI
#define _PATH_DEVPCI "/dev/pci"
@@ -83,20 +82,6 @@
static int iofd = -1;
static int memfd = -1;
-struct passthru_softc {
- struct pci_devinst *psc_pi;
- struct pcibar psc_bar[PCI_BARMAX + 1];
- struct {
- int capoff;
- int msgctrl;
- int emulated;
- } psc_msi;
- struct {
- int capoff;
- } psc_msix;
- struct pcisel psc_sel;
-};
-
static int
msi_caplen(int msgctrl)
{
@@ -119,7 +104,7 @@
return (len);
}
-static uint32_t
+uint32_t
read_config(const struct pcisel *sel, long reg, int width)
{
struct pci_io pi;
@@ -135,7 +120,7 @@
return (pi.pi_data);
}
-static void
+void
write_config(const struct pcisel *sel, long reg, int width, uint32_t data)
{
struct pci_io pi;
@@ -149,6 +134,70 @@
(void)ioctl(pcifd, PCIOCWRITE, &pi); /* XXX */
}
+/*
+ * Map or unmap a host MMIO range for the passed-through device,
+ * depending on 'registration' (PT_MAP_PPTDEV_MMIO /
+ * PT_UNMAP_PPTDEV_MMIO).  Returns the error of the underlying
+ * vmmapi call.
+ */
+int
+passthru_modify_pptdev_mmio(struct vmctx *ctx, struct passthru_softc *sc,
+    struct passthru_mmio_mapping *map, int registration)
+{
+	if (registration == PT_MAP_PPTDEV_MMIO)
+		return (vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
+		    sc->psc_sel.pc_dev, sc->psc_sel.pc_func, map->gpa,
+		    map->len, map->hpa));
+
+	return (vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
+	    sc->psc_sel.pc_dev, sc->psc_sel.pc_func, map->gpa, map->len));
+}
+
+/*
+ * Map or unmap the MMIO range behind BAR 'idx' in the guest.  The BAR
+ * holding the MSI-X table is split: the pages before and after the
+ * table are mapped, while the table pages themselves stay unmapped so
+ * accesses to them are emulated.  Returns 0 on success.
+ */
+static int
+passthru_modify_bar_registration(struct pci_devinst *pi, int idx, int registration)
+{
+	int error;
+	struct passthru_softc *sc;
+	struct passthru_mmio_mapping map;
+
+	sc = pi->pi_arg;
+
+	/*
+	 * If the guest writes a new value to a 64-bit BAR, two writes are
+	 * necessary.  vm_map_pptdev_mmio can fail in that case due to an
+	 * invalid address after the first write.  To avoid it, skip
+	 * registration while either half still reads as all-ones.
+	 */
+	if ((registration == PT_MAP_PPTDEV_MMIO) && (pi->pi_bar[idx].type == PCIBAR_MEM64))
+		if ((pci_get_cfgdata32(pi, PCIR_BAR(idx + 0)) == ~0U) ||
+		    (pci_get_cfgdata32(pi, PCIR_BAR(idx + 1)) == ~0U))
+			return 0;
+
+	/* Ordinary BARs are mapped/unmapped in one piece. */
+	if (idx != pci_msix_table_bar(pi)) {
+		map.gpa = pi->pi_bar[idx].addr;
+		map.len = pi->pi_bar[idx].size;
+		map.hpa = sc->psc_bar[idx].addr;
+		return passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map, registration);
+	}
+
+	/* special handling for MSI-X table */
+	uint32_t table_offset, table_size;
+
+	/* Page-align the table window that must remain emulated. */
+	table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
+	table_size = pi->pi_msix.table_offset - table_offset;
+	table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
+	table_size = roundup2(table_size, 4096);
+
+	map.gpa = pi->pi_bar[idx].addr;
+	map.len = table_offset;
+	map.hpa = sc->psc_bar[idx].addr;
+
+	/* map/unmap everything before MSI-X table */
+	if (map.len > 0)
+		if ((error = passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map, registration)) != 0)
+			return error;
+
+	map.gpa += table_offset + table_size;
+	/*
+	 * NOTE(review): assumes table_offset + table_size <= BAR size;
+	 * otherwise map.len underflows — verify against callers.
+	 */
+	map.len = pi->pi_bar[idx].size - (table_offset + table_size);
+	map.hpa += table_offset + table_size;
+
+	/* map/unmap everything behind MSI-X table */
+	if (map.len > 0)
+		if ((error = passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map, registration)) != 0)
+			return error;
+
+	return (0);
+}
+
#ifdef LEGACY_SUPPORT
static int
passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr)
@@ -438,8 +487,8 @@
init_msix_table(struct vmctx *ctx, struct passthru_softc *sc, uint64_t base)
{
int b, s, f;
- int error, idx;
- size_t len, remaining;
+ int idx;
+ size_t remaining;
uint32_t table_size, table_offset;
uint32_t pba_size, pba_offset;
vm_paddr_t start;
@@ -501,31 +550,6 @@
}
}
- /* Map everything before the MSI-X table */
- if (table_offset > 0) {
- len = table_offset;
- error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
- if (error)
- return (error);
-
- base += len;
- start += len;
- remaining -= len;
- }
-
- /* Skip the MSI-X table */
- base += table_size;
- start += table_size;
- remaining -= table_size;
-
- /* Map everything beyond the end of the MSI-X table */
- if (remaining > 0) {
- len = remaining;
- error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
- if (error)
- return (error);
- }
-
return (0);
}
@@ -581,24 +605,28 @@
sc->psc_bar[i].type = bartype;
sc->psc_bar[i].size = size;
sc->psc_bar[i].addr = base;
+ sc->psc_bar[i].lobits = 0;
/* Allocate the BAR in the guest I/O or MMIO space */
error = pci_emul_alloc_bar(pi, i, bartype, size);
if (error)
return (-1);
+ /* Use same lobits as physical bar */
+ uint8_t lobits = read_config(&sc->psc_sel, PCIR_BAR(i), 0x01);
+ if (bartype == PCIBAR_MEM32 || bartype == PCIBAR_MEM64) {
+ lobits &= ~PCIM_BAR_MEM_BASE;
+ } else {
+ lobits &= ~PCIM_BAR_IO_BASE;
+ }
+ sc->psc_bar[i].lobits = lobits;
+ pi->pi_bar[i].lobits = lobits;
+
/* The MSI-X table needs special handling */
if (i == pci_msix_table_bar(pi)) {
error = init_msix_table(ctx, sc, base);
if (error)
return (-1);
- } else if (bartype != PCIBAR_IO) {
- /* Map the physical BAR in the guest MMIO space */
- error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
- sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
- pi->pi_bar[i].addr, pi->pi_bar[i].size, base);
- if (error)
- return (-1);
}
/*
@@ -639,14 +667,89 @@
goto done;
}
- pci_set_cfgdata16(pi, PCIR_COMMAND, read_config(&sc->psc_sel,
- PCIR_COMMAND, 2));
+ /* sync command register */
+ write_config(&sc->psc_sel, PCIR_COMMAND, 0x02,
+ pci_get_cfgdata16(pi, PCIR_COMMAND));
error = 0; /* success */
done:
return (error);
}
+/*
+ * Each PCI config register has a 2-bit protection value
+ * (PPT_PCIR_PROT_MASK == 0x03) packed four to a byte in
+ * psc_pcir_prot_map, so the bit offset of register 'reg' inside its
+ * byte is (reg % 4) * 2 — shifting by (reg % 4) alone would make
+ * adjacent registers' protection bits overlap.
+ */
+#define PPT_PCIR_PROT(reg) \
+	((sc->psc_pcir_prot_map[(reg) / 4] >> (((reg) & 0x03) * 2)) & \
+	    PPT_PCIR_PROT_MASK)
+
+/*
+ * Set the protection value for the config-register range
+ * [reg, reg + len).  Returns 0 on success, -1 when the range is out
+ * of bounds.
+ */
+int
+set_pcir_prot(struct passthru_softc *sc, uint32_t reg, uint32_t len, uint8_t prot)
+{
+	/* 'len > PCI_REGMAX + 1 - reg' avoids overflow of reg + len. */
+	if (reg > PCI_REGMAX || len > PCI_REGMAX + 1 - reg)
+		return (-1);
+
+	prot &= PPT_PCIR_PROT_MASK;
+
+	for (uint32_t i = reg; i < reg + len; ++i) {
+		/* delete old prot value (2 bits per register) */
+		sc->psc_pcir_prot_map[i / 4] &=
+		    ~(PPT_PCIR_PROT_MASK << ((i & 0x03) * 2));
+		/* set new prot value */
+		sc->psc_pcir_prot_map[i / 4] |= prot << ((i & 0x03) * 2);
+	}
+
+	return (0);
+}
+
+/*
+ * Return non-zero when the guest may write the physical config
+ * register 'reg' (write bit set in the protection map).
+ */
+static int
+is_pcir_writable(struct passthru_softc *sc, uint32_t reg)
+{
+	if (reg > PCI_REGMAX)
+		return (0);
+
+	return ((PPT_PCIR_PROT(reg) & PPT_PCIR_PROT_WO) != 0);
+}
+
+/*
+ * Return non-zero when the guest may read the physical config
+ * register 'reg' (read bit set in the protection map).
+ */
+static int
+is_pcir_readable(struct passthru_softc *sc, uint32_t reg)
+{
+	if (reg > PCI_REGMAX)
+		return (0);
+
+	return ((PPT_PCIR_PROT(reg) & PPT_PCIR_PROT_RO) != 0);
+}
+
+/*
+ * Apply device-specific initialization quirks after generic passthru
+ * setup.  Currently only Intel display devices (GVT-d) are handled.
+ * Returns 0 when no quirk applies, otherwise the quirk's result.
+ */
+static int
+passthru_init_quirks(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	struct passthru_softc *sc = pi->pi_arg;
+
+	uint16_t vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	uint8_t class = read_config(&sc->psc_sel, PCIR_CLASS, 0x01);
+
+	/* currently only display devices have quirks */
+	if (class != PCIC_DISPLAY)
+		return (0);
+
+	if (vendor == PCI_VENDOR_INTEL)
+		return gvt_d_init(ctx, pi, opts);
+
+	return (0);
+}
+
+/*
+ * Undo device-specific quirks applied by passthru_init_quirks().
+ * Currently only Intel display devices (GVT-d) are handled.
+ */
+static void
+passthru_deinit_quirks(struct vmctx *ctx, struct pci_devinst *pi)
+{
+	struct passthru_softc *sc = pi->pi_arg;
+
+	uint16_t vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	uint8_t class = read_config(&sc->psc_sel, PCIR_CLASS, 0x01);
+
+	/* currently only display devices have quirks */
+	if (class != PCIC_DISPLAY)
+		return;
+
+	/*
+	 * Plain call instead of 'return gvt_d_deinit(...)': returning a
+	 * void expression is an ISO C constraint violation (C11
+	 * 6.8.6.4p1), even though some compilers accept it.
+	 */
+	if (vendor == PCI_VENDOR_INTEL)
+		gvt_d_deinit(ctx, pi);
+}
+
static int
passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
@@ -734,9 +837,20 @@
sc->psc_pi = pi;
/* initialize config space */
- error = cfginit(ctx, pi, bus, slot, func);
+ if ((error = cfginit(ctx, pi, bus, slot, func)) != 0)
+ goto done;
+
+ /* allow access to all PCI registers */
+ if ((error = set_pcir_prot(sc, 0, PCI_REGMAX + 1, PPT_PCIR_PROT_RW)) != 0)
+ goto done;
+
+ if ((error = passthru_init_quirks(ctx, pi, opts)) != 0)
+ goto done;
+
+ error = 0; /* success */
done:
if (error) {
+ passthru_deinit_quirks(ctx, pi);
free(sc);
vm_unassign_pptdev(ctx, bus, slot, func);
}
@@ -786,6 +900,10 @@
sc = pi->pi_arg;
+ /* skip for protected PCI registers */
+ if (!is_pcir_readable(sc, coff))
+ return (-1);
+
/*
* PCI BARs and MSI capability is emulated.
*/
@@ -832,6 +950,10 @@
sc = pi->pi_arg;
+ /* skip for protected PCI registers */
+ if (!is_pcir_writable(sc, coff))
+ return (-1);
+
/*
* PCI BARs are emulated
*/
@@ -954,6 +1076,19 @@
return (val);
}
+/*
+ * pe_baraddr callback: (re)map or unmap a passthru BAR when the guest
+ * moves it or toggles decoding.  I/O-port BARs are rejected (-1) and
+ * left to the generic emulation.
+ */
+static int
+passthru_addr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
+    int enabled, uint64_t address)
+{
+	int error;
+	if (pi->pi_bar[baridx].type == PCIBAR_IO)
+		return (-1);
+
+	/*
+	 * NOTE(review): 'enabled' is passed straight through as the
+	 * map/unmap flag, so it must follow PT_MAP_PPTDEV_MMIO (1) /
+	 * PT_UNMAP_PPTDEV_MMIO (0) semantics — verify against the
+	 * pe_baraddr caller.  The assert aborts on a failed (un)map.
+	 */
+	error = passthru_modify_bar_registration(pi, baridx, enabled);
+	assert(error == 0);
+	return error;
+}
+
struct pci_devemu passthru = {
.pe_emu = "passthru",
.pe_init = passthru_init,
@@ -961,5 +1096,6 @@
.pe_cfgread = passthru_cfgread,
.pe_barwrite = passthru_write,
.pe_barread = passthru_read,
+ .pe_baraddr = passthru_addr,
};
PCI_EMUL_SET(passthru);

File Metadata

Mime Type
text/plain
Expires
Tue, Jan 13, 3:08 PM (8 h, 38 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
27629400
Default Alt Text
D26209.id82836.diff (51 KB)

Event Timeline