Index: lib/libvmmapi/vmmapi.h =================================================================== --- lib/libvmmapi/vmmapi.h +++ lib/libvmmapi/vmmapi.h @@ -73,6 +73,8 @@ VM_SYSMEM, VM_BOOTROM, VM_FRAMEBUFFER, + VM_VIDEOBIOS, + VM_NOTHING /* Last entry */ }; /* @@ -111,6 +113,8 @@ int vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t segoff, size_t len, int prot); +int vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len); + int vm_create(const char *name); int vm_get_device_fd(struct vmctx *ctx); struct vmctx *vm_open(const char *name); @@ -176,6 +180,10 @@ int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func); int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); +int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len); +int vm_get_vbios(struct vmctx *ctx, int bus, int slot, int func, + uint16_t vendor, uint16_t dev_id, void *bios, uint64_t *size); int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec); int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, Index: lib/libvmmapi/vmmapi.c =================================================================== --- lib/libvmmapi/vmmapi.c +++ lib/libvmmapi/vmmapi.c @@ -251,6 +251,19 @@ return (0); } +int +vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len) +{ + struct vm_munmap munmap; + int error; + + munmap.gpa = gpa; + munmap.len = len; + + error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap); + return (error); +} + int vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid, vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) @@ -980,6 +993,49 @@ return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio)); } +int +vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func, + vm_paddr_t gpa, size_t len) +{ + struct vm_pptdev_mmio pptmmio; + + bzero(&pptmmio, sizeof(pptmmio)); + pptmmio.bus = bus; + pptmmio.slot = slot; + pptmmio.func = func; + pptmmio.gpa = gpa; + pptmmio.len = len; + + return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio)); +} + +int +vm_get_vbios(struct vmctx *ctx, int bus, int slot, int func, + uint16_t vendor, uint16_t dev_id, void *bios, uint64_t *size) +{ + int error; + struct vm_vbios vbios; + + bzero(&vbios, sizeof(vbios)); + vbios.bus = bus; + vbios.slot = slot; + vbios.func = func; + vbios.vendor = vendor; + vbios.dev_id = dev_id; + vbios.bios = bios; + if (size != NULL) + vbios.size = *size; + else + vbios.size = 0; + + error = ioctl(ctx->fd, VM_GET_VBIOS, &vbios); + + if (size) + *size = vbios.size; + + return (error); +} + int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec) @@ -1644,7 +1700,7 @@ /* keep in sync with machine/vmm_dev.h */ static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT, VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG, - VM_MMAP_GETNEXT, VM_SET_REGISTER, VM_GET_REGISTER, + VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER, VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR, VM_SET_REGISTER_SET, VM_GET_REGISTER_SET, VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV, @@ -1654,8 +1710,8 @@ VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER, VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV, VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI, - VM_PPTDEV_MSIX, VM_PPTDEV_DISABLE_MSIX, - VM_INJECT_NMI, 
VM_STATS, VM_STAT_DESC, + VM_PPTDEV_MSIX, VM_PPTDEV_DISABLE_MSIX, VM_UNMAP_PPTDEV_MMIO, + VM_GET_VBIOS, VM_INJECT_NMI, VM_STATS, VM_STAT_DESC, VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE, VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA, VM_GLA2GPA_NOFAULT, Index: sys/amd64/include/vmm.h =================================================================== --- sys/amd64/include/vmm.h +++ sys/amd64/include/vmm.h @@ -231,6 +231,7 @@ */ int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, size_t len, int prot, int flags); +int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); void vm_free_memseg(struct vm *vm, int ident); int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); Index: sys/amd64/include/vmm_dev.h =================================================================== --- sys/amd64/include/vmm_dev.h +++ sys/amd64/include/vmm_dev.h @@ -49,6 +49,11 @@ #define VM_MEMMAP_F_WIRED 0x01 #define VM_MEMMAP_F_IOMMU 0x02 +struct vm_munmap { + vm_paddr_t gpa; + size_t len; +}; + #define VM_MEMSEG_NAME(m) ((m)->name[0] != '\0' ? (m)->name : NULL) struct vm_memseg { int segid; @@ -141,6 +146,16 @@ size_t len; }; +struct vm_vbios { + int bus; + int slot; + int func; + uint16_t vendor; + uint16_t dev_id; + void *bios; + uint64_t size; +}; + struct vm_pptdev_msi { int vcpu; int bus; @@ -270,6 +285,7 @@ IOCNUM_MMAP_MEMSEG = 16, IOCNUM_MMAP_GETNEXT = 17, IOCNUM_GLA2GPA_NOFAULT = 18, + IOCNUM_MUNMAP_MEMSEG = 19, /* register/state accessors */ IOCNUM_SET_REGISTER = 20, @@ -302,6 +318,8 @@ IOCNUM_PPTDEV_MSI = 43, IOCNUM_PPTDEV_MSIX = 44, IOCNUM_PPTDEV_DISABLE_MSIX = 45, + IOCNUM_UNMAP_PPTDEV_MMIO = 46, + IOCNUM_GET_VBIOS = 47, /* statistics */ IOCNUM_VM_STATS = 50, @@ -358,6 +376,8 @@ _IOW('v', IOCNUM_MMAP_MEMSEG, struct vm_memmap) #define VM_MMAP_GETNEXT \ _IOWR('v', IOCNUM_MMAP_GETNEXT, struct vm_memmap) +#define VM_MUNMAP_MEMSEG \ + _IOW('v', IOCNUM_MUNMAP_MEMSEG, struct vm_munmap) #define VM_SET_REGISTER \ _IOW('v', IOCNUM_SET_REGISTER, struct vm_register) #define VM_GET_REGISTER \ @@ -416,6 +436,10 @@ _IOW('v', IOCNUM_PPTDEV_MSIX, struct vm_pptdev_msix) #define VM_PPTDEV_DISABLE_MSIX \ _IOW('v', IOCNUM_PPTDEV_DISABLE_MSIX, struct vm_pptdev) +#define VM_UNMAP_PPTDEV_MMIO \ + _IOW('v', IOCNUM_UNMAP_PPTDEV_MMIO, struct vm_pptdev_mmio) +#define VM_GET_VBIOS \ + _IOWR('v', IOCNUM_GET_VBIOS, struct vm_vbios) #define VM_INJECT_NMI \ _IOW('v', IOCNUM_INJECT_NMI, struct vm_nmi) #define VM_STATS \ Index: sys/amd64/vmm/amd/amdgpu_bios.h =================================================================== --- /dev/null +++ sys/amd64/vmm/amd/amdgpu_bios.h @@ -0,0 +1,40 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AMDGPU_BIOS_H_
+#define _AMDGPU_BIOS_H_
+
+#include
+#include
+
+int vm_amdgpu_get_vbios(struct vm *vm, int bus, int slot, int func,
+    uint16_t vendor, uint16_t dev_id, void *bios, uint64_t *size);
+
+#endif /* !_AMDGPU_BIOS_H_ */
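The header above declares the kernel-side fetcher; userland reaches it through the VM_GET_VBIOS ioctl via vm_get_vbios() in libvmmapi. A minimal sketch of the intended two-call pattern follows (query the size with bios == NULL, then fetch); the BDF 2/0/0 and the 0x1002/0x67df vendor/device IDs are placeholders, not values mandated by this change:

	#include <sys/param.h>
	#include <err.h>
	#include <stdint.h>
	#include <stdlib.h>

	#include <vmmapi.h>

	static void *
	fetch_vbios(struct vmctx *ctx, uint64_t *sizep)
	{
		uint64_t size = 0;
		void *buf;

		/* First call: bios == NULL, the kernel only reports the size. */
		if (vm_get_vbios(ctx, 2, 0, 0, 0x1002, 0x67df, NULL, &size) != 0)
			err(1, "vm_get_vbios (size query)");

		buf = malloc(size);
		if (buf == NULL)
			err(1, "malloc");

		/* Second call: the kernel copies out min(image size, *size) bytes. */
		if (vm_get_vbios(ctx, 2, 0, 0, 0x1002, 0x67df, buf, &size) != 0)
			err(1, "vm_get_vbios (fetch)");

		/* Option-ROM sanity check: 0x55 0xAA signature, length in 512-byte units. */
		if (size < 3 || ((uint8_t *)buf)[0] != 0x55 || ((uint8_t *)buf)[1] != 0xAA)
			errx(1, "not a valid expansion ROM image");

		*sizep = size;
		return (buf);
	}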
Index: sys/amd64/vmm/amd/amdgpu_bios.c
===================================================================
--- /dev/null
+++ sys/amd64/vmm/amd/amdgpu_bios.c
@@ -0,0 +1,1039 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * This is a copy of
+ */
+
+#define __FBSD__
+
+#ifdef __FBSD__
+/* build defines */
+#define CONFIG_ACPI
+
+/* includes */
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "amdgpu_bios.h"
+#include "contrib/dev/acpica/include/acpi.h"
+#include "contrib/dev/acpica/include/acpixf.h"
+
+/* type definitions */
+#define __iomem
+#define true 1
+#define false 0
+typedef uint8_t UCHAR;
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint32_t acpi_size;
+
+/* log definitions */
+#define DRM_DEBUG uprintf
+#define DRM_INFO uprintf
+#define DRM_ERROR uprintf
+
+#define GFP_NATIVE_MASK (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_ZERO)
+#define GFP_KERNEL M_WAITOK
+#define __GFP_ZERO M_ZERO
+#define kzalloc(size, flags) kmalloc(size, (flags) | __GFP_ZERO)
+
+#define memcpy_fromio(a, b, c) memcpy((a), (b), (c))
+
+#define acpi_get_table AcpiGetTable
+
+#define PCI_DEVFN(bus, slot, func) ((((bus) & 0xff) << 8) | (((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+#define PCI_BUS_NUM(devfn) (((devfn) >> 8) & 0xff)
+
+//MALLOC_DECLARE(M_KMALLOC);
+MALLOC_DECLARE(M_VMMDEV);
+
+typedef unsigned gfp_t;
+
+static inline gfp_t
+linux_check_m_flags(gfp_t flags)
+{
+	const gfp_t m = M_NOWAIT | M_WAITOK;
+
+	/* make sure either M_NOWAIT or M_WAITOK is set */
+	if ((flags & m) == 0)
+		flags |= M_NOWAIT;
+	else if ((flags & m) == m)
+		flags &= ~M_WAITOK;
+
+	/* mask away LinuxKPI specific flags */
+	return (flags & GFP_NATIVE_MASK);
+}
+
+static inline void *
+kmalloc(size_t size, gfp_t flags)
+{
+	return (malloc(size, M_VMMDEV, linux_check_m_flags(flags)));
+}
+
+static inline void
+kfree(const void *ptr)
+{
+	free(__DECONST(void *, ptr), M_VMMDEV);
+}
+
+static inline void *
+kmemdup(const void *src, size_t len, gfp_t gfp)
+{
+	void *dst;
+
+	dst = kmalloc(len, gfp);
+	if (dst != NULL) {
+		memcpy(dst, src, len);
+	}
+	return (dst);
+}
+
+/* structs */
+/* device copied from sys/compat/linuxkpi/common/include/linux/pci.h */
+struct device {
+#ifndef __FBSD__
+	struct device *parent;
+	struct list_head irqents;
+#endif /*__FBSD__*/
+	device_t bsddev;
+#ifndef __FBSD__
+	/*
+	 * The following flag is used to determine if the LinuxKPI is
+	 * responsible for detaching the BSD device or not. If the
+	 * LinuxKPI got the BSD device using devclass_get_device(), it
+	 * must not try to detach or delete it, because it's already
+	 * done somewhere else.
+ */ + bool bsddev_attached_here; + struct device_driver *driver; + struct device_type *type; + dev_t devt; + struct class *class; + void (*release)(struct device *dev); + struct kobject kobj; + uint64_t *dma_mask; + void *driver_data; + unsigned int irq; +#define LINUX_IRQ_INVALID 65535 + unsigned int irq_start; + unsigned int irq_end; + const struct attribute_group **groups; + struct fwnode_handle *fwnode; + + spinlock_t devres_lock; + struct list_head devres_head; +#endif /*__FBSD__*/ +}; + +/* pci_dev copied from sys/compat/linuxkpi/common/include/linux/pci.h */ +struct pci_dev { + struct device dev; +#ifndef __FBSD__ + struct list_head links; + struct pci_driver *pdrv; + struct pci_bus *bus; + uint64_t dma_mask; +#endif + uint16_t device; + uint16_t vendor; +#ifndef __FBSD__ + uint16_t subsystem_vendor; + uint16_t subsystem_device; + unsigned int irq; +#endif + unsigned int devfn; +#ifndef __FBSD__ + uint32_t class; + uint8_t revision; + bool msi_enabled; +#endif +}; +/* amdgpu_device copied from */ +struct amdgpu_device { +#ifndef __FBSD__ + struct device *dev; + struct drm_device *ddev; +#endif /*__FBSD__*/ + struct pci_dev *pdev; + +#ifndef __FBSD__ +#ifdef CONFIG_DRM_AMD_ACP + struct amdgpu_acp acp; +#endif + + /* ASIC */ + enum amd_asic_type asic_type; + uint32_t family; + uint32_t rev_id; + uint32_t external_rev_id; + unsigned long flags; + int usec_timeout; + const struct amdgpu_asic_funcs *asic_funcs; + bool shutdown; + bool need_swiotlb; + bool accel_working; + struct notifier_block acpi_nb; + struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; + struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; + unsigned debugfs_count; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_preempt; + struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; +#endif + struct amdgpu_atif *atif; + struct amdgpu_atcs atcs; + struct mutex srbm_mutex; + /* GRBM index mutex. 
Protects concurrent access to GRBM index */ + struct mutex grbm_idx_mutex; + struct dev_pm_domain vga_pm_domain; + bool have_disp_power_ref; + bool have_atomics_support; + + /* BIOS */ + bool is_atom_fw; +#endif /*__FBSD__*/ + uint8_t *bios; + uint32_t bios_size; +#ifndef __FBSD__ + struct amdgpu_bo *stolen_vga_memory; + uint32_t bios_scratch_reg_offset; + uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; + + /* Register/doorbell mmio */ + resource_size_t rmmio_base; + resource_size_t rmmio_size; + void __iomem *rmmio; + /* protects concurrent MM_INDEX/DATA based register access */ + spinlock_t mmio_idx_lock; + struct amdgpu_mmio_remap rmmio_remap; + /* protects concurrent SMC based register access */ + spinlock_t smc_idx_lock; + amdgpu_rreg_t smc_rreg; + amdgpu_wreg_t smc_wreg; + /* protects concurrent PCIE register access */ + spinlock_t pcie_idx_lock; + amdgpu_rreg_t pcie_rreg; + amdgpu_wreg_t pcie_wreg; + amdgpu_rreg_t pciep_rreg; + amdgpu_wreg_t pciep_wreg; + amdgpu_rreg64_t pcie_rreg64; + amdgpu_wreg64_t pcie_wreg64; + /* protects concurrent UVD register access */ + spinlock_t uvd_ctx_idx_lock; + amdgpu_rreg_t uvd_ctx_rreg; + amdgpu_wreg_t uvd_ctx_wreg; + /* protects concurrent DIDT register access */ + spinlock_t didt_idx_lock; + amdgpu_rreg_t didt_rreg; + amdgpu_wreg_t didt_wreg; + /* protects concurrent gc_cac register access */ + spinlock_t gc_cac_idx_lock; + amdgpu_rreg_t gc_cac_rreg; + amdgpu_wreg_t gc_cac_wreg; + /* protects concurrent se_cac register access */ + spinlock_t se_cac_idx_lock; + amdgpu_rreg_t se_cac_rreg; + amdgpu_wreg_t se_cac_wreg; + /* protects concurrent ENDPOINT (audio) register access */ + spinlock_t audio_endpt_idx_lock; + amdgpu_block_rreg_t audio_endpt_rreg; + amdgpu_block_wreg_t audio_endpt_wreg; + void __iomem *rio_mem; + resource_size_t rio_mem_size; +#ifdef __FreeBSD__ + int rio_rid; + int rio_type; + struct resource *rio_res; +#endif + struct amdgpu_doorbell doorbell; + + /* clock/pll info */ + struct amdgpu_clock clock; + + /* MC */ + struct amdgpu_gmc gmc; + struct amdgpu_gart gart; + dma_addr_t dummy_page_addr; + struct amdgpu_vm_manager vm_manager; + struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; + unsigned num_vmhubs; + + /* memory management */ + struct amdgpu_mman mman; + struct amdgpu_vram_scratch vram_scratch; + struct amdgpu_wb wb; + atomic64_t num_bytes_moved; + atomic64_t num_evictions; + atomic64_t num_vram_cpu_page_faults; + atomic_t gpu_reset_counter; + atomic_t vram_lost_counter; + + /* data for buffer migration throttling */ + struct { + spinlock_t lock; + s64 last_update_us; + s64 accum_us; /* accumulated microseconds */ + s64 accum_us_vis; /* for visible VRAM */ + u32 log2_max_MBps; + } mm_stats; + + /* display */ + bool enable_virtual_display; + struct amdgpu_mode_info mode_info; + /* For pre-DCE11. 
DCE11 and later are in "struct amdgpu_device->dm" */ + struct work_struct hotplug_work; + struct amdgpu_irq_src crtc_irq; + struct amdgpu_irq_src vupdate_irq; + struct amdgpu_irq_src pageflip_irq; + struct amdgpu_irq_src hpd_irq; + + /* rings */ + u64 fence_context; + unsigned num_rings; + struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; + bool ib_pool_ready; + struct amdgpu_sa_manager ring_tmp_bo; + + /* interrupts */ + struct amdgpu_irq irq; + + /* powerplay */ + struct amd_powerplay powerplay; + bool pp_force_state_enabled; + + /* smu */ + struct smu_context smu; + + /* dpm */ + struct amdgpu_pm pm; + u32 cg_flags; + u32 pg_flags; + + /* gfx */ + struct amdgpu_gfx gfx; + + /* sdma */ + struct amdgpu_sdma sdma; + + /* uvd */ + struct amdgpu_uvd uvd; + + /* vce */ + struct amdgpu_vce vce; + + /* vcn */ + struct amdgpu_vcn vcn; + + /* firmwares */ + struct amdgpu_firmware firmware; + + /* PSP */ + struct psp_context psp; + + /* GDS */ + struct amdgpu_gds gds; + + /* KFD */ + struct amdgpu_kfd_dev kfd; + + /* UMC */ + struct amdgpu_umc umc; + + /* display related functionality */ + struct amdgpu_display_manager dm; + + /* discovery */ + uint8_t *discovery; + + /* mes */ + bool enable_mes; + struct amdgpu_mes mes; + + struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; + int num_ip_blocks; + struct mutex mn_lock; + DECLARE_HASHTABLE(mn_hash, 7); + + /* tracking pinned memory */ + atomic64_t vram_pin_size; + atomic64_t visible_pin_size; + atomic64_t gart_pin_size; + + /* soc15 register offset based on ip, instance and segment */ + uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; + + const struct amdgpu_nbio_funcs *nbio_funcs; + const struct amdgpu_df_funcs *df_funcs; + const struct amdgpu_mmhub_funcs *mmhub_funcs; + + /* delayed work_func for deferring clockgating during resume */ + struct delayed_work delayed_init_work; + + struct amdgpu_virt virt; + /* firmware VRAM reservation */ + struct amdgpu_fw_vram_usage fw_vram_usage; + + /* link all shadow bo */ + struct list_head shadow_list; + struct mutex shadow_list_lock; + /* keep an lru list of rings by HW IP */ + struct list_head ring_lru_list; + spinlock_t ring_lru_list_lock; + + /* record hw reset is performed */ + bool has_hw_reset; + u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; + + /* s3/s4 mask */ + bool in_suspend; + + /* record last mm index being written through WREG32*/ + unsigned long last_mm_index; + bool in_gpu_reset; + enum pp_mp1_state mp1_state; + struct mutex lock_reset; + struct amdgpu_doorbell_index doorbell_index; + + int asic_reset_res; + struct work_struct xgmi_reset_work; + + bool in_baco_reset; + + long gfx_timeout; + long sdma_timeout; + long video_timeout; + long compute_timeout; + + uint64_t unique_id; + uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; +#endif /*__FBSD__*/ +}; + +/* AMD_ACPI_DESCRIPTION_HEADER copied from */ +typedef struct { + ULONG Signature; + ULONG TableLength; //Length + UCHAR Revision; + UCHAR Checksum; + UCHAR OemId[6]; + UCHAR OemTableId[8]; //UINT64 OemTableId; + ULONG OemRevision; + ULONG CreatorId; + ULONG CreatorRevision; +} AMD_ACPI_DESCRIPTION_HEADER; + +/* UEFI_ACPI_VFCT copied from */ +typedef struct { + AMD_ACPI_DESCRIPTION_HEADER SHeader; + UCHAR TableUUID[16]; //0x24 + ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture. + ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture. 
+ ULONG Reserved[4]; //0x3C +}UEFI_ACPI_VFCT; + +/* VFCT_IMAGE_HEADER copied from */ +typedef struct { + ULONG PCIBus; //0x4C + ULONG PCIDevice; //0x50 + ULONG PCIFunction; //0x54 + USHORT VendorID; //0x58 + USHORT DeviceID; //0x5A + USHORT SSVID; //0x5C + USHORT SSID; //0x5E + ULONG Revision; //0x60 + ULONG ImageLength; //0x64 +}VFCT_IMAGE_HEADER; + + +/* GOP_VBIOS_CONTENT copied from */ +typedef struct { + VFCT_IMAGE_HEADER VbiosHeader; + UCHAR VbiosContent[1]; +}GOP_VBIOS_CONTENT; + +#endif /*__FBSD__*/ + +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ + +#ifndef __FBSD__ +#include "amdgpu.h" +#include "atom.h" + +#include +#include +#include +#endif /*__FBSD__*/ + +/* + * BIOS. + */ + +#define AMD_VBIOS_SIGNATURE " 761295520" +#define AMD_VBIOS_SIGNATURE_OFFSET 0x30 +#define AMD_VBIOS_SIGNATURE_SIZE sizeof(AMD_VBIOS_SIGNATURE) +#define AMD_VBIOS_SIGNATURE_END (AMD_VBIOS_SIGNATURE_OFFSET + AMD_VBIOS_SIGNATURE_SIZE) +#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA) +#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9) + +/* Check if current bios is an ATOM BIOS. + * Return true if it is ATOM BIOS. Otherwise, return false. + */ +static bool check_atom_bios(uint8_t *bios, size_t size) +{ + uint16_t tmp, bios_header_start; + + if (!bios || size < 0x49) { + DRM_INFO("vbios mem is null or mem size is wrong\n"); + return false; + } + + if (!AMD_IS_VALID_VBIOS(bios)) { + DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]); + return false; + } + + bios_header_start = bios[0x48] | (bios[0x49] << 8); + if (!bios_header_start) { + DRM_INFO("Can't locate bios header\n"); + return false; + } + + tmp = bios_header_start + 4; + if (size < tmp) { + DRM_INFO("BIOS header is broken\n"); + return false; + } + + if (!memcmp(bios + tmp, "ATOM", 4) || + !memcmp(bios + tmp, "MOTA", 4)) { + DRM_DEBUG("ATOMBIOS detected\n"); + return true; + } + + return false; +} + +#ifndef __FBSD__ +/* If you boot an IGP board with a discrete card as the primary, + * the IGP rom is not accessible via the rom bar as the IGP rom is + * part of the system bios. On boot, the system bios puts a + * copy of the igp rom at the start of vram if a discrete card is + * present. 
+ */ +static bool igp_read_bios_from_vram(struct amdgpu_device *adev) +{ + uint8_t __iomem *bios; + resource_size_t vram_base; + resource_size_t size = 256 * 1024; /* ??? */ + + if (!(adev->flags & AMD_IS_APU)) + if (amdgpu_device_need_post(adev)) + return false; + + adev->bios = NULL; + vram_base = pci_resource_start(adev->pdev, 0); + bios = ioremap_wc(vram_base, size); + if (!bios) { + return false; + } + + adev->bios = kmalloc(size, GFP_KERNEL); + if (!adev->bios) { + iounmap(bios); + return false; + } + adev->bios_size = size; + memcpy_fromio(adev->bios, bios, size); + iounmap(bios); + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); + return false; + } + + return true; +} +#else /*__FBSD__*/ +static bool igp_read_bios_from_vram(struct amdgpu_device *adev) +{ + return false; +} +#endif /*__FBSD__*/ + +#ifndef __FBSD__ +#ifdef __FreeBSD__ +#define pci_map_rom(pdev, sizep) \ + vga_pci_map_bios(device_get_parent(pdev->dev.bsddev), sizep) +#define pci_unmap_rom(pdev, bios) \ + vga_pci_unmap_bios(device_get_parent(pdev->dev.bsddev), bios) +#endif +#else /*__FBSD__*/ +#define pci_map_rom(pdev, sizep) \ + vga_pci_map_bios(pdev->dev.bsddev, sizep) +#define pci_unmap_rom(pdev, bios) \ + vga_pci_unmap_bios(pdev->dev.bsddev, bios) +#endif /*__FBSD__*/ + +#ifdef __FBSD__ +static +#endif +bool amdgpu_read_bios(struct amdgpu_device *adev) +{ + uint8_t __iomem *bios; + size_t size; + + adev->bios = NULL; + /* XXX: some cards may return 0 for rom size? ddx has a workaround */ + bios = pci_map_rom(adev->pdev, &size); + if (!bios) { + return false; + } + + adev->bios = kzalloc(size, GFP_KERNEL); + if (adev->bios == NULL) { + pci_unmap_rom(adev->pdev, bios); + return false; + } + adev->bios_size = size; + memcpy_fromio(adev->bios, bios, size); + pci_unmap_rom(adev->pdev, bios); + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); + return false; + } + + return true; +} + +#ifndef __FBSD__ +static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) +{ + u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0}; + int len; + + if (!adev->asic_funcs->read_bios_from_rom) + return false; + + /* validate VBIOS signature */ + if (amdgpu_asic_read_bios_from_rom(adev, &header[0], sizeof(header)) == false) + return false; + header[AMD_VBIOS_SIGNATURE_END] = 0; + + if ((!AMD_IS_VALID_VBIOS(header)) || + 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET], + AMD_VBIOS_SIGNATURE, + strlen(AMD_VBIOS_SIGNATURE))) + return false; + + /* valid vbios, go on */ + len = AMD_VBIOS_LENGTH(header); + len = ALIGN(len, 4); + adev->bios = kmalloc(len, GFP_KERNEL); + if (!adev->bios) { + DRM_ERROR("no memory to allocate for BIOS\n"); + return false; + } + adev->bios_size = len; + + /* read complete BIOS */ + amdgpu_asic_read_bios_from_rom(adev, adev->bios, len); + + if (!check_atom_bios(adev->bios, len)) { + kfree(adev->bios); + return false; + } + + return true; +} +#else /*__FBSD__*/ +static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) +{ + return false; +} +#endif + +#ifndef __FBSD__ +static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) +{ + uint8_t __iomem *bios; + size_t size; + + adev->bios = NULL; + + bios = pci_platform_rom(adev->pdev, &size); + if (!bios) { + return false; + } + + adev->bios = kzalloc(size, GFP_KERNEL); + if (adev->bios == NULL) + return false; + + memcpy_fromio(adev->bios, bios, size); + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); + return false; + } + + adev->bios_size = size; + + return true; +} +#else /*__FBSD__*/ +static bool 
amdgpu_read_platform_bios(struct amdgpu_device *adev) +{ + return false; +} +#endif /*__FBSD__*/ + +#ifdef CONFIG_ACPI +#ifndef __FBSD__ +/* ATRM is used to get the BIOS on the discrete cards in + * dual-gpu systems. + */ +/* retrieve the ROM in 4k blocks */ +#define ATRM_BIOS_PAGE 4096 +/** + * amdgpu_atrm_call - fetch a chunk of the vbios + * + * @atrm_handle: acpi ATRM handle + * @bios: vbios image pointer + * @offset: offset of vbios image data to fetch + * @len: length of vbios image data to fetch + * + * Executes ATRM to fetch a chunk of the discrete + * vbios image on PX systems (all asics). + * Returns the length of the buffer fetched. + */ +static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object atrm_arg_elements[2], *obj; + struct acpi_object_list atrm_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + atrm_arg.Count = 2; + atrm_arg.Pointer = &atrm_arg_elements[0]; + + atrm_arg_elements[0].Type = ACPI_TYPE_INTEGER; + atrm_arg_elements[0].Integer.Value = offset; + + atrm_arg_elements[1].Type = ACPI_TYPE_INTEGER; + atrm_arg_elements[1].Integer.Value = len; + + status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + + obj = (union acpi_object *)buffer.Pointer; + memcpy(bios+offset, obj->Buffer.Pointer, obj->Buffer.Length); + len = obj->Buffer.Length; + kfree(buffer.Pointer); + return len; +} + +static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) +{ + int ret; + int size = 256 * 1024; + int i; + struct pci_dev *pdev = NULL; + acpi_handle dhandle, atrm_handle; + acpi_status status; + bool found = false; + + /* ATRM is for the discrete card only */ + if (adev->flags & AMD_IS_APU) + return false; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + + if (!found) + return false; + + adev->bios = kmalloc(size, GFP_KERNEL); + if (!adev->bios) { + DRM_ERROR("Unable to allocate bios\n"); + return false; + } + + for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { + ret = amdgpu_atrm_call(atrm_handle, + adev->bios, + (i * ATRM_BIOS_PAGE), + ATRM_BIOS_PAGE); + if (ret < ATRM_BIOS_PAGE) + break; + } + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); + return false; + } + adev->bios_size = size; + return true; +} +#else /*__FBSD__*/ +static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) +{ + return false; +} +#endif /*__FBSD__*/ +#else +static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) +{ + return false; +} +#endif + +#ifndef __FBSD__ +static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) +{ + if (adev->flags & AMD_IS_APU) + return igp_read_bios_from_vram(adev); + else + return amdgpu_asic_read_disabled_bios(adev); +} +#else /*__FBSD__*/ +static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) +{ + return false; +} +#endif /*__FBSD__*/ + +#ifdef CONFIG_ACPI +static bool 
amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) +{ + struct acpi_table_header *hdr; + acpi_size tbl_size; + UEFI_ACPI_VFCT *vfct; + unsigned offset; + + if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) + return false; +#ifdef __FreeBSD__ + tbl_size = hdr->Length; +#else + tbl_size = hdr->length; +#endif + if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { + DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); + return false; + } + + vfct = (UEFI_ACPI_VFCT *)hdr; + offset = vfct->VBIOSImageOffset; + + while (offset < tbl_size) { + GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset); + VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader; + + offset += sizeof(VFCT_IMAGE_HEADER); + if (offset > tbl_size) { + DRM_ERROR("ACPI VFCT image header truncated\n"); + return false; + } + + offset += vhdr->ImageLength; + if (offset > tbl_size) { + DRM_ERROR("ACPI VFCT image truncated\n"); + return false; + } + + if (vhdr->ImageLength && +#ifndef __FBSD__ + vhdr->PCIBus == adev->pdev->bus->number && +#else + vhdr->PCIBus == PCI_BUS_NUM(adev->pdev->devfn) && +#endif + vhdr->PCIDevice == PCI_SLOT(adev->pdev->devfn) && + vhdr->PCIFunction == PCI_FUNC(adev->pdev->devfn) && + vhdr->VendorID == adev->pdev->vendor && + vhdr->DeviceID == adev->pdev->device) { + adev->bios = kmemdup(&vbios->VbiosContent, + vhdr->ImageLength, + GFP_KERNEL); + + if (!check_atom_bios(adev->bios, vhdr->ImageLength)) { + kfree(adev->bios); + return false; + } + adev->bios_size = vhdr->ImageLength; + return true; + } + } + + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + return false; +} +#else +static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) +{ + return false; +} +#endif + +#ifdef __FBSD__ +static +#endif +bool amdgpu_get_bios(struct amdgpu_device *adev) +{ + if (amdgpu_atrm_get_bios(adev)) + goto success; + + if (amdgpu_acpi_vfct_bios(adev)) + goto success; + + if (igp_read_bios_from_vram(adev)) + goto success; + + if (amdgpu_read_bios(adev)) + goto success; + + if (amdgpu_read_bios_from_rom(adev)) + goto success; + + if (amdgpu_read_disabled_bios(adev)) + goto success; + + if (amdgpu_read_platform_bios(adev)) + goto success; + + DRM_ERROR("Unable to locate a BIOS ROM\n"); + return false; + +success: +#ifndef __FBSD__ + adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? 
true : false; +#endif + return true; +} + +#ifdef __FBSD__ +int +vm_amdgpu_get_vbios(struct vm *vm, int bus, int slot, int func, + uint16_t vendor, uint16_t dev_id, void *bios, uint64_t *size) +{ + int error; + + error = 0; + + struct pci_dev pdev; + struct amdgpu_device adev; + + adev.pdev = &pdev; + pdev.dev.bsddev = pci_find_bsf(bus, slot, func); + pdev.devfn = PCI_DEVFN(bus, slot, func); + pdev.vendor = vendor; + pdev.device = dev_id; + + if (!amdgpu_get_bios(&adev)) + return ENOENT; + + if (bios) { + *size = min(adev.bios_size, *size); + error = copyout(adev.bios, bios, *size); + } else if (size) { + *size = adev.bios_size; + } + + kfree(adev.bios); + + return (error); +} +#endif /*__FBSD__*/ Index: sys/amd64/vmm/io/ppt.h =================================================================== --- sys/amd64/vmm/io/ppt.h +++ sys/amd64/vmm/io/ppt.h @@ -34,6 +34,8 @@ int ppt_unassign_all(struct vm *vm); int ppt_map_mmio(struct vm *vm, int bus, int slot, int func, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); +int ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, + vm_paddr_t gpa, size_t len); int ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func, uint64_t addr, uint64_t msg, int numvec); int ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func, Index: sys/amd64/vmm/io/ppt.c =================================================================== --- sys/amd64/vmm/io/ppt.c +++ sys/amd64/vmm/io/ppt.c @@ -224,7 +224,7 @@ } static void -ppt_unmap_mmio(struct vm *vm, struct pptdev *ppt) +ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt) { int i; struct pptseg *seg; @@ -412,7 +412,7 @@ pci_save_state(ppt->dev); ppt_pci_reset(ppt->dev); pci_restore_state(ppt->dev); - ppt_unmap_mmio(vm, ppt); + ppt_unmap_all_mmio(vm, ppt); ppt_teardown_msi(ppt); ppt_teardown_msix(ppt); iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev)); @@ -466,6 +466,35 @@ return (ENOSPC); } +int +ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, + vm_paddr_t gpa, size_t len) +{ + int i, error; + struct pptseg *seg; + struct pptdev *ppt; + + ppt = ppt_find(bus, slot, func); + if (ppt != NULL) { + if (ppt->vm != vm) + return (EBUSY); + + for (i = 0; i < MAX_MMIOSEGS; i++) { + seg = &ppt->mmio[i]; + if (seg->gpa == gpa && seg->len == len) { + error = vm_unmap_mmio(vm, seg->gpa, seg->len); + if (error == 0) { + seg->gpa = 0; + seg->len = 0; + } + return (error); + } + } + return (ENOENT); + } + return (ENOENT); +} + static int pptintr(void *arg) { Index: sys/amd64/vmm/vmm.c =================================================================== --- sys/amd64/vmm/vmm.c +++ sys/amd64/vmm/vmm.c @@ -134,7 +134,7 @@ bool sysmem; struct vm_object *object; }; -#define VM_MAX_MEMSEGS 3 +#define VM_MAX_MEMSEGS 4 struct mem_map { vm_paddr_t gpa; @@ -797,6 +797,24 @@ return (0); } +int +vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) +{ + struct mem_map *m; + int i; + + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + m = &vm->mem_maps[i]; + if (m->gpa == gpa && m->len == len && + (m->flags & VM_MEMMAP_F_IOMMU) == 0) { + vm_free_memmap(vm, i); + return (0); + } + } + + return (EINVAL); +} + int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) Index: sys/amd64/vmm/vmm_dev.c =================================================================== --- sys/amd64/vmm/vmm_dev.c +++ sys/amd64/vmm/vmm_dev.c @@ -60,6 +60,7 @@ #include #include +#include "amd/amdgpu_bios.h" #include "vmm_lapic.h" #include "vmm_stat.h" #include 
"vmm_mem.h" @@ -366,6 +367,7 @@ struct vm_capability *vmcap; struct vm_pptdev *pptdev; struct vm_pptdev_mmio *pptmmio; + struct vm_vbios *vbios; struct vm_pptdev_msi *pptmsi; struct vm_pptdev_msix *pptmsix; struct vm_nmi *vmnmi; @@ -381,6 +383,7 @@ struct vm_rtc_time *rtctime; struct vm_rtc_data *rtcdata; struct vm_memmap *mm; + struct vm_munmap *mu; struct vm_cpu_topology *topology; struct vm_readwrite_kernemu_device *kernemu; uint64_t *regvals; @@ -435,6 +438,7 @@ break; case VM_MAP_PPTDEV_MMIO: + case VM_UNMAP_PPTDEV_MMIO: case VM_BIND_PPTDEV: case VM_UNBIND_PPTDEV: #ifdef COMPAT_FREEBSD12 @@ -442,6 +446,7 @@ #endif case VM_ALLOC_MEMSEG: case VM_MMAP_MEMSEG: + case VM_MUNMAP_MEMSEG: case VM_REINIT: /* * ioctls that operate on the entire virtual machine must @@ -525,6 +530,17 @@ pptmmio->func, pptmmio->gpa, pptmmio->len, pptmmio->hpa); break; + case VM_UNMAP_PPTDEV_MMIO: + pptmmio = (struct vm_pptdev_mmio *)data; + error = ppt_unmap_mmio(sc->vm, pptmmio->bus, pptmmio->slot, + pptmmio->func, pptmmio->gpa, pptmmio->len); + break; + case VM_GET_VBIOS: + vbios = (struct vm_vbios *)data; + // currently only amd cpus are supported + error = vm_amdgpu_get_vbios(sc->vm, vbios->bus, vbios->slot, vbios->func, + vbios->vendor, vbios->dev_id, vbios->bios, &vbios->size); + break; case VM_BIND_PPTDEV: pptdev = (struct vm_pptdev *)data; error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot, @@ -649,6 +665,10 @@ sizeof(((struct vm_memseg_fbsd12 *)0)->name)); break; #endif + case VM_MUNMAP_MEMSEG: + mu = (struct vm_munmap *)data; + error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len); + break; case VM_ALLOC_MEMSEG: error = alloc_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg *)0)->name)); Index: sys/dev/pci/pcireg.h =================================================================== --- sys/dev/pci/pcireg.h +++ sys/dev/pci/pcireg.h @@ -1098,3 +1098,9 @@ #define PCIM_OSC_CTL_PCIE_PME 0x04 /* PCIe Native Power Mgt Events */ #define PCIM_OSC_CTL_PCIE_AER 0x08 /* PCIe Advanced Error Reporting */ #define PCIM_OSC_CTL_PCIE_CAP_STRUCT 0x10 /* Various Capability Structures */ + +/* + * PCI Vendors + */ +#define PCI_VENDOR_INTEL 0x8086 +#define PCI_VENDOR_AMD 0x1002 Index: sys/modules/vmm/Makefile =================================================================== --- sys/modules/vmm/Makefile +++ sys/modules/vmm/Makefile @@ -56,7 +56,8 @@ npt.c \ ivrs_drv.c \ amdvi_hw.c \ - svm_msr.c + svm_msr.c \ + amdgpu_bios.c .if ${KERN_OPTS:MBHYVE_SNAPSHOT} != "" SRCS+= vmm_snapshot.c Index: usr.sbin/bhyve/Makefile =================================================================== --- usr.sbin/bhyve/Makefile +++ usr.sbin/bhyve/Makefile @@ -39,15 +39,18 @@ net_backends.c \ net_utils.c \ pci_ahci.c \ + pci_apu-d.c \ pci_e82545.c \ pci_emul.c \ pci_hda.c \ pci_fbuf.c \ + pci_gvt-d.c \ pci_hostbridge.c \ pci_irq.c \ pci_lpc.c \ pci_nvme.c \ pci_passthru.c \ + pci_rom_alloc.c \ pci_virtio_9p.c \ pci_virtio_block.c \ pci_virtio_console.c \ Index: usr.sbin/bhyve/pci_apu-d.c =================================================================== --- /dev/null +++ usr.sbin/bhyve/pci_apu-d.c @@ -0,0 +1,116 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Index: sys/dev/pci/pcireg.h
===================================================================
--- sys/dev/pci/pcireg.h
+++ sys/dev/pci/pcireg.h
@@ -1098,3 +1098,9 @@
 #define	PCIM_OSC_CTL_PCIE_PME		0x04	/* PCIe Native Power Mgt Events */
 #define	PCIM_OSC_CTL_PCIE_AER		0x08	/* PCIe Advanced Error Reporting */
 #define	PCIM_OSC_CTL_PCIE_CAP_STRUCT	0x10	/* Various Capability Structures */
+
+/*
+ * PCI Vendors
+ */
+#define	PCI_VENDOR_INTEL	0x8086
+#define	PCI_VENDOR_AMD		0x1002

Index: sys/modules/vmm/Makefile
===================================================================
--- sys/modules/vmm/Makefile
+++ sys/modules/vmm/Makefile
@@ -56,7 +56,8 @@
 	npt.c \
 	ivrs_drv.c \
 	amdvi_hw.c \
-	svm_msr.c
+	svm_msr.c \
+	amdgpu_bios.c

 .if ${KERN_OPTS:MBHYVE_SNAPSHOT} != ""
 SRCS+=	vmm_snapshot.c

Index: usr.sbin/bhyve/Makefile
===================================================================
--- usr.sbin/bhyve/Makefile
+++ usr.sbin/bhyve/Makefile
@@ -39,15 +39,18 @@
 	net_backends.c \
 	net_utils.c \
 	pci_ahci.c \
+	pci_apu-d.c \
 	pci_e82545.c \
 	pci_emul.c \
 	pci_hda.c \
 	pci_fbuf.c \
+	pci_gvt-d.c \
 	pci_hostbridge.c \
 	pci_irq.c \
 	pci_lpc.c \
 	pci_nvme.c \
 	pci_passthru.c \
+	pci_rom_alloc.c \
 	pci_virtio_9p.c \
 	pci_virtio_block.c \
 	pci_virtio_console.c \

Index: usr.sbin/bhyve/pci_apu-d.c
===================================================================
--- /dev/null
+++ usr.sbin/bhyve/pci_apu-d.c
@@ -0,0 +1,116 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "pci_passthru.h"
+
+#define VBIOS_ADDR 0xC0000
+
+#define max(a, b) (((a) > (b)) ? (a) : (b))
+
+int
+apu_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	int error;
+	struct passthru_softc *sc;
+
+	error = 0;
+
+	sc = pi->pi_arg;
+
+	uint16_t vendor, dev_id;
+	uint64_t bios_size;
+	vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	dev_id = read_config(&sc->psc_sel, PCIR_DEVICE, 0x02);
+
+	/* get VBIOS size */
+	if ((error = vm_get_vbios(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
+	    sc->psc_sel.pc_func, vendor, dev_id, NULL, &bios_size)) != 0) {
+		warnx("vm_get_vbios: %x", errno);
+		goto done;
+	}
+
+	/*
+	 * round up size to a power of two
+	 * check in descending order to avoid endless loop
+	 */
+	pi->pi_vbios.len = 1ULL << 63;
+	while (pi->pi_vbios.len > bios_size) {
+		pi->pi_vbios.len >>= 1;
+	}
+	pi->pi_vbios.len <<= 1;
+	/* ROM size must be at least 2 KB */
+	pi->pi_vbios.len = max(pi->pi_vbios.len, (~PCIM_BIOS_ADDR_MASK) + 1);
+
+	pi->pi_vbios.gpa = VBIOS_ADDR;
+	pi->pi_vbios.hpa = (uint64_t)vm_create_devmem(
+	    ctx, VM_VIDEOBIOS, "videobios", pi->pi_vbios.len);
+	if ((void *)pi->pi_vbios.hpa == MAP_FAILED) {
+		warnx("vm_create_devmem: %x", errno);
+		error = -1;
+		goto done;
+	}
+
+	/* get VBIOS */
+	if ((error = vm_get_vbios(ctx, sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
+	    sc->psc_sel.pc_func, vendor, dev_id, (void *)pi->pi_vbios.hpa,
+	    &bios_size)) != 0) {
+		warnx("vm_get_vbios: %x", errno);
+		goto done;
+	}
+
+	/* assign a ROM to this device */
+	if ((error = pci_emul_alloc_rom(
+	    pi, VM_VIDEOBIOS, pi->pi_vbios.gpa, pi->pi_vbios.len)) != 0) {
+		warnx("pci_emul_alloc_rom: %x", error);
+		goto done;
+	}
+
+done:
+	return (error);
+}
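The shift loop in apu_d_init() computes the smallest power of two strictly greater than bios_size, then clamps it to the BAR's 2 KB minimum decode granularity. A hedged sketch of the same computation as a closed form, using flsll(3) (the index of the highest set bit), for anyone checking the arithmetic:

	#include <strings.h>	/* flsll() */
	#include <stdint.h>

	static uint64_t
	vbios_rom_len(uint64_t bios_size)
	{
		uint64_t len;

		/* smallest power of two strictly greater than bios_size */
		len = 1ULL << flsll(bios_size);
		/* floor at (~PCIM_BIOS_ADDR_MASK) + 1 == 0x800, i.e. 2 KB */
		if (len < 0x800)
			len = 0x800;
		return (len);
	}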
Index: usr.sbin/bhyve/pci_emul.h
===================================================================
--- usr.sbin/bhyve/pci_emul.h
+++ usr.sbin/bhyve/pci_emul.h
@@ -40,6 +40,8 @@

 #include

+#include
+
 #define	PCI_BARMAX	PCIR_MAX_BAR_0	/* BAR registers in a Type 0 header */

 struct vmctx;
@@ -73,6 +75,9 @@
 			    struct pci_devinst *pi, int baridx,
 			    uint64_t offset, int size);

+	void	(*pe_baraddr)(struct vmctx *ctx, struct pci_devinst *pi,
+			    int baridx, int enabled, uint64_t address);
+
 	/* Save/restore device state */
 	int	(*pe_snapshot)(struct vm_snapshot_meta *meta);
 	int	(*pe_pause)(struct vmctx *ctx, struct pci_devinst *pi);
@@ -85,13 +90,15 @@
 	PCIBAR_IO,
 	PCIBAR_MEM32,
 	PCIBAR_MEM64,
-	PCIBAR_MEMHI64
+	PCIBAR_MEMHI64,
+	PCIBAR_ROM,
 };

 struct pcibar {
 	enum pcibar_type type;		/* io or memory */
 	uint64_t size;
 	uint64_t addr;
+	uint8_t lobits;
 };

 #define PI_NAMESZ	40
@@ -116,6 +123,12 @@
 	PENDING
 };

+struct pci_vbiosemu {
+	uint64_t hpa;
+	uint64_t len;
+	uint64_t gpa;
+};
+
 struct pci_devinst {
 	struct pci_devemu *pi_d;
 	struct vmctx *pi_vmctx;
@@ -158,6 +171,11 @@

 	u_char	pi_cfgdata[PCI_REGMAX + 1];
 	struct pcibar pi_bar[PCI_BARMAX + 1];
+	struct pcibar pi_rom_bar;
+	int pi_rom_segment;
+	int pi_rom_enabled;
+
+	struct pci_vbiosemu pi_vbios;
 };

 struct msicap {
@@ -221,6 +239,9 @@
 void	pci_callback(void);
 int	pci_emul_alloc_bar(struct pci_devinst *pdi, int idx,
	    enum pcibar_type type, uint64_t size);
+uint64_t pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask);
+int	pci_emul_alloc_rom(struct pci_devinst *pdi, int segid,
+	    uint32_t addr, uint32_t size);
 int	pci_emul_add_msicap(struct pci_devinst *pi, int msgnum);
 int	pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type);
 void	pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes,

Index: usr.sbin/bhyve/pci_emul.c
===================================================================
--- usr.sbin/bhyve/pci_emul.c
+++ usr.sbin/bhyve/pci_emul.c
@@ -33,6 +33,7 @@

 #include
 #include
+#include
 #include
 #include
 #include
@@ -461,10 +462,12 @@
 static void
 modify_bar_registration(struct pci_devinst *pi, int idx, int registration)
 {
+	struct pci_devemu *pe;
 	int error;
 	struct inout_port iop;
 	struct mem_range mr;

+	pe = pi->pi_d;
 	switch (pi->pi_bar[idx].type) {
 	case PCIBAR_IO:
 		bzero(&iop, sizeof(struct inout_port));
@@ -478,6 +481,9 @@
 			error = register_inout(&iop);
 		} else
 			error = unregister_inout(&iop);
+		if (pe->pe_baraddr != NULL)
+			(*pe->pe_baraddr)(pi->pi_vmctx, pi, idx, registration,
+			    pi->pi_bar[idx].addr);
 		break;
 	case PCIBAR_MEM32:
 	case PCIBAR_MEM64:
@@ -493,6 +499,9 @@
 			error = register_mem(&mr);
 		} else
 			error = unregister_mem(&mr);
+		if (pe->pe_baraddr != NULL)
+			(*pe->pe_baraddr)(pi->pi_vmctx, pi, idx, registration,
+			    pi->pi_bar[idx].addr);
 		break;
 	default:
 		error = EINVAL;
@@ -577,6 +586,72 @@
 	register_bar(pi, idx);
 }

+int
+pci_emul_alloc_rom(struct pci_devinst *pdi, int segid,
+    uint32_t addr, uint32_t size)
+{
+	/* The segment ID must be valid */
+	if (segid <= VM_SYSMEM || segid >= VM_NOTHING)
+		return (-1);
+
+	/* The size must be a power of two >= 4 KiB */
+	if ((size & (size - 1)) != 0 || size < 4096)
+		return (-2);
+
+	/* The address must be at a multiple of size */
+	if ((addr & (size - 1)) != 0)
+		return (-3);
+
+	pdi->pi_rom_segment = segid;
+	pdi->pi_rom_bar.type = PCIBAR_ROM;
+	pdi->pi_rom_bar.addr = addr;
+	pdi->pi_rom_bar.size = size;
+	pdi->pi_rom_enabled = 0;
+
+	pci_set_cfgdata32(pdi, PCIR_BIOS, addr);
+
+	return (0);
+}
+
+static int
+update_rom_address(struct pci_devinst *pdi)
+{
+	uint32_t bar;
+	int enable;
+
+	/* Ignore if we don't have a ROM */
+	if (pdi->pi_rom_bar.size == 0)
+		return (0);
+
+	/* Check if ROM is enabled */
+	bar = pci_get_cfgdata32(pdi, PCIR_BIOS);
+	enable = (bar & PCIM_BIOS_ENABLE) ? 1 : 0;
+
+	/* Something has changed; unmap the old segment first if it was mapped */
+	if (pdi->pi_rom_enabled) {
+		vm_munmap_memseg(pdi->pi_vmctx, pdi->pi_rom_bar.addr, pdi->pi_rom_bar.size);
+	}
+
+	pdi->pi_rom_enabled = enable && memen(pdi);
+
+	/* Map the segment only if both ROM and global Memory Space are on */
+	if (pdi->pi_rom_enabled && pdi->pi_rom_bar.addr != 0) {
+		int error;
+
+		error = vm_mmap_memseg(pdi->pi_vmctx, pdi->pi_rom_bar.addr,
+		    pdi->pi_rom_segment, 0, pdi->pi_rom_bar.size,
+		    PROT_READ | PROT_EXEC);
+
+		if (error) {
+			perror("ROM mapping failed");
+			return (-1);
+		}
+	}
+
+	return (0);
+}
+
 int
 pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type,
     uint64_t size)
@@ -673,6 +748,40 @@
 	return (0);
 }

+/* mask should be a power of 2 minus 1 (e.g. 0x000FFFFF) */
+uint64_t
+pci_emul_alloc_mmio(enum pcibar_type type, uint64_t size, uint64_t mask)
+{
+	uint64_t *baseptr, limit;
+
+	switch (type) {
+	case PCIBAR_IO:
+		baseptr = &pci_emul_iobase;
+		limit = PCI_EMUL_IOLIMIT;
+		break;
+	case PCIBAR_MEM32:
+		baseptr = &pci_emul_membase32;
+		limit = PCI_EMUL_MEMLIMIT32;
+		break;
+	case PCIBAR_MEM64:
+		baseptr = &pci_emul_membase64;
+		limit = PCI_EMUL_MEMLIMIT64;
+		break;
+	default:
+		return (0);
+	}
+
+	/* align base */
+	const uint64_t base = (*baseptr + mask) & ~mask;
+
+	if (base + size > limit)
+		return (0);
+
+	*baseptr = base + size;
+
+	return (base);
+}
+
 #define	CAP_START_OFFSET	0x40
 static int
 pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen)
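A hedged usage sketch for pci_emul_alloc_mmio() above: allocating a 16 KB guest window that must be 4 KiB aligned, which is exactly how the GVT-d code later in this change places the opregion. The alignment math is base = (cursor + mask) & ~mask, i.e. the allocation cursor is rounded up to the next multiple of (mask + 1), and 0 signals exhaustion:

	#include <err.h>
	#include <stdint.h>

	#include "pci_emul.h"

	static uint64_t
	alloc_opregion_gpa(void)
	{
		uint64_t gpa;

		/* 16 KB window, 4 KiB aligned: mask = 0xFFF, a power of two minus one */
		gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, 0x4000, 0xFFF);
		if (gpa == 0)
			errx(4, "out of 32-bit MMIO space");
		return (gpa);
	}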
@@ -1738,6 +1847,9 @@
 		}
 	}

+	if ((changed & PCIM_CMD_MEMEN))
+		update_rom_address(pi);
+
 	/*
 	 * If INTx has been unmasked and is pending, assert the
 	 * interrupt.
@@ -1901,6 +2013,32 @@
 		}
 		pci_set_cfgdata32(pi, coff, bar);

+	/*
+	 * The BAR register for an Expansion ROM is slightly different.
+	 */
+	} else if (coff >= PCIR_BIOS && coff < PCIR_BIOS + 4) {
+		/* Writes of the wrong size or alignment are ignored, as for ordinary BARs */
+		if (bytes != 4 || (coff & 0x3) != 0)
+			return;
+
+		/* Do not enable ROM if we don't have a ROM */
+		if (pi->pi_rom_bar.size == 0)
+			*eax &= ~PCIM_BIOS_ENABLE;
+
+		if ((*eax & PCIM_BIOS_ADDR_MASK) == PCIM_BIOS_ADDR_MASK) {
+			/* guest wants to read size of ROM */
+			pi->pi_rom_bar.addr = 0;
+			pci_set_cfgdata32(pi, coff, (~(pi->pi_rom_bar.size - 1)) | (*eax & PCIM_BIOS_ENABLE));
+		} else {
+			/* guest sets address of ROM */
+			pi->pi_rom_bar.addr = *eax & (~(pi->pi_rom_bar.size - 1));
+			pci_set_cfgdata32(pi, coff, pi->pi_rom_bar.addr | (*eax & PCIM_BIOS_ENABLE));
+		}
+
+		update_rom_address(pi);
+	} else if (pci_emul_iscap(pi, coff)) {
 		pci_emul_capwrite(pi, coff, bytes, *eax, 0, 0);
 	} else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) {
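From the guest's point of view, the pci_emul.c hunk above implements the standard Expansion ROM BAR sizing protocol. A hedged sketch of what guest firmware does; cfg_read32/cfg_write32 are hypothetical config-space accessors, not bhyve APIs:

	#include <stdint.h>

	#define PCIR_BIOS		0x30
	#define PCIM_BIOS_ADDR_MASK	0xFFFFF800
	#define PCIM_BIOS_ENABLE	0x01

	extern uint32_t cfg_read32(int bdf, int reg);		/* hypothetical */
	extern void cfg_write32(int bdf, int reg, uint32_t v);	/* hypothetical */

	static uint32_t
	probe_rom_size(int bdf)
	{
		uint32_t orig, sz;

		orig = cfg_read32(bdf, PCIR_BIOS);

		/* Write all 1s to the address bits; the emulation answers ~(size - 1). */
		cfg_write32(bdf, PCIR_BIOS, PCIM_BIOS_ADDR_MASK);
		sz = cfg_read32(bdf, PCIR_BIOS) & PCIM_BIOS_ADDR_MASK;

		cfg_write32(bdf, PCIR_BIOS, orig);	/* restore */
		return (sz ? ~sz + 1 : 0);		/* 0 means no ROM */
	}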
Index: usr.sbin/bhyve/pci_fbuf.c
===================================================================
--- usr.sbin/bhyve/pci_fbuf.c
+++ usr.sbin/bhyve/pci_fbuf.c
@@ -42,6 +42,7 @@

 #include
 #include
+#include
 #include
 #include

@@ -224,6 +225,30 @@
 	return (value);
 }

+static void
+pci_fbuf_baraddr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
+    int enabled, uint64_t address)
+{
+	struct pci_fbuf_softc *sc;
+	int prot;
+
+	if (baridx != 1)
+		return;
+
+	sc = pi->pi_arg;
+	if (!enabled && sc->fbaddr != 0) {
+		if (vm_munmap_memseg(ctx, sc->fbaddr, FB_SIZE) != 0)
+			warnx("pci_fbuf: munmap_memseg failed");
+		sc->fbaddr = 0;
+	} else if (sc->fb_base != NULL && sc->fbaddr == 0) {
+		prot = PROT_READ | PROT_WRITE;
+		if (vm_mmap_memseg(ctx, address, VM_FRAMEBUFFER, 0, FB_SIZE, prot) != 0)
+			warnx("pci_fbuf: mmap_memseg failed");
+		sc->fbaddr = address;
+	}
+}
+
 static int
 pci_fbuf_parse_opts(struct pci_fbuf_softc *sc, char *opts)
 {
@@ -462,5 +487,6 @@
 #ifdef BHYVE_SNAPSHOT
 	.pe_snapshot	= pci_fbuf_snapshot,
 #endif
+	.pe_baraddr	= pci_fbuf_baraddr,
 };
 PCI_EMUL_SET(pci_fbuf);

Index: usr.sbin/bhyve/pci_gvt-d.c
===================================================================
--- /dev/null
+++ usr.sbin/bhyve/pci_gvt-d.c
@@ -0,0 +1,387 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "inout.h"
+#include "pci_passthru.h"
+
+#define MB (1024 * 1024UL)
+
+/*
+ * PCI definitions
+ */
+#define PCIR_GGC	0x50	/* GMCH Graphics Control register */
+#define PCIR_BDSM	0x5C	/* Base Data of Stolen Memory register */
+#define PCIR_ASLS_CTL	0xFC	/* Opregion start address register */
+#define PCIM_GEN5_75_GGC_GMS_MASK \
+	0x000000F0 /* Bits 7:4 contain Graphics Mode Select */
+#define PCIM_GEN6_GGC_GMS_MASK \
+	0x000000F8 /* Bits 7:3 contain Graphics Mode Select */
+#define PCIM_GEN8_GGC_GMS_MASK \
+	0x0000FF00 /* Bits 15:8 contain Graphics Mode Select */
+#define PCIM_BDSM_GSM_MASK \
+	0xFFF00000 /* Bits 31:20 contain base address of gsm */
+#define PCIM_ASLS_OPREGION_MASK 0xFFFFF000 /* Opregion is 4k aligned */
+#define GPU_OPREGION_LEN 0x00004000 /* Size of Opregion (16 KB) */
+
+/*
+ * Known device ids for different generations of Intel graphics; see
+ * https://www.graphics-drivers.eu/intel-pci-hardware-id-string.html for the
+ * complete list
+ */
+/* Westmere & Ironlake */
+static const uint16_t igd_devid_gen5_75[] = { 0x0042, 0x0046 };
+/* Sandy Bridge */
+static const uint16_t igd_devid_gen6[] = { 0x0102, 0x0106, 0x010A, 0x0112,
+    0x0116, 0x0122, 0x0126 };
+/* Ivy Bridge */
+static const uint16_t igd_devid_gen7[] = { 0x0152, 0x0156, 0x015A, 0x0162,
+    0x0166, 0x016A };
+/* Haswell */
+static const uint16_t igd_devid_gen7_5[] = { 0x0402, 0x0406, 0x040A, 0x0412,
+    0x0416, 0x041A, 0x041E, 0x0A06, 0x0A0E, 0x0A16, 0x0A1E, 0x0A26, 0x0A2E,
+    0x0C02, 0x0C06, 0x0C12, 0x0C16, 0x0C22, 0x0C26, 0x0D06, 0x0D16, 0x0D22,
+    0x0D26 };
+/* Broadwell */
+static const uint16_t igd_devid_gen8[] = { 0x1606, 0x160E, 0x1612, 0x1616,
+    0x161A, 0x161E, 0x1622, 0x1626, 0x162A, 0x162B };
+/* Skylake */
+static const uint16_t igd_devid_gen9[] = { 0x1902, 0x1906, 0x190B, 0x190E,
+    0x1912, 0x1913, 0x1916, 0x1917, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923,
+    0x1926, 0x1927, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D };
+/* Kaby Lake & Whiskey Lake & Amber Lake & Coffee Lake & Comet Lake */
+static const uint16_t igd_devid_gen9_5[] = { 0x3E90, 0x3E91, 0x3E92, 0x3E93,
+    0x3E94, 0x3E96, 0x3E98, 0x3E99, 0x3E9A, 0x3E9B, 0x3E9C, 0x3EA0, 0x3EA1,
+    0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8, 0x3EA9, 0x5902, 0x5906, 0x590B, 0x5912,
+    0x5916, 0x5917, 0x591B, 0x591C, 0x591D, 0x591E, 0x5921, 0x5926, 0x5927,
+    0x87C0, 0x87CA, 0x9B21, 0x9B41, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA,
+    0x9BAC, 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCC, 0x9BE6,
+    0x9BF6 };
+
+static int
+array_contains(const uint16_t *array, uint64_t elements, uint16_t item)
+{
+	for (uint64_t i = 0; i < elements; ++i)
+		if (array[i] == item)
+			return 1;
+	return 0;
+}
+
+#define IGD_FUNC_IS_IGD_GEN(gen) \
+	static int igd_gen##gen##_is_igd_gen(int devid) \
+	{ \
+		return array_contains(igd_devid_gen##gen, \
+		    sizeof(igd_devid_gen##gen) / sizeof(uint16_t), devid); \
+	}
+
+/* GVT-d definitions */
+#define GVT_D_MAP_OPREGION 0
+#define GVT_D_MAP_GSM 1
+
+/*
+ * Handler for passthru of igd
+ *
+ * Keep it as struct instead of a single function pointer, since new
+ * generations of Intel graphics could need other funcs.
+ * e.g. Intel Elkhartlake and Intel Tigerlake:
+ * They will need different handling for GSM and Opregion (See ACRN-Hypervisor
+ * )
+ */
+struct igd_funcs {
+	int (*is_igd_gen)(int devid);
+	uint64_t (*get_gsm_len)(struct vmctx *ctx, struct passthru_softc *sc);
+};
+
+/* Handler for igd of gen5.75 (Westmere & Ironlake) */
+IGD_FUNC_IS_IGD_GEN(5_75);
+
+static uint64_t
+igd_gen5_75_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+	uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+	uint8_t gms_val = (ggc_val & PCIM_GEN5_75_GGC_GMS_MASK) >>
+	    4; /* Bits 7:4 contain Graphics Mode Select */
+	switch (gms_val) {
+	case 0x05:
+		return 32 * MB;
+	case 0x06:
+		return 48 * MB;
+	case 0x07:
+		return 64 * MB;
+	case 0x08:
+		return 128 * MB;
+	case 0x09:
+		return 256 * MB;
+	case 0x0A:
+		return 96 * MB;
+	case 0x0B:
+		return 160 * MB;
+	case 0x0C:
+		return 224 * MB;
+	case 0x0D:
+		return 352 * MB;
+	}
+
+	warnx("Unknown Graphic Mode (%x)", gms_val);
+	return 0;
+}
+
+/* Handler for igd of gen6 (Sandy Bridge) */
+IGD_FUNC_IS_IGD_GEN(6);
+
+static uint64_t
+igd_gen6_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+	uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+	uint8_t gms_val = (ggc_val & PCIM_GEN6_GGC_GMS_MASK) >>
+	    3; /* Bits 7:3 contain Graphics Mode Select */
+	if (gms_val <= 0x10)
+		return gms_val * 32 * MB;
+
+	warnx("Unknown Graphic Mode (%x)", gms_val);
+	return 0;
+}
+
+/* Handler for igd of gen7 (Ivy Bridge) */
+IGD_FUNC_IS_IGD_GEN(7);
+
+/* Handler for igd of gen7.5 (Haswell) */
+IGD_FUNC_IS_IGD_GEN(7_5);
+
+/* Handler for igd of gen8 (Broadwell) */
+IGD_FUNC_IS_IGD_GEN(8);
+
+static uint64_t
+igd_gen8_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+	uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+	uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >>
+	    8; /* Bits 15:8 contain Graphics Mode Select */
+	if ((gms_val <= 0x10) || (gms_val == 0x20) || (gms_val == 0x30) ||
+	    (gms_val == 0x3F))
+		return gms_val * 32 * MB;
+
+	warnx("Unknown Graphic Mode (%x)", gms_val);
+	return 0;
+}
+
+/* Handler for igd of gen9 (Skylake) */
+IGD_FUNC_IS_IGD_GEN(9);
+
+static uint64_t
+igd_gen9_get_gsm_len(struct vmctx *ctx, struct passthru_softc *sc)
+{
+	uint16_t ggc_val = read_config(&sc->psc_sel, PCIR_GGC, 2);
+	uint8_t gms_val = (ggc_val & PCIM_GEN8_GGC_GMS_MASK) >>
+	    8; /* Bits 15:8 contain Graphics Mode Select */
+	if ((gms_val <= 0x10) || (gms_val == 0x20) || (gms_val == 0x30) ||
+	    (gms_val == 0x40))
+		return gms_val * 32 * MB;
+	else if (gms_val >= 0xF0 && gms_val <= 0xFE)
+		/* 4 MB increments starting at 4 MB */
+		return (gms_val - 0xF0 + 1) * 4 * MB;
+
+	warnx("Unknown Graphic Mode (%x)", gms_val);
+	return 0;
+}
+
+/*
+ * Handler for igd of gen9.5 (Kaby Lake & Whiskey Lake & Amber Lake & Coffee
+ * Lake & Comet Lake)
+ */
+IGD_FUNC_IS_IGD_GEN(9_5);
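A hedged worked example of the gen9 Graphics Mode Select decoding above, matching the encoding used by Linux's early-quirks.c (values below 0xF0 count 32 MB units, 0xF0-0xFE count 4 MB units starting at 4 MB). The function is a simplified standalone mirror of igd_gen9_get_gsm_len(), not the bhyve implementation itself:

	#include <assert.h>
	#include <stdint.h>

	#define MB (1024 * 1024UL)

	static uint64_t
	gen9_gsm_len(uint8_t gms)
	{
		if (gms <= 0x40)
			return ((uint64_t)gms * 32 * MB);	/* 32 MB units */
		if (gms >= 0xF0 && gms <= 0xFE)
			return (((uint64_t)gms - 0xF0 + 1) * 4 * MB);
		return (0);	/* reserved encoding */
	}

	int
	main(void)
	{
		assert(gen9_gsm_len(0x01) == 32 * MB);
		assert(gen9_gsm_len(0xF0) == 4 * MB);
		assert(gen9_gsm_len(0xFE) == 60 * MB);
		return (0);
	}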
+
+/* Westmere & Ironlake */
+static const struct igd_funcs igd_gen5_75 = {
+	.is_igd_gen = igd_gen5_75_is_igd_gen,
+	.get_gsm_len = igd_gen5_75_get_gsm_len
+};
+/* Sandy Bridge */
+static const struct igd_funcs igd_gen6 = {
+	.is_igd_gen = igd_gen6_is_igd_gen,
+	.get_gsm_len = igd_gen6_get_gsm_len
+};
+/* Ivy Bridge */
+static const struct igd_funcs igd_gen7 = {
+	.is_igd_gen = igd_gen7_is_igd_gen,
+	.get_gsm_len = igd_gen6_get_gsm_len
+};
+/* Haswell */
+static const struct igd_funcs igd_gen7_5 = {
+	.is_igd_gen = igd_gen7_5_is_igd_gen,
+	.get_gsm_len = igd_gen6_get_gsm_len
+};
+/* Broadwell */
+static const struct igd_funcs igd_gen8 = {
+	.is_igd_gen = igd_gen8_is_igd_gen,
+	.get_gsm_len = igd_gen8_get_gsm_len
+};
+/* Skylake */
+static const struct igd_funcs igd_gen9 = {
+	.is_igd_gen = igd_gen9_is_igd_gen,
+	.get_gsm_len = igd_gen9_get_gsm_len
+};
+/* Kaby Lake & Whiskey Lake & Amber Lake & Coffee Lake & Comet Lake */
+static const struct igd_funcs igd_gen9_5 = {
+	.is_igd_gen = igd_gen9_5_is_igd_gen,
+	.get_gsm_len = igd_gen9_get_gsm_len
+};
+
+static const struct igd_funcs *igd_gen_map[] = { &igd_gen5_75, &igd_gen6,
+	&igd_gen7, &igd_gen7_5, &igd_gen8, &igd_gen9, &igd_gen9_5 };
+
+static const struct igd_funcs *
+get_igd_funcs(const uint16_t devid)
+{
+	for (size_t i = 0;
+	    i < sizeof(igd_gen_map) / sizeof(struct igd_funcs *); ++i) {
+		if (igd_gen_map[i]->is_igd_gen(devid))
+			return (igd_gen_map[i]);
+	}
+	return (NULL);
+}
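Resolving a device is then a two-step lookup: pick the generation table, then call its handler. A fragment of the intended call pattern (0x1912 is an illustrative Skylake device id; ctx and sc come from the passthru caller):

const struct igd_funcs *igd = get_igd_funcs(0x1912);
uint64_t gsm_len = 0;

if (igd != NULL)
	gsm_len = igd->get_gsm_len(ctx, sc);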
+
+int
+gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	int error;
+	struct passthru_softc *sc;
+
+	sc = pi->pi_arg;
+
+	/* check that the vendor is Intel */
+	const uint16_t dev_vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 2);
+	if (dev_vendor != 0x8086) {
+		warnx("Unknown vendor (%x) of igd", dev_vendor);
+		return (-ENODEV);
+	}
+
+	/* check if the device is a display device */
+	if (read_config(&sc->psc_sel, PCIR_CLASS, 1) != PCIC_DISPLAY) {
+		warnx("%s is not a display device", pi->pi_name);
+		return (-ENODEV);
+	}
+
+	/* get the igd funcs for this generation */
+	const struct igd_funcs *igd = get_igd_funcs(
+	    read_config(&sc->psc_sel, PCIR_DEVICE, 2));
+	if (igd == NULL) {
+		warnx("Unsupported igd device (%x)",
+		    read_config(&sc->psc_sel, PCIR_DEVICE, 2));
+		return (-ENODEV);
+	}
+
+	struct passthru_mmio_mapping *opregion =
+	    &sc->psc_mmio_map[GVT_D_MAP_OPREGION];
+	struct passthru_mmio_mapping *gsm = &sc->psc_mmio_map[GVT_D_MAP_GSM];
+
+	/* get the Opregion length */
+	opregion->len = GPU_OPREGION_LEN;
+	/* get the Opregion HPA */
+	opregion->hpa = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4) &
+	    PCIM_ASLS_OPREGION_MASK;
+	/* get the Graphics Stolen Memory length */
+	gsm->len = igd->get_gsm_len(ctx, sc);
+	/* get the Graphics Stolen Memory HPA */
+	gsm->hpa = read_config(&sc->psc_sel, PCIR_BDSM, 4) &
+	    PCIM_BDSM_GSM_MASK;
+
+	if (opregion->len == 0 || gsm->len == 0) {
+		warnx("Could not determine the size of the opregion or gsm");
+		return (-ENODEV);
+	}
+
+	/* allocate the Opregion and GSM in guest space */
+	opregion->gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, opregion->len,
+	    ~PCIM_ASLS_OPREGION_MASK);
+	gsm->gpa = pci_emul_alloc_mmio(PCIBAR_MEM32, gsm->len,
+	    ~PCIM_BDSM_GSM_MASK);
+	if (opregion->gpa == 0 || gsm->gpa == 0) {
+		error = -ENOMEM;
+		goto failed_opregion;
+	}
+
+	/* write the addresses of the Opregion and GSM into the PCI registers */
+	/* set the Opregion GPA */
+	uint32_t asls_val = read_config(&sc->psc_sel, PCIR_ASLS_CTL, 4);
+	pci_set_cfgdata32(sc->psc_pi, PCIR_ASLS_CTL,
+	    opregion->gpa | (asls_val & ~PCIM_ASLS_OPREGION_MASK));
+	/* set the Graphics Stolen Memory GPA */
+	uint32_t bdsm_val = read_config(&sc->psc_sel, PCIR_BDSM, 4);
+	pci_set_cfgdata32(sc->psc_pi, PCIR_BDSM,
+	    gsm->gpa | (bdsm_val & ~PCIM_BDSM_GSM_MASK));
+
+	/* map the Opregion and GSM into guest space */
+	if ((error = passthru_modify_pptdev_mmio(ctx, sc, opregion,
+	    PT_MAP_PPTDEV_MMIO)) != 0)
+		goto failed_opregion;
+	if ((error = passthru_modify_pptdev_mmio(ctx, sc, gsm,
+	    PT_MAP_PPTDEV_MMIO)) != 0)
+		goto failed_gsm;
+
+	/* protect the PCI registers that now hold guest addresses */
+	set_pcir_prot(sc, PCIR_ASLS_CTL, 0x04, PPT_PCIR_PROT_NA);
+	set_pcir_prot(sc, PCIR_BDSM, 0x04, PPT_PCIR_PROT_NA);
+
+	return (0);
+
+failed_opregion:
+	opregion->gpa = 0;
+failed_gsm:
+	gsm->gpa = 0;
+	return (error);
+}
+
+void
+gvt_d_deinit(struct vmctx *ctx, struct pci_devinst *pi)
+{
+	struct passthru_softc *sc;
+
+	sc = pi->pi_arg;
+
+	struct passthru_mmio_mapping *gsm = &sc->psc_mmio_map[GVT_D_MAP_GSM];
+	struct passthru_mmio_mapping *opregion =
+	    &sc->psc_mmio_map[GVT_D_MAP_OPREGION];
+
+	/* the GPA is only set if the mapping was initialized */
+	if (gsm->gpa)
+		passthru_modify_pptdev_mmio(ctx, sc, gsm,
+		    PT_UNMAP_PPTDEV_MMIO);
+	if (opregion->gpa)
+		passthru_modify_pptdev_mmio(ctx, sc, opregion,
+		    PT_UNMAP_PPTDEV_MMIO);
+}
Index: usr.sbin/bhyve/pci_lpc.c
===================================================================
--- usr.sbin/bhyve/pci_lpc.c
+++ usr.sbin/bhyve/pci_lpc.c
@@ -33,9 +33,13 @@
 __FBSDID("$FreeBSD$");
 
 #include <sys/types.h>
+#include <sys/pciio.h>
 #include <machine/vmm.h>
 #include <err.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <dev/pci/pcireg.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -83,6 +87,29 @@
 
 static bool pctestdev_present;
 
+#ifndef _PATH_DEVPCI
+#define _PATH_DEVPCI "/dev/pci"
+#endif
+
+static int pcifd = -1;
+
+static uint32_t
+read_config(struct pcisel *sel, long reg, int width)
+{
+	struct pci_io pi;
+
+	pi.pi_sel.pc_domain = sel->pc_domain;
+	pi.pi_sel.pc_bus = sel->pc_bus;
+	pi.pi_sel.pc_dev = sel->pc_dev;
+	pi.pi_sel.pc_func = sel->pc_func;
+	pi.pi_reg = reg;
+	pi.pi_width = width;
+
+	if (ioctl(pcifd, PCIOCREAD, &pi) < 0)
+		return (0);
+
+	return (pi.pi_data);
+}
+
 /*
  * LPC device configuration is in the following form:
  * <lpc_device_name>[,<options>]
@@ -446,6 +473,35 @@
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_ISA);
 
+	/* open the host PCI device */
+	if (pcifd < 0) {
+		pcifd = open(_PATH_DEVPCI, O_RDWR, 0);
+		if (pcifd < 0) {
+			warn("failed to open %s", _PATH_DEVPCI);
+			return (-1);
+		}
+	}
+
+	/* On Intel systems the LPC bridge is always at 0:1f.0. */
+	struct pcisel sel;
+	sel.pc_domain = 0;
+	sel.pc_bus = 0;
+	sel.pc_dev = 0x1f;
+	sel.pc_func = 0;
+
+	if (read_config(&sel, PCIR_VENDOR, 2) == PCI_VENDOR_INTEL) {
+		/*
+		 * The VID, DID, REVID, SUBVID and SUBDID of igd-lpc need to
+		 * be aligned with the physical ones. Without these physical
+		 * values, the GVT-d GOP driver doesn't work.
+		 */
+		pci_set_cfgdata16(pi, PCIR_DEVICE,
+		    read_config(&sel, PCIR_DEVICE, 2));
+		pci_set_cfgdata16(pi, PCIR_VENDOR,
+		    read_config(&sel, PCIR_VENDOR, 2));
+		pci_set_cfgdata8(pi, PCIR_REVID,
+		    read_config(&sel, PCIR_REVID, 1));
+		pci_set_cfgdata16(pi, PCIR_SUBVEND_0,
+		    read_config(&sel, PCIR_SUBVEND_0, 2));
+		pci_set_cfgdata16(pi, PCIR_SUBDEV_0,
+		    read_config(&sel, PCIR_SUBDEV_0, 2));
+	}
+
 	lpc_bridge = pi;
 
 	return (0);
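The read_config() helper above is a thin wrapper around the /dev/pci PCIOCREAD ioctl. For reference, a standalone sketch of the same host read it performs for the LPC bridge (error handling kept minimal):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>

#include <dev/pci/pcireg.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct pci_io pio;
	int fd;

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0)
		err(1, "open /dev/pci");

	memset(&pio, 0, sizeof(pio));
	pio.pi_sel.pc_dev = 0x1f;	/* Intel LPC bridge lives at 0:1f.0 */
	pio.pi_reg = PCIR_VENDOR;
	pio.pi_width = 2;
	if (ioctl(fd, PCIOCREAD, &pio) < 0)
		err(1, "PCIOCREAD");

	printf("LPC vendor id: 0x%04x\n", pio.pi_data);
	return (0);
}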
Index: usr.sbin/bhyve/pci_passthru.h
===================================================================
--- /dev/null
+++ usr.sbin/bhyve/pci_passthru.h
@@ -0,0 +1,84 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __PCI_PASSTHRU_H__
+#define __PCI_PASSTHRU_H__
+
+#include <sys/pciio.h>
+
+#include <vmmapi.h>
+
+#include "pci_emul.h"
+
+struct passthru_mmio_mapping {
+	uint64_t gpa;
+	uint64_t len;
+	uint64_t hpa;
+};
+
+struct passthru_softc {
+	struct pci_devinst *psc_pi;
+	struct pcibar psc_bar[PCI_BARMAX + 1];
+	struct {
+		int capoff;
+		int msgctrl;
+		int emulated;
+	} psc_msi;
+	struct {
+		int capoff;
+	} psc_msix;
+	struct pcisel psc_sel;
+
+	struct passthru_mmio_mapping psc_mmio_map[2];
+	uint8_t psc_pcir_prot_map[(PCI_REGMAX + 1) / 4];
+};
+
+#define PT_MAP_PPTDEV_MMIO 1
+#define PT_UNMAP_PPTDEV_MMIO 0
+
+#define PPT_PCIR_PROT_NA 0 /* no access to physical values */
+#define PPT_PCIR_PROT_RO 1 /* read-only access to physical values */
+#define PPT_PCIR_PROT_WO 2 /* write-only access to physical values */
+#define PPT_PCIR_PROT_RW \
+	(PPT_PCIR_PROT_RO | PPT_PCIR_PROT_WO) /* read/write access */
+#define PPT_PCIR_PROT_MASK 0x03
+
+int passthru_modify_pptdev_mmio(struct vmctx *ctx, struct passthru_softc *sc,
+    struct passthru_mmio_mapping *map, int registration);
+uint32_t read_config(const struct pcisel *sel, long reg, int width);
+void write_config(const struct pcisel *sel, long reg, int width,
+    uint32_t data);
+int set_pcir_prot(struct passthru_softc *sc, uint32_t reg, uint32_t len,
+    uint8_t prot);
+int gvt_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts);
+void gvt_d_deinit(struct vmctx *ctx, struct pci_devinst *pi);
+int apu_d_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts);
+
+#endif
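A fragment showing the intended life cycle of a mapping through the helper declared above (field values illustrative; ctx and sc come from the passthru device):

struct passthru_mmio_mapping map = {
	.gpa = 0xC0000000,	/* guest-physical base */
	.len = 0x4000,		/* 16 KB */
	.hpa = 0xDF000000	/* host-physical base */
};

if (passthru_modify_pptdev_mmio(ctx, sc, &map, PT_MAP_PPTDEV_MMIO) != 0)
	warnx("map failed");
/* ... device runs ... */
if (passthru_modify_pptdev_mmio(ctx, sc, &map, PT_UNMAP_PPTDEV_MMIO) != 0)
	warnx("unmap failed");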
Index: usr.sbin/bhyve/pci_passthru.c
===================================================================
--- usr.sbin/bhyve/pci_passthru.c
+++ usr.sbin/bhyve/pci_passthru.c
@@ -58,9 +58,8 @@
 #include <machine/vmm.h>
 #include <vmmapi.h>
-#include <sys/pciio.h>
-#include "pci_emul.h"
 #include "mem.h"
+#include "pci_passthru.h"
 
 #ifndef _PATH_DEVPCI
 #define _PATH_DEVPCI "/dev/pci"
@@ -83,20 +82,6 @@
 static int iofd = -1;
 static int memfd = -1;
 
-struct passthru_softc {
-	struct pci_devinst *psc_pi;
-	struct pcibar psc_bar[PCI_BARMAX + 1];
-	struct {
-		int capoff;
-		int msgctrl;
-		int emulated;
-	} psc_msi;
-	struct {
-		int capoff;
-	} psc_msix;
-	struct pcisel psc_sel;
-};
-
 static int
 msi_caplen(int msgctrl)
 {
@@ -119,7 +104,7 @@
 	return (len);
 }
 
-static uint32_t
+uint32_t
 read_config(const struct pcisel *sel, long reg, int width)
 {
 	struct pci_io pi;
@@ -135,7 +120,7 @@
 	return (pi.pi_data);
 }
 
-static void
+void
 write_config(const struct pcisel *sel, long reg, int width, uint32_t data)
 {
 	struct pci_io pi;
@@ -149,6 +134,70 @@
 	(void)ioctl(pcifd, PCIOCWRITE, &pi);	/* XXX */
 }
 
+int
+passthru_modify_pptdev_mmio(struct vmctx *ctx, struct passthru_softc *sc,
+    struct passthru_mmio_mapping *map, int registration)
+{
+	if (registration == PT_MAP_PPTDEV_MMIO)
+		return (vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
+		    sc->psc_sel.pc_dev, sc->psc_sel.pc_func, map->gpa,
+		    map->len, map->hpa));
+	else
+		return (vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
+		    sc->psc_sel.pc_dev, sc->psc_sel.pc_func, map->gpa,
+		    map->len));
+}
+
+static int
+passthru_modify_bar_registration(struct pci_devinst *pi, int idx,
+    int registration)
+{
+	int error;
+	struct passthru_softc *sc;
+	struct passthru_mmio_mapping map;
+
+	sc = pi->pi_arg;
+
+	/*
+	 * If the guest writes a new value to a 64-bit BAR, two writes are
+	 * necessary. vm_map_pptdev_mmio can fail in that case due to an
+	 * invalid address after the first write. To avoid it, skip
+	 * registration in that case.
+	 */
+	if ((registration == PT_MAP_PPTDEV_MMIO) &&
+	    (pi->pi_bar[idx].type == PCIBAR_MEM64))
+		if ((pci_get_cfgdata32(pi, PCIR_BAR(idx + 0)) == ~0U) ||
+		    (pci_get_cfgdata32(pi, PCIR_BAR(idx + 1)) == ~0U))
+			return (0);
+
+	if (idx != pci_msix_table_bar(pi)) {
+		map.gpa = pi->pi_bar[idx].addr;
+		map.len = pi->pi_bar[idx].size;
+		map.hpa = sc->psc_bar[idx].addr;
+		return (passthru_modify_pptdev_mmio(pi->pi_vmctx, sc, &map,
+		    registration));
+	}
+
+	/* Special handling for the MSI-X table: map around it. */
+	uint32_t table_offset, table_size;
+
+	table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
+	table_size = pi->pi_msix.table_offset - table_offset;
+	table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
+	table_size = roundup2(table_size, 4096);
+
+	map.gpa = pi->pi_bar[idx].addr;
+	map.len = table_offset;
+	map.hpa = sc->psc_bar[idx].addr;
+
+	/* map/unmap everything before the MSI-X table */
+	if (map.len > 0)
+		if ((error = passthru_modify_pptdev_mmio(pi->pi_vmctx, sc,
+		    &map, registration)) != 0)
+			return (error);
+
+	map.gpa += table_offset + table_size;
+	map.len = pi->pi_bar[idx].size - (table_offset + table_size);
+	map.hpa += table_offset + table_size;
+
+	/* map/unmap everything beyond the MSI-X table */
+	if (map.len > 0)
+		if ((error = passthru_modify_pptdev_mmio(pi->pi_vmctx, sc,
+		    &map, registration)) != 0)
+			return (error);
+
+	return (0);
+}
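To make the split above concrete, here is a self-contained sketch with illustrative numbers: a 64 KB BAR whose MSI-X table starts at offset 0x3800 and holds 8 entries of 16 bytes. The regions below and beyond the table get mapped; the 4 KB pages covering the table do not:

#include <sys/param.h>	/* rounddown2(), roundup2() */

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t bar_size = 0x10000;	/* illustrative 64 KB BAR */
	uint32_t msix_offset = 0x3800;	/* illustrative table offset */
	uint32_t entries = 8, entry_size = 16;

	uint32_t table_offset = rounddown2(msix_offset, 4096);	/* 0x3000 */
	uint32_t table_size = msix_offset - table_offset;	/* 0x0800 */
	table_size += entries * entry_size;			/* 0x0880 */
	table_size = roundup2(table_size, 4096);		/* 0x1000 */

	printf("below the table: len 0x%x\n", table_offset);
	printf("beyond the table: off 0x%x, len 0x%x\n",
	    table_offset + table_size,
	    bar_size - (table_offset + table_size));
	return (0);
}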
 
 #ifdef LEGACY_SUPPORT
 static int
 passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr)
@@ -438,8 +487,8 @@
 init_msix_table(struct vmctx *ctx, struct passthru_softc *sc, uint64_t base)
 {
 	int b, s, f;
-	int error, idx;
-	size_t len, remaining;
+	int idx;
+	size_t remaining;
 	uint32_t table_size, table_offset;
 	uint32_t pba_size, pba_offset;
 	vm_paddr_t start;
@@ -501,31 +550,6 @@
 		}
 	}
 
-	/* Map everything before the MSI-X table */
-	if (table_offset > 0) {
-		len = table_offset;
-		error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
-		if (error)
-			return (error);
-
-		base += len;
-		start += len;
-		remaining -= len;
-	}
-
-	/* Skip the MSI-X table */
-	base += table_size;
-	start += table_size;
-	remaining -= table_size;
-
-	/* Map everything beyond the end of the MSI-X table */
-	if (remaining > 0) {
-		len = remaining;
-		error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
-		if (error)
-			return (error);
-	}
-
 	return (0);
 }
 
@@ -581,24 +605,36 @@
 		sc->psc_bar[i].type = bartype;
 		sc->psc_bar[i].size = size;
 		sc->psc_bar[i].addr = base;
+		sc->psc_bar[i].lobits = 0;
 
 		/* Allocate the BAR in the guest I/O or MMIO space */
 		error = pci_emul_alloc_bar(pi, i, bartype, size);
 		if (error)
 			return (-1);
 
+		/* Use the same prefetchable property as the physical BAR */
+		uint8_t lobits = pci_get_cfgdata8(pi, PCIR_BAR(i));
+		if (bartype == PCIBAR_MEM32 || bartype == PCIBAR_MEM64) {
+			if (bar.pbi_base & PCIM_BAR_MEM_PREFETCH)
+				lobits |= PCIM_BAR_MEM_PREFETCH;
+			else
+				lobits &= ~PCIM_BAR_MEM_PREFETCH;
+			pci_set_cfgdata8(pi, PCIR_BAR(i), lobits);
+			lobits &= ~PCIM_BAR_MEM_BASE;
+		} else {
+			lobits |= PCIM_BAR_IO_SPACE;
+			pci_set_cfgdata8(pi, PCIR_BAR(i), lobits);
+			lobits &= ~PCIM_BAR_IO_BASE;
+		}
+		sc->psc_bar[i].lobits = lobits;
+		pi->pi_bar[i].lobits = lobits;
+
 		/* The MSI-X table needs special handling */
 		if (i == pci_msix_table_bar(pi)) {
 			error = init_msix_table(ctx, sc, base);
 			if (error)
 				return (-1);
-		} else if (bartype != PCIBAR_IO) {
-			/* Map the physical BAR in the guest MMIO space */
-			error = vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
-			    sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
-			    pi->pi_bar[i].addr, pi->pi_bar[i].size, base);
-			if (error)
-				return (-1);
 		}
 
 		/*
@@ -639,14 +675,91 @@
 		goto done;
 	}
 
-	pci_set_cfgdata16(pi, PCIR_COMMAND, read_config(&sc->psc_sel,
-	    PCIR_COMMAND, 2));
+	/* sync the command register */
+	write_config(&sc->psc_sel, PCIR_COMMAND, 0x02,
+	    pci_get_cfgdata16(pi, PCIR_COMMAND));
 
 	error = 0;			/* success */
 done:
 	return (error);
 }
 
+#define PPT_PCIR_PROT(reg) \
+	((sc->psc_pcir_prot_map[reg / 4] >> ((reg & 0x03) * 2)) & \
+	    PPT_PCIR_PROT_MASK)
+
+int
+set_pcir_prot(struct passthru_softc *sc, uint32_t reg, uint32_t len,
+    uint8_t prot)
+{
+	if (reg > PCI_REGMAX || reg + len > PCI_REGMAX + 1)
+		return (-1);
+
+	prot &= PPT_PCIR_PROT_MASK;
+
+	/* two protection bits per register, four registers per byte */
+	for (int i = reg; i < reg + len; ++i) {
+		/* delete the old prot value */
+		sc->psc_pcir_prot_map[i / 4] &=
+		    ~(PPT_PCIR_PROT_MASK << ((i & 0x03) * 2));
+		/* set the new prot value */
+		sc->psc_pcir_prot_map[i / 4] |= prot << ((i & 0x03) * 2);
+	}
+
+	return (0);
+}
+
+static int
+is_pcir_writable(struct passthru_softc *sc, uint32_t reg)
+{
+	if (reg > PCI_REGMAX)
+		return (0);
+
+	return ((PPT_PCIR_PROT(reg) & PPT_PCIR_PROT_WO) != 0);
+}
+
+static int
+is_pcir_readable(struct passthru_softc *sc, uint32_t reg)
+{
+	if (reg > PCI_REGMAX)
+		return (0);
+
+	return ((PPT_PCIR_PROT(reg) & PPT_PCIR_PROT_RO) != 0);
+}
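The protection map packs one 2-bit value per config register, four registers per byte, which is why the shift is (reg & 0x03) * 2. A self-contained sketch of the packing, mirroring set_pcir_prot() above with hypothetical stand-alone names:

#include <stdint.h>
#include <stdio.h>

#define PROT_MASK 0x03	/* mirrors PPT_PCIR_PROT_MASK */

static uint8_t prot_map[(0xFF + 1) / 4];	/* PCI_REGMAX is 0xFF */

static void
set_prot(uint32_t reg, uint8_t prot)
{
	prot_map[reg / 4] &= ~(PROT_MASK << ((reg & 0x03) * 2));
	prot_map[reg / 4] |= (prot & PROT_MASK) << ((reg & 0x03) * 2);
}

static uint8_t
get_prot(uint32_t reg)
{
	return ((prot_map[reg / 4] >> ((reg & 0x03) * 2)) & PROT_MASK);
}

int
main(void)
{
	set_prot(0xFC, 0);	/* e.g. PCIR_ASLS_CTL: no access */
	set_prot(0x00, 3);	/* e.g. vendor id: read/write */
	printf("prot(0xFC)=%u prot(0x00)=%u\n",
	    get_prot(0xFC), get_prot(0x00));
	return (0);
}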
+
+static int
+passthru_init_quirks(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	struct passthru_softc *sc = pi->pi_arg;
+
+	uint16_t vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	uint8_t class = read_config(&sc->psc_sel, PCIR_CLASS, 0x01);
+
+	/* currently only display devices have quirks */
+	if (class != PCIC_DISPLAY)
+		return (0);
+
+	if (vendor == PCI_VENDOR_INTEL)
+		return (gvt_d_init(ctx, pi, opts));
+	if (vendor == PCI_VENDOR_AMD)
+		return (apu_d_init(ctx, pi, opts));
+
+	return (0);
+}
+
+static void
+passthru_deinit_quirks(struct vmctx *ctx, struct pci_devinst *pi)
+{
+	struct passthru_softc *sc = pi->pi_arg;
+
+	uint16_t vendor = read_config(&sc->psc_sel, PCIR_VENDOR, 0x02);
+	uint8_t class = read_config(&sc->psc_sel, PCIR_CLASS, 0x01);
+
+	/* currently only display devices have quirks */
+	if (class != PCIC_DISPLAY)
+		return;
+
+	if (vendor == PCI_VENDOR_INTEL)
+		gvt_d_deinit(ctx, pi);
+}
+
 static int
 passthru_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 {
@@ -734,9 +847,20 @@
 	sc->psc_pi = pi;
 
 	/* initialize config space */
-	error = cfginit(ctx, pi, bus, slot, func);
+	if ((error = cfginit(ctx, pi, bus, slot, func)) != 0)
+		goto done;
+
+	/* allow access to all PCI registers */
+	if ((error = set_pcir_prot(sc, 0, PCI_REGMAX + 1,
+	    PPT_PCIR_PROT_RW)) != 0)
+		goto done;
+
+	if ((error = passthru_init_quirks(ctx, pi, opts)) != 0)
+		goto done;
+
+	error = 0;		/* success */
 done:
 	if (error) {
+		passthru_deinit_quirks(ctx, pi);
 		free(sc);
 		vm_unassign_pptdev(ctx, bus, slot, func);
 	}
@@ -778,6 +902,38 @@
 	    coff < sc->psc_msix.capoff + MSIX_CAPLEN);
 }
 
+static int
+passthru_cfgread_bar(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+    int coff, int bytes, uint32_t *rv)
+{
+	const int idx = (coff - PCIR_BAR(0)) / 4;
+	int update_idx = idx;
+
+	if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+		--update_idx;
+
+	if (pci_get_cfgdata32(pi, PCIR_BAR(idx)) != ~0U) {
+		/* return the address of the BAR */
+		if (bytes == 1)
+			*rv = pci_get_cfgdata8(pi, coff);
+		else if (bytes == 2)
+			*rv = pci_get_cfgdata16(pi, coff);
+		else
+			*rv = pci_get_cfgdata32(pi, coff);
+
+		return (0);
+	}
+
+	/* return the size of the BAR */
+	uint64_t size = ~(uint64_t)(pi->pi_bar[update_idx].size - 1);
+	size |= pi->pi_bar[update_idx].lobits;
+	if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+		size >>= 32;
+	assert(bytes == 4);
+	*rv = size;
+
+	return (0);
+}
+
 static int
 passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
     int coff, int bytes, uint32_t *rv)
@@ -786,10 +942,23 @@
 
 	sc = pi->pi_arg;
 
+	/* skip protected PCI registers */
+	if (!is_pcir_readable(sc, coff))
+		return (-1);
+
 	/*
 	 * PCI BARs and MSI capability is emulated.
 	 */
-	if (bar_access(coff) || msicap_access(sc, coff))
+	if (msicap_access(sc, coff))
+		return (-1);
+
+	if (bar_access(coff))
+		return (passthru_cfgread_bar(ctx, vcpu, pi, coff, bytes, rv));
+
+	/*
+	 * The PCI ROM is emulated.
+	 */
+	if (coff >= PCIR_BIOS && coff < PCIR_BIOS + 4)
 		return (-1);
 
 #ifdef LEGACY_SUPPORT
@@ -822,6 +991,82 @@
 	return (0);
 }
 
+static int
+passthru_cfgwrite_bar(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
+    int coff, int bytes, uint32_t val)
+{
+	const int idx = (coff - PCIR_BAR(0)) / 4;
+	int update_idx = idx;
+
+	switch (pi->pi_bar[idx].type) {
+	case PCIBAR_MEMHI64:
+		--update_idx;
+		/* FALLTHROUGH */
+	case PCIBAR_IO:
+	case PCIBAR_MEM32:
+	case PCIBAR_MEM64: {
+		const uint16_t cmd = pci_get_cfgdata16(pi, PCIR_COMMAND);
+		if ((cmd & PCIM_CMD_MEMEN &&
+		    pi->pi_bar[idx].type != PCIBAR_IO) ||
+		    (cmd & PCIM_CMD_PORTEN &&
+		    pi->pi_bar[idx].type == PCIBAR_IO)) {
+			passthru_modify_bar_registration(pi, update_idx, 0);
+		}
+
+		if (val == ~0U) {
+			/* the guest wants to read the size of the BAR */
+			pci_set_cfgdata32(pi, coff, ~0U);
+			pi->pi_bar[update_idx].addr = 0;
+			break;
+		}
+
+		/* the guest sets the address of the BAR */
+		uint64_t mask, bar;
+		mask = ~(pi->pi_bar[update_idx].size - 1);
+		if (pi->pi_bar[idx].type == PCIBAR_MEMHI64)
+			mask >>= 32;
+		bar = val & mask;
+		if (pi->pi_bar[idx].type != PCIBAR_MEMHI64)
+			bar |= pi->pi_bar[update_idx].lobits;
+		pci_set_cfgdata32(pi, coff, bar);
+
+		/* only register the BAR if it contains a valid address */
+		uint32_t lo, hi;
+
+		lo = pci_get_cfgdata32(pi, PCIR_BAR(update_idx));
+		if (pi->pi_bar[update_idx].type == PCIBAR_IO) {
+			if ((lo & PCIM_BAR_IO_BASE) == PCIM_BAR_IO_BASE)
+				lo = ~0U;
+			else
+				lo &= PCIM_BAR_IO_BASE;
+		} else {
+			if ((lo & PCIM_BAR_MEM_BASE) == PCIM_BAR_MEM_BASE)
+				lo = ~0U;
+			else
+				lo &= PCIM_BAR_MEM_BASE;
+		}
+
+		if (pi->pi_bar[update_idx].type == PCIBAR_MEM64)
+			hi = pci_get_cfgdata32(pi, PCIR_BAR(update_idx + 1));
+		else
+			hi = 0;
+
+		if (lo != ~0U && hi != ~0U) {
+			pi->pi_bar[update_idx].addr = (uint64_t)lo |
+			    ((uint64_t)hi << 32U);
+			if ((cmd & PCIM_CMD_MEMEN &&
+			    pi->pi_bar[idx].type != PCIBAR_IO) ||
+			    (cmd & PCIM_CMD_PORTEN &&
+			    pi->pi_bar[idx].type == PCIBAR_IO)) {
+				passthru_modify_bar_registration(pi,
+				    update_idx, 1);
+			}
+		} else
+			pi->pi_bar[update_idx].addr = 0;
+		break;
+	}
+	default:
+		pi->pi_bar[idx].addr = 0;
+		break;
+	}
+	return (0);
+}
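passthru_cfgwrite_bar() implements the standard PCI BAR sizing handshake from the guest's point of view. A self-contained sketch with an illustrative 1 MB 32-bit memory BAR:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * After the guest writes ~0U to the BAR, a read returns the size
	 * mask in the address bits plus the low attribute bits (here:
	 * prefetchable memory).
	 */
	uint32_t readback = 0xFFF00000 | 0x8;	/* illustrative */
	uint32_t size = ~(readback & 0xFFFFFFF0) + 1;

	printf("BAR size: 0x%x bytes\n", size);	/* 0x100000 == 1 MB */
	return (0);
}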
+
 static int
 passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
     int coff, int bytes, uint32_t val)
@@ -832,10 +1077,20 @@
 
 	sc = pi->pi_arg;
 
+	/* skip protected PCI registers */
+	if (!is_pcir_writable(sc, coff))
+		return (-1);
+
 	/*
 	 * PCI BARs are emulated
 	 */
 	if (bar_access(coff))
+		return (passthru_cfgwrite_bar(ctx, vcpu, pi, coff, bytes,
+		    val));
+
+	/*
+	 * The PCI ROM is emulated.
+	 */
+	if (coff >= PCIR_BIOS && coff < PCIR_BIOS + 4)
 		return (-1);
 
 	/*
@@ -954,6 +1209,15 @@
 	return (val);
 }
 
+static void
+passthru_addr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
+    int enabled, uint64_t address)
+{
+	if (pi->pi_bar[baridx].type == PCIBAR_IO)
+		return;
+	passthru_modify_bar_registration(pi, baridx, enabled);
+}
+
 struct pci_devemu passthru = {
 	.pe_emu		= "passthru",
 	.pe_init	= passthru_init,
@@ -961,5 +1225,6 @@
 	.pe_cfgread	= passthru_cfgread,
 	.pe_barwrite	= passthru_write,
 	.pe_barread	= passthru_read,
+	.pe_baraddr	= passthru_addr,
 };
 PCI_EMUL_SET(passthru);
Index: usr.sbin/bhyve/pci_rom_alloc.c
===================================================================
--- /dev/null
+++ usr.sbin/bhyve/pci_rom_alloc.c
@@ -0,0 +1,63 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Beckhoff Automation GmbH & Co. KG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR OR CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "pci_emul.h"
+
+#define MB (1024 * 1024UL)
+
+#define ROM_SIZE (16 * MB)
+
+static int
+pci_rom_alloc_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
+{
+	pci_set_cfgdata16(pi, PCIR_VENDOR, 0xBAD0);
+	pci_set_cfgdata16(pi, PCIR_DEVICE, 0xBAD0);
+	pci_set_cfgdata16(pi, PCIR_COMMAND,
+	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+
+	pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, ROM_SIZE);
+
+	return (0);
+}
+
+/*
+ * This is only a dummy device. EFI checks the BARs to size the
+ * non-prefetchable MMIO range below 4 GB. If that range is smaller than
+ * 16 MB, EFI may skip ROM initialization.
+ */
+struct pci_devemu pci_rom_alloc = {
+	.pe_emu = "rom-alloc",
+	.pe_init = pci_rom_alloc_init,
+};
+PCI_EMUL_SET(pci_rom_alloc);
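Usage note: since pe_emu is "rom-alloc", the dummy device is attached like any other bhyve emulation via a slot option, e.g. -s 5,rom-alloc (the slot number is illustrative). It only reserves a 16 MB 32-bit memory BAR so that EFI keeps the non-prefetchable MMIO window below 4 GB large enough and does not skip ROM initialization.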