diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h index 6501baa455da..a9c73b75213b 100644 --- a/sys/amd64/include/vmm.h +++ b/sys/amd64/include/vmm.h @@ -1,812 +1,786 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _VMM_H_ #define _VMM_H_ #include #include #include struct vcpu; struct vm_snapshot_meta; #ifdef _KERNEL SDT_PROVIDER_DECLARE(vmm); #endif enum vm_suspend_how { VM_SUSPEND_NONE, VM_SUSPEND_RESET, VM_SUSPEND_POWEROFF, VM_SUSPEND_HALT, VM_SUSPEND_TRIPLEFAULT, VM_SUSPEND_LAST }; /* * Identifiers for architecturally defined registers. */ enum vm_reg_name { VM_REG_GUEST_RAX, VM_REG_GUEST_RBX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX, VM_REG_GUEST_RSI, VM_REG_GUEST_RDI, VM_REG_GUEST_RBP, VM_REG_GUEST_R8, VM_REG_GUEST_R9, VM_REG_GUEST_R10, VM_REG_GUEST_R11, VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14, VM_REG_GUEST_R15, VM_REG_GUEST_CR0, VM_REG_GUEST_CR3, VM_REG_GUEST_CR4, VM_REG_GUEST_DR7, VM_REG_GUEST_RSP, VM_REG_GUEST_RIP, VM_REG_GUEST_RFLAGS, VM_REG_GUEST_ES, VM_REG_GUEST_CS, VM_REG_GUEST_SS, VM_REG_GUEST_DS, VM_REG_GUEST_FS, VM_REG_GUEST_GS, VM_REG_GUEST_LDTR, VM_REG_GUEST_TR, VM_REG_GUEST_IDTR, VM_REG_GUEST_GDTR, VM_REG_GUEST_EFER, VM_REG_GUEST_CR2, VM_REG_GUEST_PDPTE0, VM_REG_GUEST_PDPTE1, VM_REG_GUEST_PDPTE2, VM_REG_GUEST_PDPTE3, VM_REG_GUEST_INTR_SHADOW, VM_REG_GUEST_DR0, VM_REG_GUEST_DR1, VM_REG_GUEST_DR2, VM_REG_GUEST_DR3, VM_REG_GUEST_DR6, VM_REG_GUEST_ENTRY_INST_LENGTH, VM_REG_GUEST_FS_BASE, VM_REG_GUEST_GS_BASE, VM_REG_GUEST_KGS_BASE, VM_REG_GUEST_TPR, VM_REG_LAST }; enum x2apic_state { X2APIC_DISABLED, X2APIC_ENABLED, X2APIC_STATE_LAST }; #define VM_INTINFO_VECTOR(info) ((info) & 0xff) #define VM_INTINFO_DEL_ERRCODE 0x800 #define VM_INTINFO_RSVD 0x7ffff000 #define VM_INTINFO_VALID 0x80000000 #define VM_INTINFO_TYPE 0x700 #define VM_INTINFO_HWINTR (0 << 8) #define VM_INTINFO_NMI (2 << 8) #define VM_INTINFO_HWEXCEPTION (3 << 8) #define VM_INTINFO_SWINTR (4 << 8) /* * The VM name has to fit into the pathname length constraints of devfs, * governed primarily by SPECNAMELEN. The length is the total number of * characters in the full path, relative to the mount point and not * including any leading '/' characters. 
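 * For reference, SPECNAMELEN is 63 on FreeBSD 12 and 255 on FreeBSD 13,
 * which is where the effective name lengths quoted below come from:
 * 63 - VM_MAX_PREFIXLEN (10) - VM_MAX_SUFFIXLEN (15) - 1 = 37 and
 * 255 - 10 - 15 - 1 = 229, respectively.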
* A prefix and a suffix are added to the name specified by the user. * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters * longer for future use. * The suffix is a string that identifies a bootrom image or some similar * image that is attached to the VM. A separator character gets added to * the suffix automatically when generating the full path, so it must be * accounted for, reducing the effective length by 1. * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37 * bytes for FreeBSD 12. A minimum length is set for safety and supports * a SPECNAMELEN as small as 32 on old systems. */ #define VM_MAX_PREFIXLEN 10 #define VM_MAX_SUFFIXLEN 15 #define VM_MIN_NAMELEN 6 #define VM_MAX_NAMELEN \ (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1) #ifdef _KERNEL #include CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN); struct vm; struct vm_exception; +struct vm_mem; struct seg_desc; struct vm_exit; struct vm_run; struct vhpet; struct vioapic; struct vlapic; struct vmspace; struct vm_object; struct vm_guest_paging; struct pmap; enum snapshot_req; struct vm_eventinfo { cpuset_t *rptr; /* rendezvous cookie */ int *sptr; /* suspend cookie */ int *iptr; /* reqidle cookie */ }; typedef int (*vmm_init_func_t)(int ipinum); typedef int (*vmm_cleanup_func_t)(void); typedef void (*vmm_suspend_func_t)(void); typedef void (*vmm_resume_func_t)(void); typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap); typedef int (*vmi_run_func_t)(void *vcpui, register_t rip, struct pmap *pmap, struct vm_eventinfo *info); typedef void (*vmi_cleanup_func_t)(void *vmi); typedef void * (*vmi_vcpu_init_func_t)(void *vmi, struct vcpu *vcpu, int vcpu_id); typedef void (*vmi_vcpu_cleanup_func_t)(void *vcpui); typedef int (*vmi_get_register_t)(void *vcpui, int num, uint64_t *retval); typedef int (*vmi_set_register_t)(void *vcpui, int num, uint64_t val); typedef int (*vmi_get_desc_t)(void *vcpui, int num, struct seg_desc *desc); typedef int (*vmi_set_desc_t)(void *vcpui, int num, struct seg_desc *desc); typedef int (*vmi_get_cap_t)(void *vcpui, int num, int *retval); typedef int (*vmi_set_cap_t)(void *vcpui, int num, int val); typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max); typedef void (*vmi_vmspace_free)(struct vmspace *vmspace); typedef struct vlapic * (*vmi_vlapic_init)(void *vcpui); typedef void (*vmi_vlapic_cleanup)(struct vlapic *vlapic); typedef int (*vmi_snapshot_vcpu_t)(void *vcpui, struct vm_snapshot_meta *meta); typedef int (*vmi_restore_tsc_t)(void *vcpui, uint64_t now); struct vmm_ops { vmm_init_func_t modinit; /* module wide initialization */ vmm_cleanup_func_t modcleanup; vmm_resume_func_t modsuspend; vmm_resume_func_t modresume; vmi_init_func_t init; /* vm-specific initialization */ vmi_run_func_t run; vmi_cleanup_func_t cleanup; vmi_vcpu_init_func_t vcpu_init; vmi_vcpu_cleanup_func_t vcpu_cleanup; vmi_get_register_t getreg; vmi_set_register_t setreg; vmi_get_desc_t getdesc; vmi_set_desc_t setdesc; vmi_get_cap_t getcap; vmi_set_cap_t setcap; vmi_vmspace_alloc vmspace_alloc; vmi_vmspace_free vmspace_free; vmi_vlapic_init vlapic_init; vmi_vlapic_cleanup vlapic_cleanup; /* checkpoint operations */ vmi_snapshot_vcpu_t vcpu_snapshot; vmi_restore_tsc_t restore_tsc; }; extern const struct vmm_ops vmm_ops_intel; extern const struct vmm_ops vmm_ops_amd; extern u_int vm_maxcpu; /* maximum virtual cpus */ int vm_create(const char *name, struct vm **retvm); struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid); void 
vm_disable_vcpu_creation(struct vm *vm); void vm_slock_vcpus(struct vm *vm); void vm_unlock_vcpus(struct vm *vm); void vm_destroy(struct vm *vm); int vm_reinit(struct vm *vm); const char *vm_name(struct vm *vm); uint16_t vm_get_maxcpus(struct vm *vm); void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus); int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus); -/* - * APIs that modify the guest memory map require all vcpus to be frozen. - */ -void vm_slock_memsegs(struct vm *vm); -void vm_xlock_memsegs(struct vm *vm); -void vm_unlock_memsegs(struct vm *vm); -int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, - size_t len, int prot, int flags); -int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); -int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); -void vm_free_memseg(struct vm *vm, int ident); int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len); int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func); int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func); -/* - * APIs that inspect the guest memory map require only a *single* vcpu to - * be frozen. This acts like a read lock on the guest memory map since any - * modification requires *all* vcpus to be frozen. - */ -int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, - vm_ooffset_t *segoff, size_t *len, int *prot, int *flags); -int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, - struct vm_object **objptr); -vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm); -void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, - int prot, void **cookie); -void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, - int prot, void **cookie); -void vm_gpa_release(void *cookie); -bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa); - int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval); int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val); int vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *ret_desc); int vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc); int vm_run(struct vcpu *vcpu); int vm_suspend(struct vm *vm, enum vm_suspend_how how); int vm_inject_nmi(struct vcpu *vcpu); int vm_nmi_pending(struct vcpu *vcpu); void vm_nmi_clear(struct vcpu *vcpu); int vm_inject_extint(struct vcpu *vcpu); int vm_extint_pending(struct vcpu *vcpu); void vm_extint_clear(struct vcpu *vcpu); int vcpu_vcpuid(struct vcpu *vcpu); struct vm *vcpu_vm(struct vcpu *vcpu); struct vcpu *vm_vcpu(struct vm *vm, int cpu); struct vlapic *vm_lapic(struct vcpu *vcpu); struct vioapic *vm_ioapic(struct vm *vm); struct vhpet *vm_hpet(struct vm *vm); int vm_get_capability(struct vcpu *vcpu, int type, int *val); int vm_set_capability(struct vcpu *vcpu, int type, int val); int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state); int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state); int vm_apicid2vcpuid(struct vm *vm, int apicid); int vm_activate_cpu(struct vcpu *vcpu); int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu); int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu); int vm_restart_instruction(struct vcpu *vcpu); struct vm_exit *vm_exitinfo(struct vcpu *vcpu); cpuset_t *vm_exitinfo_cpuset(struct vcpu *vcpu); void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip); void vm_exit_debug(struct 
vcpu *vcpu, uint64_t rip); void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip); void vm_exit_astpending(struct vcpu *vcpu, uint64_t rip); void vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip); int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta); int vm_restore_time(struct vm *vm); #ifdef _SYS__CPUSET_H_ /* * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'. * The rendezvous 'func(arg)' is not allowed to do anything that will * cause the thread to be put to sleep. * * The caller cannot hold any locks when initiating the rendezvous. * * The implementation of this API may cause vcpus other than those specified * by 'dest' to be stalled. The caller should not rely on any vcpus making * forward progress when the rendezvous is in progress. */ typedef void (*vm_rendezvous_func_t)(struct vcpu *vcpu, void *arg); int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest, vm_rendezvous_func_t func, void *arg); cpuset_t vm_active_cpus(struct vm *vm); cpuset_t vm_debug_cpus(struct vm *vm); cpuset_t vm_suspended_cpus(struct vm *vm); cpuset_t vm_start_cpus(struct vm *vm, const cpuset_t *tostart); void vm_await_start(struct vm *vm, const cpuset_t *waiting); #endif /* _SYS__CPUSET_H_ */ static __inline int vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info) { /* * This check isn't done with atomic operations or under a lock because * there's no need to. If the vcpuid bit is set, the vcpu is part of a * rendezvous and the bit won't be cleared until the vcpu enters the * rendezvous. On rendezvous exit, the cpuset is cleared and the vcpu * will see an empty cpuset. So, the races are harmless. */ return (CPU_ISSET(vcpu_vcpuid(vcpu), info->rptr)); } static __inline int vcpu_suspended(struct vm_eventinfo *info) { return (*info->sptr); } static __inline int vcpu_reqidle(struct vm_eventinfo *info) { return (*info->iptr); } int vcpu_debugged(struct vcpu *vcpu); /* * Return true if device indicated by bus/slot/func is supposed to be a * pci passthrough device. * * Return false otherwise. */ bool vmm_is_pptdev(int bus, int slot, int func); void *vm_iommu_domain(struct vm *vm); enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING, }; int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle); enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu); static int __inline vcpu_is_running(struct vcpu *vcpu, int *hostcpu) { return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING); } #ifdef _SYS_PROC_H_ static int __inline vcpu_should_yield(struct vcpu *vcpu) { struct thread *td; td = curthread; return (td->td_ast != 0 || td->td_owepreempt != 0); } #endif void *vcpu_stats(struct vcpu *vcpu); void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr); struct vmspace *vm_vmspace(struct vm *vm); +struct vm_mem *vm_mem(struct vm *vm); struct vatpic *vm_atpic(struct vm *vm); struct vatpit *vm_atpit(struct vm *vm); struct vpmtmr *vm_pmtmr(struct vm *vm); struct vrtc *vm_rtc(struct vm *vm); /* * Inject exception 'vector' into the guest vcpu. This function returns 0 on * success and non-zero on failure. * * Wrapper functions like 'vm_inject_gp()' should be preferred to calling * this function directly because they enforce the trap-like or fault-like * behavior of an exception. * * This function should only be called in the context of the thread that is * executing this vcpu. 
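 * As an illustration only (not a new interface), a direct call that wants
 * fault-like #GP(0) semantics would look like
 * vm_inject_exception(vcpu, IDT_GP, 1, 0, 1), i.e. a valid error code of
 * zero and the instruction marked for restart; the vm_inject_gp() wrapper
 * expresses the same intent more safely.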
*/ int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid, uint32_t errcode, int restart_instruction); /* * This function is called after a VM-exit that occurred during exception or * interrupt delivery through the IDT. The format of 'intinfo' is described * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2. * * If a VM-exit handler completes the event delivery successfully then it * should call vm_exit_intinfo() to extinguish the pending event. For e.g., * if the task switch emulation is triggered via a task gate then it should * call this function with 'intinfo=0' to indicate that the external event * is not pending anymore. * * Return value is 0 on success and non-zero on failure. */ int vm_exit_intinfo(struct vcpu *vcpu, uint64_t intinfo); /* * This function is called before every VM-entry to retrieve a pending * event that should be injected into the guest. This function combines * nested events into a double or triple fault. * * Returns 0 if there are no events that need to be injected into the guest * and non-zero otherwise. */ int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *info); int vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2); /* * Function used to keep track of the guest's TSC offset. The * offset is used by the virtualization extensions to provide a consistent * value for the Time Stamp Counter to the guest. */ void vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset); enum vm_reg_name vm_segment_name(int seg_encoding); struct vm_copyinfo { uint64_t gpa; size_t len; void *hva; void *cookie; }; /* * Set up 'copyinfo[]' to copy to/from guest linear address space starting * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for * a copyin or PROT_WRITE for a copyout. * * retval is_fault Interpretation * 0 0 Success * 0 1 An exception was injected into the guest * EFAULT N/A Unrecoverable error * * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if * the return value is 0. The 'copyinfo[]' resources should be freed by calling * 'vm_copy_teardown()' after the copy is done. */ int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, int num_copyinfo, int *is_fault); void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo); void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len); void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len); int vcpu_trace_exceptions(struct vcpu *vcpu); int vcpu_trap_wbinvd(struct vcpu *vcpu); #endif /* KERNEL */ /* * Identifiers for optional vmm capabilities */ enum vm_cap_type { VM_CAP_HALT_EXIT, VM_CAP_MTRAP_EXIT, VM_CAP_PAUSE_EXIT, VM_CAP_UNRESTRICTED_GUEST, VM_CAP_ENABLE_INVPCID, VM_CAP_BPT_EXIT, VM_CAP_RDPID, VM_CAP_RDTSCP, VM_CAP_IPI_EXIT, VM_CAP_MASK_HWINTR, VM_CAP_RFLAGS_TF, VM_CAP_MAX }; enum vm_intr_trigger { EDGE_TRIGGER, LEVEL_TRIGGER }; /* * The 'access' field has the format specified in Table 21-2 of the Intel * Architecture Manual vol 3b. * * XXX The contents of the 'access' field are architecturally defined except * bit 16 - Segment Unusable. */ struct seg_desc { uint64_t base; uint32_t limit; uint32_t access; }; #define SEG_DESC_TYPE(access) ((access) & 0x001f) #define SEG_DESC_DPL(access) (((access) >> 5) & 0x3) #define SEG_DESC_PRESENT(access) (((access) & 0x0080) ? 1 : 0) #define SEG_DESC_DEF32(access) (((access) & 0x4000) ? 1 : 0) #define SEG_DESC_GRANULARITY(access) (((access) & 0x8000) ? 
1 : 0) #define SEG_DESC_UNUSABLE(access) (((access) & 0x10000) ? 1 : 0) enum vm_cpu_mode { CPU_MODE_REAL, CPU_MODE_PROTECTED, CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */ CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */ }; enum vm_paging_mode { PAGING_MODE_FLAT, PAGING_MODE_32, PAGING_MODE_PAE, PAGING_MODE_64, PAGING_MODE_64_LA57, }; struct vm_guest_paging { uint64_t cr3; int cpl; enum vm_cpu_mode cpu_mode; enum vm_paging_mode paging_mode; }; /* * The data structures 'vie' and 'vie_op' are meant to be opaque to the * consumers of instruction decoding. The only reason why their contents * need to be exposed is because they are part of the 'vm_exit' structure. */ struct vie_op { uint8_t op_byte; /* actual opcode byte */ uint8_t op_type; /* type of operation (e.g. MOV) */ uint16_t op_flags; }; _Static_assert(sizeof(struct vie_op) == 4, "ABI"); _Static_assert(_Alignof(struct vie_op) == 2, "ABI"); #define VIE_INST_SIZE 15 struct vie { uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */ uint8_t num_valid; /* size of the instruction */ /* The following fields are all zeroed upon restart. */ #define vie_startzero num_processed uint8_t num_processed; uint8_t addrsize:4, opsize:4; /* address and operand sizes */ uint8_t rex_w:1, /* REX prefix */ rex_r:1, rex_x:1, rex_b:1, rex_present:1, repz_present:1, /* REP/REPE/REPZ prefix */ repnz_present:1, /* REPNE/REPNZ prefix */ opsize_override:1, /* Operand size override */ addrsize_override:1, /* Address size override */ segment_override:1; /* Segment override */ uint8_t mod:2, /* ModRM byte */ reg:4, rm:4; uint8_t ss:2, /* SIB byte */ vex_present:1, /* VEX prefixed */ vex_l:1, /* L bit */ index:4, /* SIB byte */ base:4; /* SIB byte */ uint8_t disp_bytes; uint8_t imm_bytes; uint8_t scale; uint8_t vex_reg:4, /* vvvv: first source register specifier */ vex_pp:2, /* pp */ _sparebits:2; uint8_t _sparebytes[2]; int base_register; /* VM_REG_GUEST_xyz */ int index_register; /* VM_REG_GUEST_xyz */ int segment_register; /* VM_REG_GUEST_xyz */ int64_t displacement; /* optional addr displacement */ int64_t immediate; /* optional immediate operand */ uint8_t decoded; /* set to 1 if successfully decoded */ uint8_t _sparebyte; struct vie_op op; /* opcode description */ }; _Static_assert(sizeof(struct vie) == 64, "ABI"); _Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI"); _Static_assert(__offsetof(struct vie, scale) == 24, "ABI"); _Static_assert(__offsetof(struct vie, base_register) == 28, "ABI"); enum vm_exitcode { VM_EXITCODE_INOUT, VM_EXITCODE_VMX, VM_EXITCODE_BOGUS, VM_EXITCODE_RDMSR, VM_EXITCODE_WRMSR, VM_EXITCODE_HLT, VM_EXITCODE_MTRAP, VM_EXITCODE_PAUSE, VM_EXITCODE_PAGING, VM_EXITCODE_INST_EMUL, VM_EXITCODE_SPINUP_AP, VM_EXITCODE_DEPRECATED1, /* used to be SPINDOWN_CPU */ VM_EXITCODE_RENDEZVOUS, VM_EXITCODE_IOAPIC_EOI, VM_EXITCODE_SUSPENDED, VM_EXITCODE_INOUT_STR, VM_EXITCODE_TASK_SWITCH, VM_EXITCODE_MONITOR, VM_EXITCODE_MWAIT, VM_EXITCODE_SVM, VM_EXITCODE_REQIDLE, VM_EXITCODE_DEBUG, VM_EXITCODE_VMINSN, VM_EXITCODE_BPT, VM_EXITCODE_IPI, VM_EXITCODE_DB, VM_EXITCODE_MAX }; struct vm_inout { uint16_t bytes:3; /* 1 or 2 or 4 */ uint16_t in:1; uint16_t string:1; uint16_t rep:1; uint16_t port; uint32_t eax; /* valid for out */ }; struct vm_inout_str { struct vm_inout inout; /* must be the first element */ struct vm_guest_paging paging; uint64_t rflags; uint64_t cr0; uint64_t index; uint64_t count; /* rep=1 (%rcx), rep=0 (1) */ int addrsize; enum vm_reg_name seg_name; struct seg_desc seg_desc; }; enum task_switch_reason { TSR_CALL, TSR_IRET, TSR_JMP, 
TSR_IDT_GATE, /* task gate in IDT */ }; struct vm_task_switch { uint16_t tsssel; /* new TSS selector */ int ext; /* task switch due to external event */ uint32_t errcode; int errcode_valid; /* push 'errcode' on the new stack */ enum task_switch_reason reason; struct vm_guest_paging paging; }; struct vm_exit { enum vm_exitcode exitcode; int inst_length; /* 0 means unknown */ uint64_t rip; union { struct vm_inout inout; struct vm_inout_str inout_str; struct { uint64_t gpa; int fault_type; } paging; struct { uint64_t gpa; uint64_t gla; uint64_t cs_base; int cs_d; /* CS.D */ struct vm_guest_paging paging; struct vie vie; } inst_emul; /* * VMX specific payload. Used when there is no "better" * exitcode to represent the VM-exit. */ struct { int status; /* vmx inst status */ /* * 'exit_reason' and 'exit_qualification' are valid * only if 'status' is zero. */ uint32_t exit_reason; uint64_t exit_qualification; /* * 'inst_error' and 'inst_type' are valid * only if 'status' is non-zero. */ int inst_type; int inst_error; } vmx; /* * SVM specific payload. */ struct { uint64_t exitcode; uint64_t exitinfo1; uint64_t exitinfo2; } svm; struct { int inst_length; } bpt; struct { int trace_trap; int pushf_intercept; int tf_shadow_val; struct vm_guest_paging paging; } dbg; struct { uint32_t code; /* ecx value */ uint64_t wval; } msr; struct { int vcpu; uint64_t rip; } spinup_ap; struct { uint64_t rflags; uint64_t intr_status; } hlt; struct { int vector; } ioapic_eoi; struct { enum vm_suspend_how how; } suspended; struct { /* * The destination vCPU mask is saved in vcpu->cpuset * and is copied out to userspace separately to avoid * ABI concerns. */ uint32_t mode; uint8_t vector; } ipi; struct vm_task_switch task_switch; } u; }; /* APIs to inject faults into the guest */ void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode); static __inline void vm_inject_ud(struct vcpu *vcpu) { vm_inject_fault(vcpu, IDT_UD, 0, 0); } static __inline void vm_inject_gp(struct vcpu *vcpu) { vm_inject_fault(vcpu, IDT_GP, 1, 0); } static __inline void vm_inject_ac(struct vcpu *vcpu, int errcode) { vm_inject_fault(vcpu, IDT_AC, 1, errcode); } static __inline void vm_inject_ss(struct vcpu *vcpu, int errcode) { vm_inject_fault(vcpu, IDT_SS, 1, errcode); } void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2); #endif /* _VMM_H_ */ diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c index ed37dd0c810e..6c16daaa47c2 100644 --- a/sys/amd64/vmm/amd/svm.c +++ b/sys/amd64/vmm/amd/svm.c @@ -1,2835 +1,2838 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_bhyve_snapshot.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include "vmm_lapic.h" #include "vmm_stat.h" #include "vmm_ioport.h" #include "vatpic.h" #include "vlapic.h" #include "vlapic_priv.h" #include "x86.h" #include "vmcb.h" #include "svm.h" #include "svm_softc.h" #include "svm_msr.h" #include "npt.h" +#include "io/ppt.h" SYSCTL_DECL(_hw_vmm); SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); /* * SVM CPUID function 0x8000_000A, edx bit decoding. */ #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ VMCB_CACHE_IOPM | \ VMCB_CACHE_I | \ VMCB_CACHE_TPR | \ VMCB_CACHE_CR2 | \ VMCB_CACHE_CR | \ VMCB_CACHE_DR | \ VMCB_CACHE_DT | \ VMCB_CACHE_SEG | \ VMCB_CACHE_NP) static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 0, NULL); static MALLOC_DEFINE(M_SVM, "svm", "svm"); static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); static uint32_t svm_feature = ~0U; /* AMD SVM features. */ SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, "SVM features advertised by CPUID.8000000AH:EDX"); static int disable_npf_assist; SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, &disable_npf_assist, 0, NULL); /* Maximum ASIDs supported by the processor */ static uint32_t nasid; SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, "Number of ASIDs supported by this processor"); /* Current ASID generation for each host cpu */ static struct asid asid[MAXCPU]; /* SVM host state saved area of size 4KB for each physical core. 
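 * One page per host CPU is allocated in svm_modinit() and each CPU
 * registers its page with the hardware by writing its physical address
 * to MSR_VM_HSAVE_PA in svm_enable().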
*/ static uint8_t *hsave; static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc); static int svm_setreg(void *vcpui, int ident, uint64_t val); static int svm_getreg(void *vcpui, int ident, uint64_t *val); static __inline int flush_by_asid(void) { return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); } static __inline int decode_assist(void) { return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); } static void svm_disable(void *arg __unused) { uint64_t efer; efer = rdmsr(MSR_EFER); efer &= ~EFER_SVM; wrmsr(MSR_EFER, efer); } /* * Disable SVM on all CPUs. */ static int svm_modcleanup(void) { smp_rendezvous(NULL, svm_disable, NULL, NULL); if (hsave != NULL) kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE); return (0); } /* * Verify that all the features required by bhyve are available. */ static int check_svm_features(void) { u_int regs[4]; /* CPUID Fn8000_000A is for SVM */ do_cpuid(0x8000000A, regs); svm_feature &= regs[3]; /* * The number of ASIDs can be configured to be less than what is * supported by the hardware but not more. */ if (nasid == 0 || nasid > regs[1]) nasid = regs[1]; KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); /* bhyve requires the Nested Paging feature */ if (!(svm_feature & AMD_CPUID_SVM_NP)) { printf("SVM: Nested Paging feature not available.\n"); return (ENXIO); } /* bhyve requires the NRIP Save feature */ if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { printf("SVM: NRIP Save feature not available.\n"); return (ENXIO); } return (0); } static void svm_enable(void *arg __unused) { uint64_t efer; efer = rdmsr(MSR_EFER); efer |= EFER_SVM; wrmsr(MSR_EFER, efer); wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE])); } /* * Return 1 if SVM is enabled on this processor and 0 otherwise. */ static int svm_available(void) { uint64_t msr; /* Section 15.4 Enabling SVM from APM2. */ if ((amd_feature2 & AMDID2_SVM) == 0) { printf("SVM: not available.\n"); return (0); } msr = rdmsr(MSR_VM_CR); if ((msr & VM_CR_SVMDIS) != 0) { printf("SVM: disabled by BIOS.\n"); return (0); } return (1); } static int svm_modinit(int ipinum) { int error, cpu; if (!svm_available()) return (ENXIO); error = check_svm_features(); if (error) return (error); vmcb_clean &= VMCB_CACHE_DEFAULT; for (cpu = 0; cpu < MAXCPU; cpu++) { /* * Initialize the host ASIDs to their "highest" valid values. * * The next ASID allocation will rollover both 'gen' and 'num' * and start off the sequence at {1,1}. 
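 * Roughly: starting from { gen = ~0UL, num = nasid - 1 }, the first
 * allocation pushes 'num' past the last valid ASID, which forces a new
 * generation and restarts the numbering, yielding { gen = 1, num = 1 }.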
*/ asid[cpu].gen = ~0UL; asid[cpu].num = nasid - 1; } svm_msr_init(); svm_npt_init(ipinum); /* Enable SVM on all CPUs */ hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); smp_rendezvous(NULL, svm_enable, NULL, NULL); return (0); } static void svm_modsuspend(void) { } static void svm_modresume(void) { svm_enable(NULL); } #ifdef BHYVE_SNAPSHOT void svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(vcpu); ctrl->tsc_offset = offset; svm_set_dirty(vcpu, VMCB_CACHE_I); SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); vm_set_tsc_offset(vcpu->vcpu, offset); } #endif /* Pentium compatible MSRs */ #define MSR_PENTIUM_START 0 #define MSR_PENTIUM_END 0x1FFF /* AMD 6th generation and Intel compatible MSRs */ #define MSR_AMD6TH_START 0xC0000000UL #define MSR_AMD6TH_END 0xC0001FFFUL /* AMD 7th and 8th generation compatible MSRs */ #define MSR_AMD7TH_START 0xC0010000UL #define MSR_AMD7TH_END 0xC0011FFFUL /* * Get the index and bit position for a MSR in permission bitmap. * Two bits are used for each MSR: lower bit for read and higher bit for write. */ static int svm_msr_index(uint64_t msr, int *index, int *bit) { uint32_t base, off; *index = -1; *bit = (msr % 4) * 2; base = 0; if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { *index = msr / 4; return (0); } base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { off = (msr - MSR_AMD6TH_START); *index = (off + base) / 4; return (0); } base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { off = (msr - MSR_AMD7TH_START); *index = (off + base) / 4; return (0); } return (EINVAL); } /* * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. */ static void svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) { int index, bit, error __diagused; error = svm_msr_index(msr, &index, &bit); KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, ("%s: invalid index %d for msr %#lx", __func__, index, msr)); KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " "msr %#lx", __func__, bit, msr)); if (read) perm_bitmap[index] &= ~(1UL << bit); if (write) perm_bitmap[index] &= ~(2UL << bit); } static void svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) { svm_msr_perm(perm_bitmap, msr, true, true); } static void svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) { svm_msr_perm(perm_bitmap, msr, true, false); } static __inline int svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) { struct vmcb_ctrl *ctrl; KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); ctrl = svm_get_vmcb_ctrl(vcpu); return (ctrl->intercept[idx] & bitmask ? 
1 : 0); } static __inline void svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) { struct vmcb_ctrl *ctrl; uint32_t oldval; KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); ctrl = svm_get_vmcb_ctrl(vcpu); oldval = ctrl->intercept[idx]; if (enabled) ctrl->intercept[idx] |= bitmask; else ctrl->intercept[idx] &= ~bitmask; if (ctrl->intercept[idx] != oldval) { svm_set_dirty(vcpu, VMCB_CACHE_I); SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, oldval, ctrl->intercept[idx]); } } static __inline void svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) { svm_set_intercept(vcpu, off, bitmask, 0); } static __inline void svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) { svm_set_intercept(vcpu, off, bitmask, 1); } static void vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, uint64_t msrpm_base_pa, uint64_t np_pml4) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; uint32_t mask; int n; ctrl = svm_get_vmcb_ctrl(vcpu); state = svm_get_vmcb_state(vcpu); ctrl->iopm_base_pa = iopm_base_pa; ctrl->msrpm_base_pa = msrpm_base_pa; /* Enable nested paging */ ctrl->np_enable = 1; ctrl->n_cr3 = np_pml4; /* * Intercept accesses to the control registers that are not shadowed * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. */ for (n = 0; n < 16; n++) { mask = (BIT(n) << 16) | BIT(n); if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); else svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); } /* * Intercept everything when tracing guest exceptions otherwise * just intercept machine check exception. */ if (vcpu_trace_exceptions(vcpu->vcpu)) { for (n = 0; n < 32; n++) { /* * Skip unimplemented vectors in the exception bitmap. */ if (n == 2 || n == 9) { continue; } svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); } } else { svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); } /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); /* * Intercept SVM instructions since AMD enables them in guests otherwise. * Non-intercepted VMMCALL causes #UD, skip it. 
*/ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); if (vcpu_trap_wbinvd(vcpu->vcpu)) { svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_WBINVD); } /* * From section "Canonicalization and Consistency Checks" in APMv2 * the VMRUN intercept bit must be set to pass the consistency check. */ svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); /* * The ASID will be set to a non-zero value just before VMRUN. */ ctrl->asid = 0; /* * Section 15.21.1, Interrupt Masking in EFLAGS * Section 15.21.2, Virtualizing APIC.TPR * * This must be set for %rflag and %cr8 isolation of guest and host. */ ctrl->v_intr_masking = 1; /* Enable Last Branch Record aka LBR for debugging */ ctrl->lbr_virt_en = 1; state->dbgctl = BIT(0); /* EFER_SVM must always be set when the guest is executing */ state->efer = EFER_SVM; /* Set up the PAT to power-on state */ state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_THROUGH) | PAT_VALUE(2, PAT_UNCACHED) | PAT_VALUE(3, PAT_UNCACHEABLE) | PAT_VALUE(4, PAT_WRITE_BACK) | PAT_VALUE(5, PAT_WRITE_THROUGH) | PAT_VALUE(6, PAT_UNCACHED) | PAT_VALUE(7, PAT_UNCACHEABLE); /* Set up DR6/7 to power-on state */ state->dr6 = DBREG_DR6_RESERVED1; state->dr7 = DBREG_DR7_RESERVED1; } /* * Initialize a virtual machine. */ static void * svm_init(struct vm *vm, pmap_t pmap) { struct svm_softc *svm_sc; svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); if (svm_sc->msr_bitmap == NULL) panic("contigmalloc of SVM MSR bitmap failed"); svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); if (svm_sc->iopm_bitmap == NULL) panic("contigmalloc of SVM IO bitmap failed"); svm_sc->vm = vm; svm_sc->nptp = vtophys(pmap->pm_pmltop); /* * Intercept read and write accesses to all MSRs. */ memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); /* * Access to the following MSRs is redirected to the VMCB when the * guest is executing. Therefore it is safe to allow the guest to * read/write these MSRs directly without hypervisor involvement. */ svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); /* * Intercept writes to make sure that the EFER_SVM bit is not cleared. */ svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); /* Intercept access to all I/O ports. 
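 * With every bit set in the IOPM, any IN/OUT executed by the guest hits
 * a set permission bit and raises #VMEXIT(IOIO), which is then decoded
 * by svm_handle_io() below.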
*/ memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); return (svm_sc); } static void * svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) { struct svm_softc *sc = vmi; struct svm_vcpu *vcpu; vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); vcpu->sc = sc; vcpu->vcpu = vcpu1; vcpu->vcpuid = vcpuid; vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, M_WAITOK | M_ZERO); vcpu->nextrip = ~0; vcpu->lastcpu = NOCPU; vcpu->vmcb_pa = vtophys(vcpu->vmcb); vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), sc->nptp); svm_msr_guest_init(sc, vcpu); return (vcpu); } /* * Collateral for a generic SVM VM-exit. */ static void vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) { vme->exitcode = VM_EXITCODE_SVM; vme->u.svm.exitcode = code; vme->u.svm.exitinfo1 = info1; vme->u.svm.exitinfo2 = info2; } static int svm_cpl(struct vmcb_state *state) { /* * From APMv2: * "Retrieve the CPL from the CPL field in the VMCB, not * from any segment DPL" */ return (state->cpl); } static enum vm_cpu_mode svm_vcpu_mode(struct vmcb *vmcb) { struct vmcb_segment seg; struct vmcb_state *state; int error __diagused; state = &vmcb->state; if (state->efer & EFER_LMA) { error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, error)); /* * Section 4.8.1 for APM2, check if Code Segment has * Long attribute set in descriptor. */ if (seg.attrib & VMCB_CS_ATTRIB_L) return (CPU_MODE_64BIT); else return (CPU_MODE_COMPATIBILITY); } else if (state->cr0 & CR0_PE) { return (CPU_MODE_PROTECTED); } else { return (CPU_MODE_REAL); } } static enum vm_paging_mode svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) { if ((cr0 & CR0_PG) == 0) return (PAGING_MODE_FLAT); if ((cr4 & CR4_PAE) == 0) return (PAGING_MODE_32); if (efer & EFER_LME) return (PAGING_MODE_64); else return (PAGING_MODE_PAE); } /* * ins/outs utility routines */ static uint64_t svm_inout_str_index(struct svm_regctx *regs, int in) { uint64_t val; val = in ? regs->sctx_rdi : regs->sctx_rsi; return (val); } static uint64_t svm_inout_str_count(struct svm_regctx *regs, int rep) { uint64_t val; val = rep ? regs->sctx_rcx : 1; return (val); } static void svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, struct vm_inout_str *vis) { int error __diagused, s; if (in) { vis->seg_name = VM_REG_GUEST_ES; } else { /* The segment field has standard encoding */ s = (info1 >> 10) & 0x7; vis->seg_name = vm_segment_name(s); } error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); } static int svm_inout_str_addrsize(uint64_t info1) { uint32_t size; size = (info1 >> 7) & 0x7; switch (size) { case 1: return (2); /* 16 bit */ case 2: return (4); /* 32 bit */ case 4: return (8); /* 64 bit */ default: panic("%s: invalid size encoding %d", __func__, size); } } static void svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) { struct vmcb_state *state; state = &vmcb->state; paging->cr3 = state->cr3; paging->cpl = svm_cpl(state); paging->cpu_mode = svm_vcpu_mode(vmcb); paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, state->efer); } #define UNHANDLED 0 /* * Handle guest I/O intercept. 
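 * EXITINFO1 bits consumed here: bit 0 selects IN vs. OUT, bit 2 flags a
 * string instruction, bit 3 a REP prefix, bits 6:4 give the operand size
 * in bytes, bits 31:16 the port number, and bits 12:10 the effective
 * segment (only populated when the DecodeAssist capability is present).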
*/ static int svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; struct svm_regctx *regs; struct vm_inout_str *vis; uint64_t info1; int inout_string; state = svm_get_vmcb_state(vcpu); ctrl = svm_get_vmcb_ctrl(vcpu); regs = svm_get_guest_regctx(vcpu); info1 = ctrl->exitinfo1; inout_string = info1 & BIT(2) ? 1 : 0; /* * The effective segment number in EXITINFO1[12:10] is populated * only if the processor has the DecodeAssist capability. * * XXX this is not specified explicitly in APMv2 but can be verified * empirically. */ if (inout_string && !decode_assist()) return (UNHANDLED); vmexit->exitcode = VM_EXITCODE_INOUT; vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; vmexit->u.inout.string = inout_string; vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; vmexit->u.inout.bytes = (info1 >> 4) & 0x7; vmexit->u.inout.port = (uint16_t)(info1 >> 16); vmexit->u.inout.eax = (uint32_t)(state->rax); if (inout_string) { vmexit->exitcode = VM_EXITCODE_INOUT_STR; vis = &vmexit->u.inout_str; svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); vis->rflags = state->rflags; vis->cr0 = state->cr0; vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); vis->addrsize = svm_inout_str_addrsize(info1); svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); } return (UNHANDLED); } static int npf_fault_type(uint64_t exitinfo1) { if (exitinfo1 & VMCB_NPF_INFO1_W) return (VM_PROT_WRITE); else if (exitinfo1 & VMCB_NPF_INFO1_ID) return (VM_PROT_EXECUTE); else return (VM_PROT_READ); } static bool svm_npf_emul_fault(uint64_t exitinfo1) { if (exitinfo1 & VMCB_NPF_INFO1_ID) { return (false); } if (exitinfo1 & VMCB_NPF_INFO1_GPT) { return (false); } if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { return (false); } return (true); } static void svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) { struct vm_guest_paging *paging; struct vmcb_segment seg; struct vmcb_ctrl *ctrl; char *inst_bytes; int error __diagused, inst_len; ctrl = &vmcb->ctrl; paging = &vmexit->u.inst_emul.paging; vmexit->exitcode = VM_EXITCODE_INST_EMUL; vmexit->u.inst_emul.gpa = gpa; vmexit->u.inst_emul.gla = VIE_INVALID_GLA; svm_paging_info(vmcb, paging); error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); switch(paging->cpu_mode) { case CPU_MODE_REAL: vmexit->u.inst_emul.cs_base = seg.base; vmexit->u.inst_emul.cs_d = 0; break; case CPU_MODE_PROTECTED: case CPU_MODE_COMPATIBILITY: vmexit->u.inst_emul.cs_base = seg.base; /* * Section 4.8.1 of APM2, Default Operand Size or D bit. */ vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 1 : 0; break; default: vmexit->u.inst_emul.cs_base = 0; vmexit->u.inst_emul.cs_d = 0; break; } /* * Copy the instruction bytes into 'vie' if available. */ if (decode_assist() && !disable_npf_assist) { inst_len = ctrl->inst_len; inst_bytes = ctrl->inst_bytes; } else { inst_len = 0; inst_bytes = NULL; } vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); } #ifdef KTR static const char * intrtype_to_str(int intr_type) { switch (intr_type) { case VMCB_EVENTINJ_TYPE_INTR: return ("hwintr"); case VMCB_EVENTINJ_TYPE_NMI: return ("nmi"); case VMCB_EVENTINJ_TYPE_INTn: return ("swintr"); case VMCB_EVENTINJ_TYPE_EXCEPTION: return ("exception"); default: panic("%s: unknown intr_type %d", __func__, intr_type); } } #endif /* * Inject an event to vcpu as described in section 15.20, "Event injection". 
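 * Per that section, the EVENTINJ value assembled here packs the vector
 * into bits 7:0, the event type into bits 10:8, the error-code-valid and
 * valid flags into bits 11 and 31, and the optional error code into
 * bits 63:32.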
*/ static void svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, uint32_t error, bool ec_valid) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(vcpu); KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event already pending %#lx", __func__, ctrl->eventinj)); KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", __func__, vector)); switch (intr_type) { case VMCB_EVENTINJ_TYPE_INTR: case VMCB_EVENTINJ_TYPE_NMI: case VMCB_EVENTINJ_TYPE_INTn: break; case VMCB_EVENTINJ_TYPE_EXCEPTION: if (vector >= 0 && vector <= 31 && vector != 2) break; /* FALLTHROUGH */ default: panic("%s: invalid intr_type/vector: %d/%d", __func__, intr_type, vector); } ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; if (ec_valid) { ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; ctrl->eventinj |= (uint64_t)error << 32; SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", intrtype_to_str(intr_type), vector, error); } else { SVM_CTR2(vcpu, "Injecting %s at vector %d", intrtype_to_str(intr_type), vector); } } static void svm_update_virqinfo(struct svm_vcpu *vcpu) { struct vlapic *vlapic; struct vmcb_ctrl *ctrl; vlapic = vm_lapic(vcpu->vcpu); ctrl = svm_get_vmcb_ctrl(vcpu); /* Update %cr8 in the emulated vlapic */ vlapic_set_cr8(vlapic, ctrl->v_tpr); /* Virtual interrupt injection is not used. */ KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " "v_intr_vector %d", __func__, ctrl->v_intr_vector)); } static void svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) { struct vmcb_ctrl *ctrl; uint64_t intinfo; ctrl = svm_get_vmcb_ctrl(vcpu); intinfo = ctrl->exitintinfo; if (!VMCB_EXITINTINFO_VALID(intinfo)) return; /* * From APMv2, Section "Intercepts during IDT interrupt delivery" * * If a #VMEXIT happened during event delivery then record the event * that was being delivered. */ SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); vm_exit_intinfo(vcpu->vcpu, intinfo); } #ifdef INVARIANTS static __inline int vintr_intercept_enabled(struct svm_vcpu *vcpu) { return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); } #endif static __inline void enable_intr_window_exiting(struct svm_vcpu *vcpu) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(vcpu); if (ctrl->v_irq && ctrl->v_intr_vector == 0) { KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); KASSERT(vintr_intercept_enabled(vcpu), ("%s: vintr intercept should be enabled", __func__)); return; } SVM_CTR0(vcpu, "Enable intr window exiting"); ctrl->v_irq = 1; ctrl->v_ign_tpr = 1; ctrl->v_intr_vector = 0; svm_set_dirty(vcpu, VMCB_CACHE_TPR); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); } static __inline void disable_intr_window_exiting(struct svm_vcpu *vcpu) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(vcpu); if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { KASSERT(!vintr_intercept_enabled(vcpu), ("%s: vintr intercept should be disabled", __func__)); return; } SVM_CTR0(vcpu, "Disable intr window exiting"); ctrl->v_irq = 0; ctrl->v_intr_vector = 0; svm_set_dirty(vcpu, VMCB_CACHE_TPR); svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); } static int svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val) { struct vmcb_ctrl *ctrl; int oldval, newval; ctrl = svm_get_vmcb_ctrl(vcpu); oldval = ctrl->intr_shadow; newval = val ? 
1 : 0; if (newval != oldval) { ctrl->intr_shadow = newval; SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval); } return (0); } static int svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(vcpu); *val = ctrl->intr_shadow; return (0); } /* * Once an NMI is injected it blocks delivery of further NMIs until the handler * executes an IRET. The IRET intercept is enabled when an NMI is injected to * to track when the vcpu is done handling the NMI. */ static int nmi_blocked(struct svm_vcpu *vcpu) { int blocked; blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); return (blocked); } static void enable_nmi_blocking(struct svm_vcpu *vcpu) { KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked")); SVM_CTR0(vcpu, "vNMI blocking enabled"); svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); } static void clear_nmi_blocking(struct svm_vcpu *vcpu) { int error __diagused; KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked")); SVM_CTR0(vcpu, "vNMI blocking cleared"); /* * When the IRET intercept is cleared the vcpu will attempt to execute * the "iret" when it runs next. However, it is possible to inject * another NMI into the vcpu before the "iret" has actually executed. * * For e.g. if the "iret" encounters a #NPF when accessing the stack * it will trap back into the hypervisor. If an NMI is pending for * the vcpu it will be injected into the guest. * * XXX this needs to be fixed */ svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); /* * Set 'intr_shadow' to prevent an NMI from being injected on the * immediate VMRUN. */ error = svm_modify_intr_shadow(vcpu, 1); KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); } #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL static int svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, bool *retu) { struct vm_exit *vme; struct vmcb_state *state; uint64_t changed, lma, oldval; int error __diagused; state = svm_get_vmcb_state(vcpu); oldval = state->efer; SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ changed = oldval ^ newval; if (newval & EFER_MBZ_BITS) goto gpf; /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ if (changed & EFER_LME) { if (state->cr0 & CR0_PG) goto gpf; } /* EFER.LMA = EFER.LME & CR0.PG */ if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) lma = EFER_LMA; else lma = 0; if ((newval & EFER_LMA) != lma) goto gpf; if (newval & EFER_NXE) { if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) goto gpf; } /* * XXX bhyve does not enforce segment limits in 64-bit mode. Until * this is fixed flag guest attempt to set EFER_LMSLE as an error. 
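 * Such an attempt is bounced to userspace as a generic SVM exit
 * (VMCB_EXIT_MSR with exitinfo1 set to 1) instead of being emulated.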
*/ if (newval & EFER_LMSLE) { vme = vm_exitinfo(vcpu->vcpu); vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); *retu = true; return (0); } if (newval & EFER_FFXSR) { if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) goto gpf; } if (newval & EFER_TCE) { if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) goto gpf; } error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); return (0); gpf: vm_inject_gp(vcpu->vcpu); return (0); } static int emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu) { int error; if (lapic_msr(num)) error = lapic_wrmsr(vcpu->vcpu, num, val, retu); else if (num == MSR_EFER) error = svm_write_efer(sc, vcpu, val, retu); else error = svm_wrmsr(vcpu, num, val, retu); return (error); } static int emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) { struct vmcb_state *state; struct svm_regctx *ctx; uint64_t result; int error; if (lapic_msr(num)) error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); else error = svm_rdmsr(vcpu, num, &result, retu); if (error == 0) { state = svm_get_vmcb_state(vcpu); ctx = svm_get_guest_regctx(vcpu); state->rax = result & 0xffffffff; ctx->sctx_rdx = result >> 32; } return (error); } #ifdef KTR static const char * exit_reason_to_str(uint64_t reason) { int i; static char reasonbuf[32]; static const struct { int reason; const char *str; } reasons[] = { { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, { .reason = VMCB_EXIT_HLT, .str = "hlt" }, { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, { .reason = VMCB_EXIT_IO, .str = "inout" }, { .reason = VMCB_EXIT_MC, .str = "mchk" }, { .reason = VMCB_EXIT_INTR, .str = "extintr" }, { .reason = VMCB_EXIT_NMI, .str = "nmi" }, { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, { .reason = VMCB_EXIT_MSR, .str = "msr" }, { .reason = VMCB_EXIT_IRET, .str = "iret" }, { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, { .reason = VMCB_EXIT_STGI, .str = "stgi" }, { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, { .reason = VMCB_EXIT_INVD, .str = "invd" }, { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, { .reason = VMCB_EXIT_POPF, .str = "popf" }, { .reason = VMCB_EXIT_PUSHF, .str = "pushf" }, }; for (i = 0; i < nitems(reasons); i++) { if (reasons[i].reason == reason) return (reasons[i].str); } snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); return (reasonbuf); } #endif /* KTR */ /* * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs * that are due to instruction intercepts as well as MSR and IOIO intercepts * and exceptions caused by INT3, INTO and BOUND instructions. * * Return 1 if the nRIP is valid and 0 otherwise. */ static int nrip_valid(uint64_t exitcode) { switch (exitcode) { case 0x00 ... 0x0F: /* read of CR0 through CR15 */ case 0x10 ... 0x1F: /* write of CR0 through CR15 */ case 0x20 ... 0x2F: /* read of DR0 through DR15 */ case 0x30 ... 0x3F: /* write of DR0 through DR15 */ case 0x43: /* INT3 */ case 0x44: /* INTO */ case 0x45: /* BOUND */ case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... 
VMEXIT_MSR */ case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ return (1); default: return (0); } } static int svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, struct vm_exit *vmexit) { struct vmcb *vmcb; struct vmcb_state *state; struct vmcb_ctrl *ctrl; struct svm_regctx *ctx; uint64_t code, info1, info2, val; uint32_t eax, ecx, edx; int error __diagused, errcode_valid, handled, idtvec, reflect; bool retu; ctx = svm_get_guest_regctx(vcpu); vmcb = svm_get_vmcb(vcpu); state = &vmcb->state; ctrl = &vmcb->ctrl; handled = 0; code = ctrl->exitcode; info1 = ctrl->exitinfo1; info2 = ctrl->exitinfo2; vmexit->exitcode = VM_EXITCODE_BOGUS; vmexit->rip = state->rip; vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); /* * #VMEXIT(INVALID) needs to be handled early because the VMCB is * in an inconsistent state and can trigger assertions that would * never happen otherwise. */ if (code == VMCB_EXIT_INVALID) { vm_exit_svm(vmexit, code, info1, info2); return (0); } KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " "injection valid bit is set %#lx", __func__, ctrl->eventinj)); KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", vmexit->inst_length, code, info1, info2)); svm_update_virqinfo(vcpu); svm_save_intinfo(svm_sc, vcpu); switch (code) { case VMCB_EXIT_IRET: /* * Restart execution at "iret" but with the intercept cleared. */ vmexit->inst_length = 0; clear_nmi_blocking(vcpu); handled = 1; break; case VMCB_EXIT_VINTR: /* interrupt window exiting */ vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); handled = 1; break; case VMCB_EXIT_INTR: /* external interrupt */ vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); handled = 1; break; case VMCB_EXIT_NMI: /* external NMI */ handled = 1; break; case 0x40 ... 0x5F: vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); reflect = 1; idtvec = code - 0x40; switch (idtvec) { case IDT_MC: /* * Call the machine check handler by hand. Also don't * reflect the machine check back into the guest. */ reflect = 0; SVM_CTR0(vcpu, "Vectoring to MCE handler"); __asm __volatile("int $18"); break; case IDT_PF: error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); KASSERT(error == 0, ("%s: error %d updating cr2", __func__, error)); /* fallthru */ case IDT_NP: case IDT_SS: case IDT_GP: case IDT_AC: case IDT_TS: errcode_valid = 1; break; case IDT_DF: errcode_valid = 1; info1 = 0; break; case IDT_DB: { /* * Check if we are being stepped (RFLAGS.TF) * and bounce vmexit to userland. */ bool stepped = 0; uint64_t dr6 = 0; svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6); stepped = !!(dr6 & DBREG_DR6_BS); if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { vmexit->exitcode = VM_EXITCODE_DB; vmexit->u.dbg.trace_trap = 1; vmexit->u.dbg.pushf_intercept = 0; if (vcpu->dbg.popf_sstep) { /* * DB# exit was caused by stepping over * popf. */ uint64_t rflags; vcpu->dbg.popf_sstep = 0; /* * Update shadowed TF bit so the next * setcap(..., RFLAGS_SSTEP, 0) restores * the correct value */ svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); vcpu->dbg.rflags_tf = rflags & PSL_T; } else if (vcpu->dbg.pushf_sstep) { /* * DB# exit was caused by stepping over * pushf. */ vcpu->dbg.pushf_sstep = 0; /* * Adjusting the pushed rflags after a * restarted pushf instruction must be * handled outside of svm.c due to the * critical_enter() lock being held. 
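 * To make that possible the exit carries the shadowed TF value in
 * u.dbg.tf_shadow_val together with the guest paging state, so the
 * consumer can patch the RFLAGS image that pushf stored on the stack.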
*/ vmexit->u.dbg.pushf_intercept = 1; vmexit->u.dbg.tf_shadow_val = vcpu->dbg.rflags_tf; svm_paging_info(svm_get_vmcb(vcpu), &vmexit->u.dbg.paging); } /* Clear DR6 "single-step" bit. */ dr6 &= ~DBREG_DR6_BS; error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6); KASSERT(error == 0, ("%s: error %d updating DR6\r\n", __func__, error)); reflect = 0; } break; } case IDT_BP: vmexit->exitcode = VM_EXITCODE_BPT; vmexit->u.bpt.inst_length = vmexit->inst_length; vmexit->inst_length = 0; reflect = 0; break; case IDT_OF: case IDT_BR: /* * The 'nrip' field is populated for INT3, INTO and * BOUND exceptions and this also implies that * 'inst_length' is non-zero. * * Reset 'inst_length' to zero so the guest %rip at * event injection is identical to what it was when * the exception originally happened. */ SVM_CTR2(vcpu, "Reset inst_length from %d " "to zero before injecting exception %d", vmexit->inst_length, idtvec); vmexit->inst_length = 0; /* fallthru */ default: errcode_valid = 0; info1 = 0; break; } if (reflect) { KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " "when reflecting exception %d into guest", vmexit->inst_length, idtvec)); /* Reflect the exception back into the guest */ SVM_CTR2(vcpu, "Reflecting exception " "%d/%#x into the guest", idtvec, (int)info1); error = vm_inject_exception(vcpu->vcpu, idtvec, errcode_valid, info1, 0); KASSERT(error == 0, ("%s: vm_inject_exception error %d", __func__, error)); handled = 1; } break; case VMCB_EXIT_MSR: /* MSR access. */ eax = state->rax; ecx = ctx->sctx_rcx; edx = ctx->sctx_rdx; retu = false; if (info1) { vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); val = (uint64_t)edx << 32 | eax; SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { vmexit->exitcode = VM_EXITCODE_WRMSR; vmexit->u.msr.code = ecx; vmexit->u.msr.wval = val; } else if (!retu) { handled = 1; } else { KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, ("emulate_wrmsr retu with bogus exitcode")); } } else { SVM_CTR1(vcpu, "rdmsr %#x", ecx); vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); if (emulate_rdmsr(vcpu, ecx, &retu)) { vmexit->exitcode = VM_EXITCODE_RDMSR; vmexit->u.msr.code = ecx; } else if (!retu) { handled = 1; } else { KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, ("emulate_rdmsr retu with bogus exitcode")); } } break; case VMCB_EXIT_IO: handled = svm_handle_io(vcpu, vmexit); vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); break; case VMCB_EXIT_CPUID: vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); handled = x86_emulate_cpuid(vcpu->vcpu, &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx); break; case VMCB_EXIT_HLT: vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); vmexit->exitcode = VM_EXITCODE_HLT; vmexit->u.hlt.rflags = state->rflags; break; case VMCB_EXIT_PAUSE: vmexit->exitcode = VM_EXITCODE_PAUSE; vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); break; case VMCB_EXIT_NPF: /* EXITINFO2 contains the faulting guest physical address */ if (info1 & VMCB_NPF_INFO1_RSV) { SVM_CTR2(vcpu, "nested page fault with " "reserved bits set: info1(%#lx) info2(%#lx)", info1, info2); - } else if (vm_mem_allocated(vcpu->vcpu, info2)) { + } else if (vm_mem_allocated(vcpu->vcpu, info2) || + ppt_is_mmio(svm_sc->vm, info2)) { vmexit->exitcode = VM_EXITCODE_PAGING; vmexit->u.paging.gpa = info2; vmexit->u.paging.fault_type = npf_fault_type(info1); vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); SVM_CTR3(vcpu, "nested page fault " "on gpa %#lx/%#lx at rip %#lx", info2, info1, state->rip); } else if (svm_npf_emul_fault(info1)) { svm_handle_inst_emul(vmcb, 
info2, vmexit); vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); SVM_CTR3(vcpu, "inst_emul fault " "for gpa %#lx/%#lx at rip %#lx", info2, info1, state->rip); } break; case VMCB_EXIT_MONITOR: vmexit->exitcode = VM_EXITCODE_MONITOR; break; case VMCB_EXIT_MWAIT: vmexit->exitcode = VM_EXITCODE_MWAIT; break; case VMCB_EXIT_PUSHF: { if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { uint64_t rflags; svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); /* Restart this instruction. */ vmexit->inst_length = 0; /* Disable PUSHF intercepts - avoid a loop. */ svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, 0); /* Trace restarted instruction. */ svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); /* Let the IDT_DB handler know that pushf was stepped. */ vcpu->dbg.pushf_sstep = 1; handled = 1; } break; } case VMCB_EXIT_POPF: { if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { uint64_t rflags; svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); /* Restart this instruction */ vmexit->inst_length = 0; /* Disable POPF intercepts - avoid a loop*/ svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, 0); /* Trace restarted instruction */ svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); vcpu->dbg.popf_sstep = 1; handled = 1; } break; } case VMCB_EXIT_SHUTDOWN: case VMCB_EXIT_VMRUN: case VMCB_EXIT_VMMCALL: case VMCB_EXIT_VMLOAD: case VMCB_EXIT_VMSAVE: case VMCB_EXIT_STGI: case VMCB_EXIT_CLGI: case VMCB_EXIT_SKINIT: case VMCB_EXIT_ICEBP: case VMCB_EXIT_INVLPGA: vm_inject_ud(vcpu->vcpu); handled = 1; break; case VMCB_EXIT_INVD: case VMCB_EXIT_WBINVD: /* ignore exit */ handled = 1; break; default: vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); break; } SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", handled ? "handled" : "unhandled", exit_reason_to_str(code), vmexit->rip, vmexit->inst_length); if (handled) { vmexit->rip += vmexit->inst_length; vmexit->inst_length = 0; state->rip = vmexit->rip; } else { if (vmexit->exitcode == VM_EXITCODE_BOGUS) { /* * If this VM exit was not claimed by anybody then * treat it as a generic SVM exit. */ vm_exit_svm(vmexit, code, info1, info2); } else { /* * The exitcode and collateral have been populated. * The VM exit will be processed further in userland. */ } } return (handled); } static void svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) { uint64_t intinfo; if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) return; KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " "valid: %#lx", __func__, intinfo)); svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), VMCB_EXITINTINFO_VECTOR(intinfo), VMCB_EXITINTINFO_EC(intinfo), VMCB_EXITINTINFO_EC_VALID(intinfo)); vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); } /* * Inject event to virtual cpu. */ static void svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, struct vlapic *vlapic) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; uint8_t v_tpr; int vector, need_intr_window; int extint_pending; if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { return; } state = svm_get_vmcb_state(vcpu); ctrl = svm_get_vmcb_ctrl(vcpu); need_intr_window = 0; if (vcpu->nextrip != state->rip) { ctrl->intr_shadow = 0; SVM_CTR2(vcpu, "Guest interrupt blocking " "cleared due to rip change: %#lx/%#lx", vcpu->nextrip, state->rip); } /* * Inject pending events or exceptions for this vcpu. * * An event might be pending because the previous #VMEXIT happened * during event delivery (i.e. ctrl->exitintinfo). 
* * An event might also be pending because an exception was injected * by the hypervisor (e.g. #PF during instruction emulation). */ svm_inj_intinfo(sc, vcpu); /* NMI event has priority over interrupts. */ if (vm_nmi_pending(vcpu->vcpu)) { if (nmi_blocked(vcpu)) { /* * Can't inject another NMI if the guest has not * yet executed an "iret" after the last NMI. */ SVM_CTR0(vcpu, "Cannot inject NMI due " "to NMI-blocking"); } else if (ctrl->intr_shadow) { /* * Can't inject an NMI if the vcpu is in an intr_shadow. */ SVM_CTR0(vcpu, "Cannot inject NMI due to " "interrupt shadow"); need_intr_window = 1; goto done; } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { /* * If there is already an exception/interrupt pending * then defer the NMI until after that. */ SVM_CTR1(vcpu, "Cannot inject NMI due to " "eventinj %#lx", ctrl->eventinj); /* * Use self-IPI to trigger a VM-exit as soon as * possible after the event injection is completed. * * This works only if the external interrupt exiting * is at a lower priority than the event injection. * * Although not explicitly specified in APMv2 the * relative priorities were verified empirically. */ ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ } else { vm_nmi_clear(vcpu->vcpu); /* Inject NMI, vector number is not used */ svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false); /* virtual NMI blocking is now in effect */ enable_nmi_blocking(vcpu); SVM_CTR0(vcpu, "Injecting vNMI"); } } extint_pending = vm_extint_pending(vcpu->vcpu); if (!extint_pending) { if (!vlapic_pending_intr(vlapic, &vector)) goto done; KASSERT(vector >= 16 && vector <= 255, ("invalid vector %d from local APIC", vector)); } else { /* Ask the legacy pic for a vector to inject */ vatpic_pending_intr(sc->vm, &vector); KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", vector)); } /* * If the guest has disabled interrupts or is in an interrupt shadow * then we cannot inject the pending interrupt. */ if ((state->rflags & PSL_I) == 0) { SVM_CTR2(vcpu, "Cannot inject vector %d due to " "rflags %#lx", vector, state->rflags); need_intr_window = 1; goto done; } if (ctrl->intr_shadow) { SVM_CTR1(vcpu, "Cannot inject vector %d due to " "interrupt shadow", vector); need_intr_window = 1; goto done; } if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { SVM_CTR2(vcpu, "Cannot inject vector %d due to " "eventinj %#lx", vector, ctrl->eventinj); need_intr_window = 1; goto done; } svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); if (!extint_pending) { vlapic_intr_accepted(vlapic, vector); } else { vm_extint_clear(vcpu->vcpu); vatpic_intr_accepted(sc->vm, vector); } /* * Force a VM-exit as soon as the vcpu is ready to accept another * interrupt. This is done because the PIC might have another vector * that it wants to inject. Also, if the APIC has a pending interrupt * that was preempted by the ExtInt then it allows us to inject the * APIC vector as soon as possible. */ need_intr_window = 1; done: /* * The guest can modify the TPR by writing to %CR8. In guest mode * the processor reflects this write to V_TPR without hypervisor * intervention. * * The guest can also modify the TPR by writing to it via the memory * mapped APIC page. In this case, the write will be emulated by the * hypervisor. For this reason V_TPR must be updated before every * VMRUN. 
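	 *
	 * For example, a guest write of 9 to %cr8 raises its task
	 * priority to APIC TPR 0x90; in guest mode the CPU reflects
	 * that into V_TPR immediately, while the same priority change
	 * made through the memory-mapped TPR register only becomes
	 * visible here, via vlapic_get_cr8(), on the next VMRUN.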
*/ v_tpr = vlapic_get_cr8(vlapic); KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); if (ctrl->v_tpr != v_tpr) { SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", ctrl->v_tpr, v_tpr); ctrl->v_tpr = v_tpr; svm_set_dirty(vcpu, VMCB_CACHE_TPR); } if (need_intr_window) { /* * We use V_IRQ in conjunction with the VINTR intercept to * trap into the hypervisor as soon as a virtual interrupt * can be delivered. * * Since injected events are not subject to intercept checks * we need to ensure that the V_IRQ is not actually going to * be delivered on VM entry. The KASSERT below enforces this. */ KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, ("Bogus intr_window_exiting: eventinj (%#lx), " "intr_shadow (%u), rflags (%#lx)", ctrl->eventinj, ctrl->intr_shadow, state->rflags)); enable_intr_window_exiting(vcpu); } else { disable_intr_window_exiting(vcpu); } } static __inline void restore_host_tss(void) { struct system_segment_descriptor *tss_sd; /* * The TSS descriptor was in use prior to launching the guest so it * has been marked busy. * * 'ltr' requires the descriptor to be marked available so change the * type to "64-bit available TSS". */ tss_sd = PCPU_GET(tss); tss_sd->sd_type = SDT_SYSTSS; ltr(GSEL(GPROC0_SEL, SEL_KPL)); } static void svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) { struct vmcb_ctrl *ctrl; long eptgen; int cpu; bool alloc_asid; cpu = curcpu; CPU_SET_ATOMIC(cpu, &pmap->pm_active); smr_enter(pmap->pm_eptsmr); ctrl = svm_get_vmcb_ctrl(vcpu); /* * The TLB entries associated with the vcpu's ASID are not valid * if either of the following conditions is true: * * 1. The vcpu's ASID generation is different than the host cpu's * ASID generation. This happens when the vcpu migrates to a new * host cpu. It can also happen when the number of vcpus executing * on a host cpu is greater than the number of ASIDs available. * * 2. The pmap generation number is different than the value cached in * the 'vcpustate'. This happens when the host invalidates pages * belonging to the guest. * * asidgen eptgen Action * mismatch mismatch * 0 0 (a) * 0 1 (b1) or (b2) * 1 0 (c) * 1 1 (d) * * (a) There is no mismatch in eptgen or ASID generation and therefore * no further action is needed. * * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is * retained and the TLB entries associated with this ASID * are flushed by VMRUN. * * (b2) If the cpu does not support FlushByAsid then a new ASID is * allocated. * * (c) A new ASID is allocated. * * (d) A new ASID is allocated. */ alloc_asid = false; eptgen = atomic_load_long(&pmap->pm_eptgen); ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; if (vcpu->asid.gen != asid[cpu].gen) { alloc_asid = true; /* (c) and (d) */ } else if (vcpu->eptgen != eptgen) { if (flush_by_asid()) ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ else alloc_asid = true; /* (b2) */ } else { /* * This is the common case (a). */ KASSERT(!alloc_asid, ("ASID allocation not necessary")); KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); } if (alloc_asid) { if (++asid[cpu].num >= nasid) { asid[cpu].num = 1; if (++asid[cpu].gen == 0) asid[cpu].gen = 1; /* * If this cpu does not support "flush-by-asid" * then flush the entire TLB on a generation * bump. Subsequent ASID allocation in this * generation can be done without a TLB flush. 
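			 *
			 * Worked example with a hypothetical nasid of 4:
			 * ASIDs 1, 2 and 3 are handed out, then the next
			 * allocation wraps num back to 1 and bumps the
			 * generation; without FlushByAsid the whole TLB is
			 * flushed here so the recycled ASIDs cannot match
			 * stale guest entries.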
*/ if (!flush_by_asid()) ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; } vcpu->asid.gen = asid[cpu].gen; vcpu->asid.num = asid[cpu].num; ctrl->asid = vcpu->asid.num; svm_set_dirty(vcpu, VMCB_CACHE_ASID); /* * If this cpu supports "flush-by-asid" then the TLB * was not flushed after the generation bump. The TLB * is flushed selectively after every new ASID allocation. */ if (flush_by_asid()) ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; } vcpu->eptgen = eptgen; KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); KASSERT(ctrl->asid == vcpu->asid.num, ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); } static void svm_pmap_deactivate(pmap_t pmap) { smr_exit(pmap->pm_eptsmr); CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); } static __inline void disable_gintr(void) { __asm __volatile("clgi"); } static __inline void enable_gintr(void) { __asm __volatile("stgi"); } static __inline void svm_dr_enter_guest(struct svm_regctx *gctx) { /* Save host control debug registers. */ gctx->host_dr7 = rdr7(); gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); /* * Disable debugging in DR7 and DEBUGCTL to avoid triggering * exceptions in the host based on the guest DRx values. The * guest DR6, DR7, and DEBUGCTL are saved/restored in the * VMCB. */ load_dr7(0); wrmsr(MSR_DEBUGCTLMSR, 0); /* Save host debug registers. */ gctx->host_dr0 = rdr0(); gctx->host_dr1 = rdr1(); gctx->host_dr2 = rdr2(); gctx->host_dr3 = rdr3(); gctx->host_dr6 = rdr6(); /* Restore guest debug registers. */ load_dr0(gctx->sctx_dr0); load_dr1(gctx->sctx_dr1); load_dr2(gctx->sctx_dr2); load_dr3(gctx->sctx_dr3); } static __inline void svm_dr_leave_guest(struct svm_regctx *gctx) { /* Save guest debug registers. */ gctx->sctx_dr0 = rdr0(); gctx->sctx_dr1 = rdr1(); gctx->sctx_dr2 = rdr2(); gctx->sctx_dr3 = rdr3(); /* * Restore host debug registers. Restore DR7 and DEBUGCTL * last. */ load_dr0(gctx->host_dr0); load_dr1(gctx->host_dr1); load_dr2(gctx->host_dr2); load_dr3(gctx->host_dr3); load_dr6(gctx->host_dr6); wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); load_dr7(gctx->host_dr7); } /* * Start vcpu with specified RIP. */ static int svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) { struct svm_regctx *gctx; struct svm_softc *svm_sc; struct svm_vcpu *vcpu; struct vmcb_state *state; struct vmcb_ctrl *ctrl; struct vm_exit *vmexit; struct vlapic *vlapic; uint64_t vmcb_pa; int handled; uint16_t ldt_sel; vcpu = vcpui; svm_sc = vcpu->sc; state = svm_get_vmcb_state(vcpu); ctrl = svm_get_vmcb_ctrl(vcpu); vmexit = vm_exitinfo(vcpu->vcpu); vlapic = vm_lapic(vcpu->vcpu); gctx = svm_get_guest_regctx(vcpu); vmcb_pa = vcpu->vmcb_pa; if (vcpu->lastcpu != curcpu) { /* * Force new ASID allocation by invalidating the generation. */ vcpu->asid.gen = 0; /* * Invalidate the VMCB state cache by marking all fields dirty. */ svm_set_dirty(vcpu, 0xffffffff); /* * XXX * Setting 'vcpu->lastcpu' here is bit premature because * we may return from this function without actually executing * the VMRUN instruction. This could happen if a rendezvous * or an AST is pending on the first time through the loop. * * This works for now but any new side-effects of vcpu * migration should take this case into account. */ vcpu->lastcpu = curcpu; vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); } svm_msr_guest_enter(vcpu); /* Update Guest RIP */ state->rip = rip; do { /* * Disable global interrupts to guarantee atomicity during * loading of guest state. 
This includes not only the state * loaded by the "vmrun" instruction but also software state * maintained by the hypervisor: suspended and rendezvous * state, NPT generation number, vlapic interrupts etc. */ disable_gintr(); if (vcpu_suspended(evinfo)) { enable_gintr(); vm_exit_suspended(vcpu->vcpu, state->rip); break; } if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { enable_gintr(); vm_exit_rendezvous(vcpu->vcpu, state->rip); break; } if (vcpu_reqidle(evinfo)) { enable_gintr(); vm_exit_reqidle(vcpu->vcpu, state->rip); break; } /* We are asked to give the cpu by scheduler. */ if (vcpu_should_yield(vcpu->vcpu)) { enable_gintr(); vm_exit_astpending(vcpu->vcpu, state->rip); break; } if (vcpu_debugged(vcpu->vcpu)) { enable_gintr(); vm_exit_debug(vcpu->vcpu, state->rip); break; } /* * #VMEXIT resumes the host with the guest LDTR, so * save the current LDT selector so it can be restored * after an exit. The userspace hypervisor probably * doesn't use a LDT, but save and restore it to be * safe. */ ldt_sel = sldt(); svm_inj_interrupts(svm_sc, vcpu, vlapic); /* * Check the pmap generation and the ASID generation to * ensure that the vcpu does not use stale TLB mappings. */ svm_pmap_activate(vcpu, pmap); ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; vcpu->dirty = 0; SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); /* Launch Virtual Machine. */ SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); svm_dr_enter_guest(gctx); svm_launch(vmcb_pa, gctx, get_pcpu()); svm_dr_leave_guest(gctx); svm_pmap_deactivate(pmap); /* * The host GDTR and IDTR is saved by VMRUN and restored * automatically on #VMEXIT. However, the host TSS needs * to be restored explicitly. */ restore_host_tss(); /* Restore host LDTR. */ lldt(ldt_sel); /* #VMEXIT disables interrupts so re-enable them here. */ enable_gintr(); /* Update 'nextrip' */ vcpu->nextrip = state->rip; /* Handle #VMEXIT and if required return to user space. 
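		 *
		 * svm_vmexit() returns 1 when the exit was handled entirely
		 * in the kernel, which keeps this loop running and re-enters
		 * the guest; a return of 0 ends the loop with
		 * vmexit->exitcode describing the work left for userland.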
*/ handled = svm_vmexit(svm_sc, vcpu, vmexit); } while (handled); svm_msr_guest_exit(vcpu); return (0); } static void svm_vcpu_cleanup(void *vcpui) { struct svm_vcpu *vcpu = vcpui; free(vcpu->vmcb, M_SVM); free(vcpu, M_SVM); } static void svm_cleanup(void *vmi) { struct svm_softc *sc = vmi; free(sc->iopm_bitmap, M_SVM); free(sc->msr_bitmap, M_SVM); free(sc, M_SVM); } static register_t * swctx_regptr(struct svm_regctx *regctx, int reg) { switch (reg) { case VM_REG_GUEST_RBX: return (®ctx->sctx_rbx); case VM_REG_GUEST_RCX: return (®ctx->sctx_rcx); case VM_REG_GUEST_RDX: return (®ctx->sctx_rdx); case VM_REG_GUEST_RDI: return (®ctx->sctx_rdi); case VM_REG_GUEST_RSI: return (®ctx->sctx_rsi); case VM_REG_GUEST_RBP: return (®ctx->sctx_rbp); case VM_REG_GUEST_R8: return (®ctx->sctx_r8); case VM_REG_GUEST_R9: return (®ctx->sctx_r9); case VM_REG_GUEST_R10: return (®ctx->sctx_r10); case VM_REG_GUEST_R11: return (®ctx->sctx_r11); case VM_REG_GUEST_R12: return (®ctx->sctx_r12); case VM_REG_GUEST_R13: return (®ctx->sctx_r13); case VM_REG_GUEST_R14: return (®ctx->sctx_r14); case VM_REG_GUEST_R15: return (®ctx->sctx_r15); case VM_REG_GUEST_DR0: return (®ctx->sctx_dr0); case VM_REG_GUEST_DR1: return (®ctx->sctx_dr1); case VM_REG_GUEST_DR2: return (®ctx->sctx_dr2); case VM_REG_GUEST_DR3: return (®ctx->sctx_dr3); default: return (NULL); } } static int svm_getreg(void *vcpui, int ident, uint64_t *val) { struct svm_vcpu *vcpu; register_t *reg; vcpu = vcpui; if (ident == VM_REG_GUEST_INTR_SHADOW) { return (svm_get_intr_shadow(vcpu, val)); } if (vmcb_read(vcpu, ident, val) == 0) { return (0); } reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident); if (reg != NULL) { *val = *reg; return (0); } SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident); return (EINVAL); } static int svm_setreg(void *vcpui, int ident, uint64_t val) { struct svm_vcpu *vcpu; register_t *reg; vcpu = vcpui; if (ident == VM_REG_GUEST_INTR_SHADOW) { return (svm_modify_intr_shadow(vcpu, val)); } /* Do not permit user write access to VMCB fields by offset. */ if (!VMCB_ACCESS_OK(ident)) { if (vmcb_write(vcpu, ident, val) == 0) { return (0); } } reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident); if (reg != NULL) { *reg = val; return (0); } if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) { /* Ignore. */ return (0); } /* * XXX deal with CR3 and invalidate TLB entries tagged with the * vcpu's ASID. This needs to be treated differently depending on * whether 'running' is true/false. 
*/ SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident); return (EINVAL); } static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc) { return (vmcb_getdesc(vcpui, reg, desc)); } static int svm_setdesc(void *vcpui, int reg, struct seg_desc *desc) { return (vmcb_setdesc(vcpui, reg, desc)); } #ifdef BHYVE_SNAPSHOT static int svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta) { int ret; uint64_t val; if (meta->op == VM_SNAPSHOT_SAVE) { ret = svm_getreg(vcpui, ident, &val); if (ret != 0) goto done; SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); } else if (meta->op == VM_SNAPSHOT_RESTORE) { SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); ret = svm_setreg(vcpui, ident, val); if (ret != 0) goto done; } else { ret = EINVAL; goto done; } done: return (ret); } #endif static int svm_setcap(void *vcpui, int type, int val) { struct svm_vcpu *vcpu; struct vlapic *vlapic; int error; vcpu = vcpui; error = 0; switch (type) { case VM_CAP_HALT_EXIT: svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT, val); break; case VM_CAP_PAUSE_EXIT: svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PAUSE, val); break; case VM_CAP_UNRESTRICTED_GUEST: /* Unrestricted guest execution cannot be disabled in SVM */ if (val == 0) error = EINVAL; break; case VM_CAP_BPT_EXIT: svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val); break; case VM_CAP_IPI_EXIT: vlapic = vm_lapic(vcpu->vcpu); vlapic->ipi_exit = val; break; case VM_CAP_MASK_HWINTR: vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); vcpu->caps |= (val << VM_CAP_MASK_HWINTR); break; case VM_CAP_RFLAGS_TF: { uint64_t rflags; /* Fetch RFLAGS. */ if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) { error = (EINVAL); break; } if (val) { /* Save current TF bit. */ vcpu->dbg.rflags_tf = rflags & PSL_T; /* Trace next instruction. 
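			 *
			 * Setting PSL_T here arms a #DB trap after the next
			 * guest instruction retires; the IDT_DB intercept
			 * handling above then reports it to userland as
			 * VM_EXITCODE_DB with trace_trap set.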
*/ if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T))) { error = (EINVAL); break; } vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); } else { /* * Restore shadowed RFLAGS.TF only if vCPU was * previously stepped */ if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { rflags &= ~PSL_T; rflags |= vcpu->dbg.rflags_tf; vcpu->dbg.rflags_tf = 0; if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, rflags)) { error = (EINVAL); break; } vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); } } svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val); svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, val); svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, val); break; } default: error = ENOENT; break; } return (error); } static int svm_getcap(void *vcpui, int type, int *retval) { struct svm_vcpu *vcpu; struct vlapic *vlapic; int error; vcpu = vcpui; error = 0; switch (type) { case VM_CAP_HALT_EXIT: *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT); break; case VM_CAP_PAUSE_EXIT: *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PAUSE); break; case VM_CAP_UNRESTRICTED_GUEST: *retval = 1; /* unrestricted guest is always enabled */ break; case VM_CAP_BPT_EXIT: *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP)); break; case VM_CAP_IPI_EXIT: vlapic = vm_lapic(vcpu->vcpu); *retval = vlapic->ipi_exit; break; case VM_CAP_RFLAGS_TF: *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); break; case VM_CAP_MASK_HWINTR: *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); break; default: error = ENOENT; break; } return (error); } static struct vmspace * svm_vmspace_alloc(vm_offset_t min, vm_offset_t max) { return (svm_npt_alloc(min, max)); } static void svm_vmspace_free(struct vmspace *vmspace) { svm_npt_free(vmspace); } static struct vlapic * svm_vlapic_init(void *vcpui) { struct svm_vcpu *vcpu; struct vlapic *vlapic; vcpu = vcpui; vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); vlapic->vm = vcpu->sc->vm; vlapic->vcpu = vcpu->vcpu; vlapic->vcpuid = vcpu->vcpuid; vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, M_WAITOK | M_ZERO); vlapic_init(vlapic); return (vlapic); } static void svm_vlapic_cleanup(struct vlapic *vlapic) { vlapic_cleanup(vlapic); free(vlapic->apic_page, M_SVM_VLAPIC); free(vlapic, M_SVM_VLAPIC); } #ifdef BHYVE_SNAPSHOT static int svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta) { struct svm_vcpu *vcpu; int err, running, hostcpu; vcpu = vcpui; err = 0; running = vcpu_is_running(vcpu->vcpu, &hostcpu); if (running && hostcpu != curcpu) { printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), vcpu->vcpuid); return (EINVAL); } err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta); err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta); /* Guest segments */ /* ES */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta); /* CS */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta); /* SS */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta); err += 
vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta); /* DS */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta); /* FS */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta); /* GS */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta); /* TR */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta); /* LDTR */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta); /* EFER */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta); /* IDTR and GDTR */ err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta); err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta); /* Specific AMD registers */ err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_CPL, 1), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_STAR, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta); err += vmcb_snapshot_any(vcpu, VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta); if (err != 0) goto done; /* Snapshot swctx for virtual cpu */ 
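	/*
	 * Each SNAPSHOT_VAR_OR_LEAVE(v, meta, err, done) below roughly
	 * expands to
	 *
	 *	err = vm_snapshot_buf(&(v), sizeof(v), meta);
	 *	if (err != 0)
	 *		goto done;
	 *
	 * copying the field out on VM_SNAPSHOT_SAVE and back in on
	 * VM_SNAPSHOT_RESTORE, so a failure on any field aborts the
	 * whole vcpu snapshot.
	 */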
SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); /* Restore other svm_vcpu struct fields */ /* Restore NEXTRIP field */ SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); /* Restore lastcpu field */ SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); /* Restore EPTGEN field - EPT is Extended Page Table */ SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); /* Set all caches dirty */ if (meta->op == VM_SNAPSHOT_RESTORE) svm_set_dirty(vcpu, 0xffffffff); done: return (err); } static int svm_restore_tsc(void *vcpui, uint64_t offset) { struct svm_vcpu *vcpu = vcpui; svm_set_tsc_offset(vcpu, offset); return (0); } #endif const struct vmm_ops vmm_ops_amd = { .modinit = svm_modinit, .modcleanup = svm_modcleanup, .modresume = svm_modresume, .modsuspend = svm_modsuspend, .init = svm_init, .run = svm_run, .cleanup = svm_cleanup, .vcpu_init = svm_vcpu_init, .vcpu_cleanup = svm_vcpu_cleanup, .getreg = svm_getreg, .setreg = svm_setreg, .getdesc = svm_getdesc, .setdesc = svm_setdesc, .getcap = svm_getcap, .setcap = svm_setcap, .vmspace_alloc = svm_vmspace_alloc, .vmspace_free = svm_vmspace_free, .vlapic_init = svm_vlapic_init, .vlapic_cleanup = svm_vlapic_cleanup, #ifdef BHYVE_SNAPSHOT .vcpu_snapshot = svm_vcpu_snapshot, .restore_tsc = svm_restore_tsc, #endif }; diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c index 13a53fa8eed6..957217ab2258 100644 --- a/sys/amd64/vmm/intel/vmx.c +++ b/sys/amd64/vmm/intel/vmx.c @@ -1,4303 +1,4305 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * Copyright (c) 2018 Joyent, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_bhyve_snapshot.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include "vmm_lapic.h" #include "vmm_host.h" #include "vmm_ioport.h" #include "vmm_stat.h" #include "vatpic.h" #include "vlapic.h" #include "vlapic_priv.h" #include "ept.h" #include "vmx_cpufunc.h" #include "vmx.h" #include "vmx_msr.h" #include "x86.h" #include "vmx_controls.h" +#include "io/ppt.h" #define PINBASED_CTLS_ONE_SETTING \ (PINBASED_EXTINT_EXITING | \ PINBASED_NMI_EXITING | \ PINBASED_VIRTUAL_NMI) #define PINBASED_CTLS_ZERO_SETTING 0 #define PROCBASED_CTLS_WINDOW_SETTING \ (PROCBASED_INT_WINDOW_EXITING | \ PROCBASED_NMI_WINDOW_EXITING) #define PROCBASED_CTLS_ONE_SETTING \ (PROCBASED_SECONDARY_CONTROLS | \ PROCBASED_MWAIT_EXITING | \ PROCBASED_MONITOR_EXITING | \ PROCBASED_IO_EXITING | \ PROCBASED_MSR_BITMAPS | \ PROCBASED_CTLS_WINDOW_SETTING | \ PROCBASED_CR8_LOAD_EXITING | \ PROCBASED_CR8_STORE_EXITING) #define PROCBASED_CTLS_ZERO_SETTING \ (PROCBASED_CR3_LOAD_EXITING | \ PROCBASED_CR3_STORE_EXITING | \ PROCBASED_IO_BITMAPS) #define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT #define PROCBASED_CTLS2_ZERO_SETTING 0 #define VM_EXIT_CTLS_ONE_SETTING \ (VM_EXIT_SAVE_DEBUG_CONTROLS | \ VM_EXIT_HOST_LMA | \ VM_EXIT_SAVE_EFER | \ VM_EXIT_LOAD_EFER | \ VM_EXIT_ACKNOWLEDGE_INTERRUPT) #define VM_EXIT_CTLS_ZERO_SETTING 0 #define VM_ENTRY_CTLS_ONE_SETTING \ (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ VM_ENTRY_LOAD_EFER) #define VM_ENTRY_CTLS_ZERO_SETTING \ (VM_ENTRY_INTO_SMM | \ VM_ENTRY_DEACTIVATE_DUAL_MONITOR) #define HANDLED 1 #define UNHANDLED 0 static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); bool vmx_have_msr_tsc_aux; SYSCTL_DECL(_hw_vmm); SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); int vmxon_enabled[MAXCPU]; static uint8_t *vmxon_region; static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; static uint32_t exit_ctls, entry_ctls; static uint64_t cr0_ones_mask, cr0_zeros_mask; SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, &cr0_ones_mask, 0, NULL); SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, &cr0_zeros_mask, 0, NULL); static uint64_t cr4_ones_mask, cr4_zeros_mask; SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, &cr4_ones_mask, 0, NULL); SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, &cr4_zeros_mask, 0, NULL); static int vmx_initialized; SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, &vmx_initialized, 0, "Intel VMX initialized"); /* * Optional capabilities */ static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW | 
CTLFLAG_MPSAFE, NULL, NULL); static int cap_halt_exit; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0, "HLT triggers a VM-exit"); static int cap_pause_exit; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit, 0, "PAUSE triggers a VM-exit"); static int cap_wbinvd_exit; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, wbinvd_exit, CTLFLAG_RD, &cap_wbinvd_exit, 0, "WBINVD triggers a VM-exit"); static int cap_rdpid; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, rdpid, CTLFLAG_RD, &cap_rdpid, 0, "Guests are allowed to use RDPID"); static int cap_rdtscp; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, rdtscp, CTLFLAG_RD, &cap_rdtscp, 0, "Guests are allowed to use RDTSCP"); static int cap_unrestricted_guest; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD, &cap_unrestricted_guest, 0, "Unrestricted guests"); static int cap_monitor_trap; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD, &cap_monitor_trap, 0, "Monitor trap flag"); static int cap_invpcid; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid, 0, "Guests are allowed to use INVPCID"); static int tpr_shadowing; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &tpr_shadowing, 0, "TPR shadowing support"); static int virtual_interrupt_delivery; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); static int posted_interrupts; SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &posted_interrupts, 0, "APICv posted interrupt support"); static int pirvec = -1; SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, &pirvec, 0, "APICv posted interrupt vector"); static struct unrhdr *vpid_unr; static u_int vpid_alloc_failed; SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, &vpid_alloc_failed, 0, NULL); int guest_l1d_flush; SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &guest_l1d_flush, 0, NULL); int guest_l1d_flush_sw; SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &guest_l1d_flush_sw, 0, NULL); static struct msr_entry msr_load_list[1] __aligned(16); /* * The definitions of SDT probes for VMX. 
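 *
 * Each probe shows up in DTrace as vmm:vmx:exit:<name>, so (assuming
 * vmm.ko is loaded) rdmsr exits could for instance be aggregated by
 * MSR number with something like:
 *
 *	dtrace -n 'vmm:vmx:exit:rdmsr { @[arg3] = count(); }'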
*/ SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, "struct vmx *", "int", "struct vm_exit *", "uint64_t"); SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, "struct vmx *", "int", "struct vm_exit *", "uint32_t"); SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, "struct vmx *", "int", "struct vm_exit *", "uint32_t"); SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, "struct vmx *", "int", "struct vm_exit *", "uint64_t"); SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, "struct vmx *", "int", "struct vm_exit *"); SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, "struct vmx *", "int", "struct vm_exit *", "uint32_t"); SDT_PROBE_DEFINE4(vmm, vmx, exit, return, "struct vmx *", "int", "struct vm_exit *", "int"); /* * Use the last page below 4GB as the APIC access address. This address is * occupied by the boot firmware so it is guaranteed that it will not conflict * with a page in system memory. 
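 *
 * That is the 4KB page at 0xFFFFF000-0xFFFFFFFF, the very top of the
 * 32-bit physical address space.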
*/ #define APIC_ACCESS_ADDRESS 0xFFFFF000 static int vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc); static int vmx_getreg(void *vcpui, int reg, uint64_t *retval); static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val); static void vmx_inject_pir(struct vlapic *vlapic); #ifdef BHYVE_SNAPSHOT static int vmx_restore_tsc(void *vcpui, uint64_t now); #endif static inline bool host_has_rdpid(void) { return ((cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0); } static inline bool host_has_rdtscp(void) { return ((amd_feature & AMDID_RDTSCP) != 0); } #ifdef KTR static const char * exit_reason_to_str(int reason) { static char reasonbuf[32]; switch (reason) { case EXIT_REASON_EXCEPTION: return "exception"; case EXIT_REASON_EXT_INTR: return "extint"; case EXIT_REASON_TRIPLE_FAULT: return "triplefault"; case EXIT_REASON_INIT: return "init"; case EXIT_REASON_SIPI: return "sipi"; case EXIT_REASON_IO_SMI: return "iosmi"; case EXIT_REASON_SMI: return "smi"; case EXIT_REASON_INTR_WINDOW: return "intrwindow"; case EXIT_REASON_NMI_WINDOW: return "nmiwindow"; case EXIT_REASON_TASK_SWITCH: return "taskswitch"; case EXIT_REASON_CPUID: return "cpuid"; case EXIT_REASON_GETSEC: return "getsec"; case EXIT_REASON_HLT: return "hlt"; case EXIT_REASON_INVD: return "invd"; case EXIT_REASON_INVLPG: return "invlpg"; case EXIT_REASON_RDPMC: return "rdpmc"; case EXIT_REASON_RDTSC: return "rdtsc"; case EXIT_REASON_RSM: return "rsm"; case EXIT_REASON_VMCALL: return "vmcall"; case EXIT_REASON_VMCLEAR: return "vmclear"; case EXIT_REASON_VMLAUNCH: return "vmlaunch"; case EXIT_REASON_VMPTRLD: return "vmptrld"; case EXIT_REASON_VMPTRST: return "vmptrst"; case EXIT_REASON_VMREAD: return "vmread"; case EXIT_REASON_VMRESUME: return "vmresume"; case EXIT_REASON_VMWRITE: return "vmwrite"; case EXIT_REASON_VMXOFF: return "vmxoff"; case EXIT_REASON_VMXON: return "vmxon"; case EXIT_REASON_CR_ACCESS: return "craccess"; case EXIT_REASON_DR_ACCESS: return "draccess"; case EXIT_REASON_INOUT: return "inout"; case EXIT_REASON_RDMSR: return "rdmsr"; case EXIT_REASON_WRMSR: return "wrmsr"; case EXIT_REASON_INVAL_VMCS: return "invalvmcs"; case EXIT_REASON_INVAL_MSR: return "invalmsr"; case EXIT_REASON_MWAIT: return "mwait"; case EXIT_REASON_MTF: return "mtf"; case EXIT_REASON_MONITOR: return "monitor"; case EXIT_REASON_PAUSE: return "pause"; case EXIT_REASON_MCE_DURING_ENTRY: return "mce-during-entry"; case EXIT_REASON_TPR: return "tpr"; case EXIT_REASON_APIC_ACCESS: return "apic-access"; case EXIT_REASON_GDTR_IDTR: return "gdtridtr"; case EXIT_REASON_LDTR_TR: return "ldtrtr"; case EXIT_REASON_EPT_FAULT: return "eptfault"; case EXIT_REASON_EPT_MISCONFIG: return "eptmisconfig"; case EXIT_REASON_INVEPT: return "invept"; case EXIT_REASON_RDTSCP: return "rdtscp"; case EXIT_REASON_VMX_PREEMPT: return "vmxpreempt"; case EXIT_REASON_INVVPID: return "invvpid"; case EXIT_REASON_WBINVD: return "wbinvd"; case EXIT_REASON_XSETBV: return "xsetbv"; case EXIT_REASON_APIC_WRITE: return "apic-write"; default: snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); return (reasonbuf); } } #endif /* KTR */ static int vmx_allow_x2apic_msrs(struct vmx *vmx) { int i, error; error = 0; /* * Allow readonly access to the following x2APIC MSRs from the guest. 
*/ error += guest_msr_ro(vmx, MSR_APIC_ID); error += guest_msr_ro(vmx, MSR_APIC_VERSION); error += guest_msr_ro(vmx, MSR_APIC_LDR); error += guest_msr_ro(vmx, MSR_APIC_SVR); for (i = 0; i < 8; i++) error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i); for (i = 0; i < 8; i++) error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i); for (i = 0; i < 8; i++) error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i); error += guest_msr_ro(vmx, MSR_APIC_ESR); error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER); error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL); error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT); error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0); error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1); error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR); error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER); error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER); error += guest_msr_ro(vmx, MSR_APIC_ICR); /* * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. * * These registers get special treatment described in the section * "Virtualizing MSR-Based APIC Accesses". */ error += guest_msr_rw(vmx, MSR_APIC_TPR); error += guest_msr_rw(vmx, MSR_APIC_EOI); error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI); return (error); } u_long vmx_fix_cr0(u_long cr0) { return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); } u_long vmx_fix_cr4(u_long cr4) { return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); } static void vpid_free(int vpid) { if (vpid < 0 || vpid > 0xffff) panic("vpid_free: invalid vpid %d", vpid); /* * VPIDs [0,vm_maxcpu] are special and are not allocated from * the unit number allocator. */ if (vpid > vm_maxcpu) free_unr(vpid_unr, vpid); } static uint16_t vpid_alloc(int vcpuid) { int x; /* * If the "enable vpid" execution control is not enabled then the * VPID is required to be 0 for all vcpus. */ if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) return (0); /* * Try to allocate a unique VPID for each from the unit number * allocator. */ x = alloc_unr(vpid_unr); if (x == -1) { atomic_add_int(&vpid_alloc_failed, 1); /* * If the unit number allocator does not have enough unique * VPIDs then we need to allocate from the [1,vm_maxcpu] range. * * These VPIDs are not be unique across VMs but this does not * affect correctness because the combined mappings are also * tagged with the EP4TA which is unique for each VM. * * It is still sub-optimal because the invvpid will invalidate * combined mappings for a particular VPID across all EP4TAs. */ return (vcpuid + 1); } return (x); } static void vpid_init(void) { /* * VPID 0 is required when the "enable VPID" execution control is * disabled. * * VPIDs [1,vm_maxcpu] are used as the "overflow namespace" when the * unit number allocator does not have sufficient unique VPIDs to * satisfy the allocation. * * The remaining VPIDs are managed by the unit number allocator. */ vpid_unr = new_unrhdr(vm_maxcpu + 1, 0xffff, NULL); } static void vmx_disable(void *arg __unused) { struct invvpid_desc invvpid_desc = { 0 }; struct invept_desc invept_desc = { 0 }; if (vmxon_enabled[curcpu]) { /* * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b. * * VMXON or VMXOFF are not required to invalidate any TLB * caching structures. This prevents potential retention of * cached information in the TLB between distinct VMX episodes. 
*/ invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc); invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc); vmxoff(); } load_cr4(rcr4() & ~CR4_VMXE); } static int vmx_modcleanup(void) { if (pirvec >= 0) lapic_ipi_free(pirvec); if (vpid_unr != NULL) { delete_unrhdr(vpid_unr); vpid_unr = NULL; } if (nmi_flush_l1d_sw == 1) nmi_flush_l1d_sw = 0; smp_rendezvous(NULL, vmx_disable, NULL, NULL); if (vmxon_region != NULL) kmem_free(vmxon_region, (mp_maxid + 1) * PAGE_SIZE); return (0); } static void vmx_enable(void *arg __unused) { int error; uint64_t feature_control; feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 || (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | IA32_FEATURE_CONTROL_VMX_EN | IA32_FEATURE_CONTROL_LOCK); } load_cr4(rcr4() | CR4_VMXE); *(uint32_t *)&vmxon_region[curcpu * PAGE_SIZE] = vmx_revision(); error = vmxon(&vmxon_region[curcpu * PAGE_SIZE]); if (error == 0) vmxon_enabled[curcpu] = 1; } static void vmx_modsuspend(void) { if (vmxon_enabled[curcpu]) vmx_disable(NULL); } static void vmx_modresume(void) { if (vmxon_enabled[curcpu]) vmx_enable(NULL); } static int vmx_modinit(int ipinum) { int error; uint64_t basic, fixed0, fixed1, feature_control; uint32_t tmp, procbased2_vid_bits; /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ if (!(cpu_feature2 & CPUID2_VMX)) { printf("vmx_modinit: processor does not support VMX " "operation\n"); return (ENXIO); } /* * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits * are set (bits 0 and 2 respectively). */ feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 && (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { printf("vmx_modinit: VMX operation disabled by BIOS\n"); return (ENXIO); } /* * Verify capabilities MSR_VMX_BASIC: * - bit 54 indicates support for INS/OUTS decoding */ basic = rdmsr(MSR_VMX_BASIC); if ((basic & (1UL << 54)) == 0) { printf("vmx_modinit: processor does not support desired basic " "capabilities\n"); return (EINVAL); } /* Check support for primary processor-based VM-execution controls */ error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); if (error) { printf("vmx_modinit: processor does not support desired " "primary processor-based controls\n"); return (error); } /* Clear the processor-based ctl bits that are set on demand */ procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; /* Check support for secondary processor-based VM-execution controls */ error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); if (error) { printf("vmx_modinit: processor does not support desired " "secondary processor-based controls\n"); return (error); } /* Check support for VPID */ error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_VPID, 0, &tmp); if (error == 0) procbased_ctls2 |= PROCBASED2_ENABLE_VPID; /* Check support for pin-based VM-execution controls */ error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); if (error) { printf("vmx_modinit: processor does not support desired " "pin-based controls\n"); return (error); } /* Check support for VM-exit controls */ error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 
VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &exit_ctls); if (error) { printf("vmx_modinit: processor does not support desired " "exit controls\n"); return (error); } /* Check support for VM-entry controls */ error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &entry_ctls); if (error) { printf("vmx_modinit: processor does not support desired " "entry controls\n"); return (error); } /* * Check support for optional features by testing them * as individual bits */ cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_HLT_EXITING, 0, &tmp) == 0); cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_PROCBASED_CTLS, PROCBASED_MTF, 0, &tmp) == 0); cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_PAUSE_EXITING, 0, &tmp) == 0); cap_wbinvd_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, PROCBASED2_WBINVD_EXITING, 0, &tmp) == 0); /* * Check support for RDPID and/or RDTSCP. * * Support a pass-through-based implementation of these via the * "enable RDTSCP" VM-execution control and the "RDTSC exiting" * VM-execution control. * * The "enable RDTSCP" VM-execution control applies to both RDPID * and RDTSCP (see SDM volume 3, section 25.3, "Changes to * Instruction Behavior in VMX Non-root operation"); this is why * only this VM-execution control needs to be enabled in order to * enable passing through whichever of RDPID and/or RDTSCP are * supported by the host. * * The "RDTSC exiting" VM-execution control applies to both RDTSC * and RDTSCP (again, per SDM volume 3, section 25.3), and is * already set up for RDTSC and RDTSCP pass-through by the current * implementation of RDTSC. * * Although RDPID and RDTSCP are optional capabilities, since there * does not currently seem to be a use case for enabling/disabling * these via libvmmapi, choose not to support this and, instead, * just statically always enable or always disable this support * across all vCPUs on all VMs. (Note that there may be some * complications to providing this functionality, e.g., the MSR * bitmap is currently per-VM rather than per-vCPU while the * capability API wants to be able to control capabilities on a * per-vCPU basis). */ error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_RDTSCP, 0, &tmp); cap_rdpid = error == 0 && host_has_rdpid(); cap_rdtscp = error == 0 && host_has_rdtscp(); if (cap_rdpid || cap_rdtscp) { procbased_ctls2 |= PROCBASED2_ENABLE_RDTSCP; vmx_have_msr_tsc_aux = true; } cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp) == 0); cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, &tmp) == 0); /* * Check support for TPR shadow. */ error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, &tmp); if (error == 0) { tpr_shadowing = 1; #ifndef BURN_BRIDGES TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing", &tpr_shadowing); #endif TUNABLE_INT_FETCH("hw.vmm.vmx.cap.tpr_shadowing", &tpr_shadowing); } if (tpr_shadowing) { procbased_ctls |= PROCBASED_USE_TPR_SHADOW; procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; } /* * Check support for virtual interrupt delivery. 
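	 *
	 * The result is only used together with TPR shadowing (note the
	 * error == 0 && tpr_shadowing test below); the Intel SDM requires
	 * "use TPR shadow" to be set before any of these APIC
	 * virtualization controls may be enabled.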
*/ procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | PROCBASED2_VIRTUALIZE_X2APIC_MODE | PROCBASED2_APIC_REGISTER_VIRTUALIZATION | PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, procbased2_vid_bits, 0, &tmp); if (error == 0 && tpr_shadowing) { virtual_interrupt_delivery = 1; #ifndef BURN_BRIDGES TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", &virtual_interrupt_delivery); #endif TUNABLE_INT_FETCH("hw.vmm.vmx.cap.virtual_interrupt_delivery", &virtual_interrupt_delivery); } if (virtual_interrupt_delivery) { procbased_ctls |= PROCBASED_USE_TPR_SHADOW; procbased_ctls2 |= procbased2_vid_bits; procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; /* * Check for Posted Interrupts only if Virtual Interrupt * Delivery is enabled. */ error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, &tmp); if (error == 0) { pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : &IDTVEC(justreturn)); if (pirvec < 0) { if (bootverbose) { printf("vmx_modinit: unable to " "allocate posted interrupt " "vector\n"); } } else { posted_interrupts = 1; #ifndef BURN_BRIDGES TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", &posted_interrupts); #endif TUNABLE_INT_FETCH("hw.vmm.vmx.cap.posted_interrupts", &posted_interrupts); } } } if (posted_interrupts) pinbased_ctls |= PINBASED_POSTED_INTERRUPT; /* Initialize EPT */ error = ept_init(ipinum); if (error) { printf("vmx_modinit: ept initialization failed (%d)\n", error); return (error); } guest_l1d_flush = (cpu_ia32_arch_caps & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; #ifndef BURN_BRIDGES TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); #endif TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush", &guest_l1d_flush); /* * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when * available. Otherwise fall back to the software flush * method which loads enough data from the kernel text to * flush existing L1D content, both on VMX entry and on NMI * return. */ if (guest_l1d_flush) { if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { guest_l1d_flush_sw = 1; #ifndef BURN_BRIDGES TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", &guest_l1d_flush_sw); #endif TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush_sw", &guest_l1d_flush_sw); } if (guest_l1d_flush_sw) { if (nmi_flush_l1d_sw <= 1) nmi_flush_l1d_sw = 1; } else { msr_load_list[0].index = MSR_IA32_FLUSH_CMD; msr_load_list[0].val = IA32_FLUSH_CMD_L1D; } } /* * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 */ fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); cr0_ones_mask = fixed0 & fixed1; cr0_zeros_mask = ~fixed0 & ~fixed1; /* * CR0_PE and CR0_PG can be set to zero in VMX non-root operation * if unrestricted guest execution is allowed. */ if (cap_unrestricted_guest) cr0_ones_mask &= ~(CR0_PG | CR0_PE); /* * Do not allow the guest to set CR0_NW or CR0_CD. 
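	 *
	 * With CR0_NW and CR0_CD in cr0_zeros_mask, CR0 values loaded on
	 * the guest's behalf go through the
	 * (cr0 | cr0_ones_mask) & ~cr0_zeros_mask fixup (see
	 * vmx_fix_cr0() above), so attempts to disable caching are
	 * stripped before they reach the hardware register.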
*/ cr0_zeros_mask |= (CR0_NW | CR0_CD); fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); cr4_ones_mask = fixed0 & fixed1; cr4_zeros_mask = ~fixed0 & ~fixed1; vpid_init(); vmx_msr_init(); /* enable VMX operation */ vmxon_region = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); smp_rendezvous(NULL, vmx_enable, NULL, NULL); vmx_initialized = 1; return (0); } static void vmx_trigger_hostintr(int vector) { uintptr_t func; struct gate_descriptor *gd; gd = &idt[vector]; KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " "invalid vector %d", vector)); KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", vector)); KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " "has invalid type %d", vector, gd->gd_type)); KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " "has invalid dpl %d", vector, gd->gd_dpl)); KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " "for vector %d has invalid selector %d", vector, gd->gd_selector)); KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " "IST %d", vector, gd->gd_ist)); func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); vmx_call_isr(func); } static int vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) { int error, mask_ident, shadow_ident; uint64_t mask_value; if (which != 0 && which != 4) panic("vmx_setup_cr_shadow: unknown cr%d", which); if (which == 0) { mask_ident = VMCS_CR0_MASK; mask_value = cr0_ones_mask | cr0_zeros_mask; shadow_ident = VMCS_CR0_SHADOW; } else { mask_ident = VMCS_CR4_MASK; mask_value = cr4_ones_mask | cr4_zeros_mask; shadow_ident = VMCS_CR4_SHADOW; } error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); if (error) return (error); error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); if (error) return (error); return (0); } #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) static void * vmx_init(struct vm *vm, pmap_t pmap) { int error __diagused; struct vmx *vmx; vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); vmx->vm = vm; vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pmltop)); /* * Clean up EPTP-tagged guest physical and combined mappings * * VMX transitions are not required to invalidate any guest physical * mappings. So, it may be possible for stale guest physical mappings * to be present in the processor TLBs. * * Combined mappings for this EP4TA are also invalidated for all VPIDs. */ ept_invalidate_mappings(vmx->eptp); vmx->msr_bitmap = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, M_WAITOK | M_ZERO); msr_bitmap_initialize(vmx->msr_bitmap); /* * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. * The guest FSBASE and GSBASE are saved and restored during * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are * always restored from the vmcs host state area on vm-exit. * * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in * how they are saved/restored so can be directly accessed by the * guest. * * MSR_EFER is saved and restored in the guest VMCS area on a * VM exit and entry respectively. It is also restored from the * host VMCS area on a VM exit. * * The TSC MSR is exposed read-only. Writes are disallowed as * that will impact the host TSC. 
If the guest does a write * the "use TSC offsetting" execution control is enabled and the * difference between the host TSC and the guest TSC is written * into the TSC offset in the VMCS. * * Guest TSC_AUX support is enabled if any of guest RDPID and/or * guest RDTSCP support are enabled (since, as per Table 2-2 in SDM * volume 4, TSC_AUX is supported if any of RDPID and/or RDTSCP are * supported). If guest TSC_AUX support is enabled, TSC_AUX is * exposed read-only so that the VMM can do one fewer MSR read per * exit than if this register were exposed read-write; the guest * restore value can be updated during guest writes (expected to be * rare) instead of during all exits (common). */ if (guest_msr_rw(vmx, MSR_GSBASE) || guest_msr_rw(vmx, MSR_FSBASE) || guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || guest_msr_rw(vmx, MSR_EFER) || guest_msr_ro(vmx, MSR_TSC) || ((cap_rdpid || cap_rdtscp) && guest_msr_ro(vmx, MSR_TSC_AUX))) panic("vmx_init: error setting guest msr access"); if (virtual_interrupt_delivery) { error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, APIC_ACCESS_ADDRESS); /* XXX this should really return an error to the caller */ KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); } vmx->pmap = pmap; return (vmx); } static void * vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) { struct vmx *vmx = vmi; struct vmcs *vmcs; struct vmx_vcpu *vcpu; uint32_t exc_bitmap; uint16_t vpid; int error; vpid = vpid_alloc(vcpuid); vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO); vcpu->vmx = vmx; vcpu->vcpu = vcpu1; vcpu->vcpuid = vcpuid; vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, M_WAITOK | M_ZERO); vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, M_WAITOK | M_ZERO); vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, M_WAITOK | M_ZERO); vmcs = vcpu->vmcs; vmcs->identifier = vmx_revision(); error = vmclear(vmcs); if (error != 0) { panic("vmx_init: vmclear error %d on vcpu %d\n", error, vcpuid); } vmx_msr_guest_init(vmx, vcpu); error = vmcs_init(vmcs); KASSERT(error == 0, ("vmcs_init error %d", error)); VMPTRLD(vmcs); error = 0; error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); error += vmwrite(VMCS_EPTP, vmx->eptp); error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); if (vcpu_trap_wbinvd(vcpu->vcpu)) { KASSERT(cap_wbinvd_exit, ("WBINVD trap not available")); procbased_ctls2 |= PROCBASED2_WBINVD_EXITING; } error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); error += vmwrite(VMCS_VPID, vpid); if (guest_l1d_flush && !guest_l1d_flush_sw) { vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( (vm_offset_t)&msr_load_list[0])); vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, nitems(msr_load_list)); vmcs_write(VMCS_EXIT_MSR_STORE, 0); vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); } /* exception bitmap */ if (vcpu_trace_exceptions(vcpu->vcpu)) exc_bitmap = 0xffffffff; else exc_bitmap = 1 << IDT_MC; error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); if (tpr_shadowing) { error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); } if (virtual_interrupt_delivery) { error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); error += 
vmwrite(VMCS_EOI_EXIT0, 0); error += vmwrite(VMCS_EOI_EXIT1, 0); error += vmwrite(VMCS_EOI_EXIT2, 0); error += vmwrite(VMCS_EOI_EXIT3, 0); } if (posted_interrupts) { error += vmwrite(VMCS_PIR_VECTOR, pirvec); error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); } VMCLEAR(vmcs); KASSERT(error == 0, ("vmx_init: error customizing the vmcs")); vcpu->cap.set = 0; vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0; vcpu->cap.proc_ctls = procbased_ctls; vcpu->cap.proc_ctls2 = procbased_ctls2; vcpu->cap.exc_bitmap = exc_bitmap; vcpu->state.nextrip = ~0; vcpu->state.lastcpu = NOCPU; vcpu->state.vpid = vpid; /* * Set up the CR0/4 shadows, and init the read shadow * to the power-on register value from the Intel Sys Arch. * CR0 - 0x60000010 * CR4 - 0 */ error = vmx_setup_cr0_shadow(vmcs, 0x60000010); if (error != 0) panic("vmx_setup_cr0_shadow %d", error); error = vmx_setup_cr4_shadow(vmcs, 0); if (error != 0) panic("vmx_setup_cr4_shadow %d", error); vcpu->ctx.pmap = vmx->pmap; return (vcpu); } static int vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx) { int handled; handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax, (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, (uint64_t *)&vmxctx->guest_rdx); return (handled); } static __inline void vmx_run_trace(struct vmx_vcpu *vcpu) { VMX_CTR1(vcpu, "Resume execution at %#lx", vmcs_guest_rip()); } static __inline void vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason, int handled) { VMX_CTR3(vcpu, "%s %s vmexit at 0x%0lx", handled ? "handled" : "unhandled", exit_reason_to_str(exit_reason), rip); } static __inline void vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip) { VMX_CTR1(vcpu, "astpending vmexit at 0x%0lx", rip); } static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); /* * Invalidate guest mappings identified by its vpid from the TLB. */ static __inline void vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running) { struct vmxstate *vmxstate; struct invvpid_desc invvpid_desc; vmxstate = &vcpu->state; if (vmxstate->vpid == 0) return; if (!running) { /* * Set the 'lastcpu' to an invalid host cpu. * * This will invalidate TLB entries tagged with the vcpu's * vpid the next time it runs via vmx_set_pcpu_defaults(). */ vmxstate->lastcpu = NOCPU; return; } KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " "critical section", __func__, vcpu->vcpuid)); /* * Invalidate all mappings tagged with 'vpid' * * We do this because this vcpu was executing on a different host * cpu when it last ran. We do not track whether it invalidated * mappings associated with its 'vpid' during that run. So we must * assume that the mappings associated with 'vpid' on 'curcpu' are * stale and invalidate them. * * Note that we incur this penalty only when the scheduler chooses to * move the thread associated with this vcpu between host cpus. * * Note also that this will invalidate mappings tagged with 'vpid' * for "all" EP4TAs. 
*/ if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) { invvpid_desc._res1 = 0; invvpid_desc._res2 = 0; invvpid_desc.vpid = vmxstate->vpid; invvpid_desc.linear_addr = 0; invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1); } else { /* * The invvpid can be skipped if an invept is going to * be performed before entering the guest. The invept * will invalidate combined mappings tagged with * 'vmx->eptp' for all vpids. */ vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1); } } static void vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap) { struct vmxstate *vmxstate; vmxstate = &vcpu->state; if (vmxstate->lastcpu == curcpu) return; vmxstate->lastcpu = curcpu; vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); vmx_invvpid(vmx, vcpu, pmap, 1); } /* * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. */ CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); static void __inline vmx_set_int_window_exiting(struct vmx_vcpu *vcpu) { if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); VMX_CTR0(vcpu, "Enabling interrupt window exiting"); } } static void __inline vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu) { KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); VMX_CTR0(vcpu, "Disabling interrupt window exiting"); } static void __inline vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu) { if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); VMX_CTR0(vcpu, "Enabling NMI window exiting"); } } static void __inline vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu) { KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls)); vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); VMX_CTR0(vcpu, "Disabling NMI window exiting"); } int vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset) { int error; if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); VMX_CTR0(vcpu, "Enabling TSC offsetting"); } error = vmwrite(VMCS_TSC_OFFSET, offset); #ifdef BHYVE_SNAPSHOT if (error == 0) vm_set_tsc_offset(vcpu->vcpu, offset); #endif return (error); } #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) static void vmx_inject_nmi(struct vmx_vcpu *vcpu) { uint32_t gi __diagused, info; gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " "interruptibility-state %#x", gi)); info = vmcs_read(VMCS_ENTRY_INTR_INFO); KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " "VM-entry interruption information %#x", info)); /* * Inject the virtual NMI. 
The vector must be the NMI IDT entry * or the VMCS entry check will fail. */ info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; vmcs_write(VMCS_ENTRY_INTR_INFO, info); VMX_CTR0(vcpu, "Injecting vNMI"); /* Clear the request */ vm_nmi_clear(vcpu->vcpu); } static void vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic, uint64_t guestrip) { int vector, need_nmi_exiting, extint_pending; uint64_t rflags, entryinfo; uint32_t gi, info; if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) { return; } if (vcpu->state.nextrip != guestrip) { gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); if (gi & HWINTR_BLOCKING) { VMX_CTR2(vcpu, "Guest interrupt blocking " "cleared due to rip change: %#lx/%#lx", vcpu->state.nextrip, guestrip); gi &= ~HWINTR_BLOCKING; vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); } } if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) { KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " "intinfo is not valid: %#lx", __func__, entryinfo)); info = vmcs_read(VMCS_ENTRY_INTR_INFO); KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " "pending exception: %#lx/%#x", __func__, entryinfo, info)); info = entryinfo; vector = info & 0xff; if (vector == IDT_BP || vector == IDT_OF) { /* * VT-x requires #BP and #OF to be injected as software * exceptions. */ info &= ~VMCS_INTR_T_MASK; info |= VMCS_INTR_T_SWEXCEPTION; } if (info & VMCS_INTR_DEL_ERRCODE) vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32); vmcs_write(VMCS_ENTRY_INTR_INFO, info); } if (vm_nmi_pending(vcpu->vcpu)) { /* * If there are no conditions blocking NMI injection then * inject it directly here otherwise enable "NMI window * exiting" to inject it as soon as we can. * * We also check for STI_BLOCKING because some implementations * don't allow NMI injection in this case. If we are running * on a processor that doesn't have this restriction it will * immediately exit and the NMI will be injected in the * "NMI window exiting" handler. */ need_nmi_exiting = 1; gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { info = vmcs_read(VMCS_ENTRY_INTR_INFO); if ((info & VMCS_INTR_VALID) == 0) { vmx_inject_nmi(vcpu); need_nmi_exiting = 0; } else { VMX_CTR1(vcpu, "Cannot inject NMI " "due to VM-entry intr info %#x", info); } } else { VMX_CTR1(vcpu, "Cannot inject NMI due to " "Guest Interruptibility-state %#x", gi); } if (need_nmi_exiting) vmx_set_nmi_window_exiting(vcpu); } extint_pending = vm_extint_pending(vcpu->vcpu); if (!extint_pending && virtual_interrupt_delivery) { vmx_inject_pir(vlapic); return; } /* * If interrupt-window exiting is already in effect then don't bother * checking for pending interrupts. This is just an optimization and * not needed for correctness. */ if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { VMX_CTR0(vcpu, "Skip interrupt injection due to " "pending int_window_exiting"); return; } if (!extint_pending) { /* Ask the local apic for a vector to inject */ if (!vlapic_pending_intr(vlapic, &vector)) return; /* * From the Intel SDM, Volume 3, Section "Maskable * Hardware Interrupts": * - maskable interrupt vectors [16,255] can be delivered * through the local APIC. */ KASSERT(vector >= 16 && vector <= 255, ("invalid vector %d from local APIC", vector)); } else { /* Ask the legacy pic for a vector to inject */ vatpic_pending_intr(vcpu->vmx->vm, &vector); /* * From the Intel SDM, Volume 3, Section "Maskable * Hardware Interrupts": * - maskable interrupt vectors [0,255] can be delivered * through the INTR pin. 
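 */

/*
 * Editor's sketch (illustrative, not part of this change): the VM-entry
 * interruption-information word that the injection code a few lines below
 * composes from VMCS_INTR_* constants.  Per the SDM ("VM-Entry Control
 * Fields"), bits 7:0 carry the vector, bits 10:8 the event type, bit 11
 * the deliver-error-code flag and bit 31 the valid bit.  The helper name
 * is invented for illustration.
 */
static __inline uint32_t
hwintr_entry_info(int vector)
{
	return (VMCS_INTR_VALID | VMCS_INTR_T_HWINTR | (vector & 0xff));
}

/* End of editor's sketch; original code resumes below.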
*/ KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", vector)); } /* Check RFLAGS.IF and the interruptibility state of the guest */ rflags = vmcs_read(VMCS_GUEST_RFLAGS); if ((rflags & PSL_I) == 0) { VMX_CTR2(vcpu, "Cannot inject vector %d due to " "rflags %#lx", vector, rflags); goto cantinject; } gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); if (gi & HWINTR_BLOCKING) { VMX_CTR2(vcpu, "Cannot inject vector %d due to " "Guest Interruptibility-state %#x", vector, gi); goto cantinject; } info = vmcs_read(VMCS_ENTRY_INTR_INFO); if (info & VMCS_INTR_VALID) { /* * This is expected and could happen for multiple reasons: * - A vectoring VM-entry was aborted due to astpending * - A VM-exit happened during event injection. * - An exception was injected above. * - An NMI was injected above or after "NMI window exiting" */ VMX_CTR2(vcpu, "Cannot inject vector %d due to " "VM-entry intr info %#x", vector, info); goto cantinject; } /* Inject the interrupt */ info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; info |= vector; vmcs_write(VMCS_ENTRY_INTR_INFO, info); if (!extint_pending) { /* Update the Local APIC ISR */ vlapic_intr_accepted(vlapic, vector); } else { vm_extint_clear(vcpu->vcpu); vatpic_intr_accepted(vcpu->vmx->vm, vector); /* * After we accepted the current ExtINT the PIC may * have posted another one. If that is the case, set * the Interrupt Window Exiting execution control so * we can inject that one too. * * Also, interrupt window exiting allows us to inject any * pending APIC vector that was preempted by the ExtINT * as soon as possible. This applies both for the software * emulated vlapic and the hardware assisted virtual APIC. */ vmx_set_int_window_exiting(vcpu); } VMX_CTR1(vcpu, "Injecting hwintr at vector %d", vector); return; cantinject: /* * Set the Interrupt Window Exiting execution control so we can inject * the interrupt as soon as blocking condition goes away. */ vmx_set_int_window_exiting(vcpu); } /* * If the Virtual NMIs execution control is '1' then the logical processor * tracks virtual-NMI blocking in the Guest Interruptibility-state field of * the VMCS. An IRET instruction in VMX non-root operation will remove any * virtual-NMI blocking. * * This unblocking occurs even if the IRET causes a fault. In this case the * hypervisor needs to restore virtual-NMI blocking before resuming the guest. */ static void vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu) { uint32_t gi; VMX_CTR0(vcpu, "Restore Virtual-NMI blocking"); gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); } static void vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu) { uint32_t gi; VMX_CTR0(vcpu, "Clear Virtual-NMI blocking"); gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); } static void vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu) { uint32_t gi __diagused; gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, ("NMI blocking is not in effect %#x", gi)); } static int vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit) { struct vmxctx *vmxctx; uint64_t xcrval; const struct xsave_limits *limits; vmxctx = &vcpu->ctx; limits = vmm_get_xsave_limits(); /* * Note that the processor raises a GP# fault on its own if * xsetbv is executed for CPL != 0, so we do not have to * emulate that fault here. */ /* Only xcr0 is supported. 
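 */

/*
 * Editor's sketch (illustrative, not part of this change): the
 * architectural consistency rules that the checks below enforce on the
 * requested %xcr0 value, gathered into a single invented predicate.  The
 * real code tests each rule separately so it can inject #GP as soon as a
 * violation is found.
 */
static __inline bool
xcr0_request_is_consistent(uint64_t xcrval)
{
	/* x87 state must always be enabled. */
	if ((xcrval & XFEATURE_ENABLED_X87) == 0)
		return (false);
	/* AVX (YMM_Hi128) also requires SSE. */
	if ((xcrval & XFEATURE_ENABLED_AVX) != 0 &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX)
		return (false);
	/* AVX-512 requires AVX plus OpMask, ZMM_Hi256 and Hi16_ZMM. */
	if ((xcrval & XFEATURE_AVX512) != 0 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX))
		return (false);
	/* MPX enables both bound-register components or neither. */
	return (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) ==
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0));
}

/* End of editor's sketch; original code resumes below.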
*/ if (vmxctx->guest_rcx != 0) { vm_inject_gp(vcpu->vcpu); return (HANDLED); } /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { vm_inject_ud(vcpu->vcpu); return (HANDLED); } xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); if ((xcrval & ~limits->xcr0_allowed) != 0) { vm_inject_gp(vcpu->vcpu); return (HANDLED); } if (!(xcrval & XFEATURE_ENABLED_X87)) { vm_inject_gp(vcpu->vcpu); return (HANDLED); } /* AVX (YMM_Hi128) requires SSE. */ if (xcrval & XFEATURE_ENABLED_AVX && (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { vm_inject_gp(vcpu->vcpu); return (HANDLED); } /* * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, * ZMM_Hi256, and Hi16_ZMM. */ if (xcrval & XFEATURE_AVX512 && (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != (XFEATURE_AVX512 | XFEATURE_AVX)) { vm_inject_gp(vcpu->vcpu); return (HANDLED); } /* * Intel MPX requires both bound register state flags to be * set. */ if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { vm_inject_gp(vcpu->vcpu); return (HANDLED); } /* * This runs "inside" vmrun() with the guest's FPU state, so * modifying xcr0 directly modifies the guest's xcr0, not the * host's. */ load_xcr(0, xcrval); return (HANDLED); } static uint64_t vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident) { const struct vmxctx *vmxctx; vmxctx = &vcpu->ctx; switch (ident) { case 0: return (vmxctx->guest_rax); case 1: return (vmxctx->guest_rcx); case 2: return (vmxctx->guest_rdx); case 3: return (vmxctx->guest_rbx); case 4: return (vmcs_read(VMCS_GUEST_RSP)); case 5: return (vmxctx->guest_rbp); case 6: return (vmxctx->guest_rsi); case 7: return (vmxctx->guest_rdi); case 8: return (vmxctx->guest_r8); case 9: return (vmxctx->guest_r9); case 10: return (vmxctx->guest_r10); case 11: return (vmxctx->guest_r11); case 12: return (vmxctx->guest_r12); case 13: return (vmxctx->guest_r13); case 14: return (vmxctx->guest_r14); case 15: return (vmxctx->guest_r15); default: panic("invalid vmx register %d", ident); } } static void vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval) { struct vmxctx *vmxctx; vmxctx = &vcpu->ctx; switch (ident) { case 0: vmxctx->guest_rax = regval; break; case 1: vmxctx->guest_rcx = regval; break; case 2: vmxctx->guest_rdx = regval; break; case 3: vmxctx->guest_rbx = regval; break; case 4: vmcs_write(VMCS_GUEST_RSP, regval); break; case 5: vmxctx->guest_rbp = regval; break; case 6: vmxctx->guest_rsi = regval; break; case 7: vmxctx->guest_rdi = regval; break; case 8: vmxctx->guest_r8 = regval; break; case 9: vmxctx->guest_r9 = regval; break; case 10: vmxctx->guest_r10 = regval; break; case 11: vmxctx->guest_r11 = regval; break; case 12: vmxctx->guest_r12 = regval; break; case 13: vmxctx->guest_r13 = regval; break; case 14: vmxctx->guest_r14 = regval; break; case 15: vmxctx->guest_r15 = regval; break; default: panic("invalid vmx register %d", ident); } } static int vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual) { uint64_t crval, regval; /* We only handle mov to %cr0 at this time */ if ((exitqual & 0xf0) != 0x00) return (UNHANDLED); regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); vmcs_write(VMCS_CR0_SHADOW, regval); crval = regval | cr0_ones_mask; crval &= ~cr0_zeros_mask; vmcs_write(VMCS_GUEST_CR0, crval); if (regval & CR0_PG) { uint64_t efer, entry_ctls; /* * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and * the "IA-32e mode guest" bit in VM-entry control must be * 
equal. */ efer = vmcs_read(VMCS_GUEST_IA32_EFER); if (efer & EFER_LME) { efer |= EFER_LMA; vmcs_write(VMCS_GUEST_IA32_EFER, efer); entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); entry_ctls |= VM_ENTRY_GUEST_LMA; vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); } } return (HANDLED); } static int vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual) { uint64_t crval, regval; /* We only handle mov to %cr4 at this time */ if ((exitqual & 0xf0) != 0x00) return (UNHANDLED); regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); vmcs_write(VMCS_CR4_SHADOW, regval); crval = regval | cr4_ones_mask; crval &= ~cr4_zeros_mask; vmcs_write(VMCS_GUEST_CR4, crval); return (HANDLED); } static int vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t exitqual) { struct vlapic *vlapic; uint64_t cr8; int regnum; /* We only handle mov %cr8 to/from a register at this time. */ if ((exitqual & 0xe0) != 0x00) { return (UNHANDLED); } vlapic = vm_lapic(vcpu->vcpu); regnum = (exitqual >> 8) & 0xf; if (exitqual & 0x10) { cr8 = vlapic_get_cr8(vlapic); vmx_set_guest_reg(vcpu, regnum, cr8); } else { cr8 = vmx_get_guest_reg(vcpu, regnum); vlapic_set_cr8(vlapic, cr8); } return (HANDLED); } /* * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL */ static int vmx_cpl(void) { uint32_t ssar; ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); return ((ssar >> 5) & 0x3); } static enum vm_cpu_mode vmx_cpu_mode(void) { uint32_t csar; if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); if (csar & 0x2000) return (CPU_MODE_64BIT); /* CS.L = 1 */ else return (CPU_MODE_COMPATIBILITY); } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { return (CPU_MODE_PROTECTED); } else { return (CPU_MODE_REAL); } } static enum vm_paging_mode vmx_paging_mode(void) { uint64_t cr4; if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) return (PAGING_MODE_FLAT); cr4 = vmcs_read(VMCS_GUEST_CR4); if (!(cr4 & CR4_PAE)) return (PAGING_MODE_32); if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) { if (!(cr4 & CR4_LA57)) return (PAGING_MODE_64); return (PAGING_MODE_64_LA57); } else return (PAGING_MODE_PAE); } static uint64_t inout_str_index(struct vmx_vcpu *vcpu, int in) { uint64_t val; int error __diagused; enum vm_reg_name reg; reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; error = vmx_getreg(vcpu, reg, &val); KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); return (val); } static uint64_t inout_str_count(struct vmx_vcpu *vcpu, int rep) { uint64_t val; int error __diagused; if (rep) { error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val); KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); } else { val = 1; } return (val); } static int inout_str_addrsize(uint32_t inst_info) { uint32_t size; size = (inst_info >> 7) & 0x7; switch (size) { case 0: return (2); /* 16 bit */ case 1: return (4); /* 32 bit */ case 2: return (8); /* 64 bit */ default: panic("%s: invalid size encoding %d", __func__, size); } } static void inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in, struct vm_inout_str *vis) { int error __diagused, s; if (in) { vis->seg_name = VM_REG_GUEST_ES; } else { s = (inst_info >> 15) & 0x7; vis->seg_name = vm_segment_name(s); } error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); } static void vmx_paging_info(struct vm_guest_paging *paging) { paging->cr3 = vmcs_guest_cr3(); paging->cpl = vmx_cpl(); paging->cpu_mode = vmx_cpu_mode(); paging->paging_mode = vmx_paging_mode(); } static void vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) { struct vm_guest_paging *paging; uint32_t csar; paging = &vmexit->u.inst_emul.paging; vmexit->exitcode = VM_EXITCODE_INST_EMUL; vmexit->inst_length = 0; vmexit->u.inst_emul.gpa = gpa; vmexit->u.inst_emul.gla = gla; vmx_paging_info(paging); switch (paging->cpu_mode) { case CPU_MODE_REAL: vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); vmexit->u.inst_emul.cs_d = 0; break; case CPU_MODE_PROTECTED: case CPU_MODE_COMPATIBILITY: vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); break; default: vmexit->u.inst_emul.cs_base = 0; vmexit->u.inst_emul.cs_d = 0; break; } vie_init(&vmexit->u.inst_emul.vie, NULL, 0); } static int ept_fault_type(uint64_t ept_qual) { int fault_type; if (ept_qual & EPT_VIOLATION_DATA_WRITE) fault_type = VM_PROT_WRITE; else if (ept_qual & EPT_VIOLATION_INST_FETCH) fault_type = VM_PROT_EXECUTE; else fault_type= VM_PROT_READ; return (fault_type); } static bool ept_emulation_fault(uint64_t ept_qual) { int read, write; /* EPT fault on an instruction fetch doesn't make sense here */ if (ept_qual & EPT_VIOLATION_INST_FETCH) return (false); /* EPT fault must be a read fault or a write fault */ read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; if ((read | write) == 0) return (false); /* * The EPT violation must have been caused by accessing a * guest-physical address that is a translation of a guest-linear * address. */ if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { return (false); } return (true); } static __inline int apic_access_virtualization(struct vmx_vcpu *vcpu) { uint32_t proc_ctls2; proc_ctls2 = vcpu->cap.proc_ctls2; return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); } static __inline int x2apic_virtualization(struct vmx_vcpu *vcpu) { uint32_t proc_ctls2; proc_ctls2 = vcpu->cap.proc_ctls2; return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 
1 : 0); } static int vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic, uint64_t qual) { int error, handled, offset; uint32_t *apic_regs, vector; bool retu; handled = HANDLED; offset = APIC_WRITE_OFFSET(qual); if (!apic_access_virtualization(vcpu)) { /* * In general there should not be any APIC write VM-exits * unless APIC-access virtualization is enabled. * * However self-IPI virtualization can legitimately trigger * an APIC-write VM-exit so treat it specially. */ if (x2apic_virtualization(vcpu) && offset == APIC_OFFSET_SELF_IPI) { apic_regs = (uint32_t *)(vlapic->apic_page); vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; vlapic_self_ipi_handler(vlapic, vector); return (HANDLED); } else return (UNHANDLED); } switch (offset) { case APIC_OFFSET_ID: vlapic_id_write_handler(vlapic); break; case APIC_OFFSET_LDR: vlapic_ldr_write_handler(vlapic); break; case APIC_OFFSET_DFR: vlapic_dfr_write_handler(vlapic); break; case APIC_OFFSET_SVR: vlapic_svr_write_handler(vlapic); break; case APIC_OFFSET_ESR: vlapic_esr_write_handler(vlapic); break; case APIC_OFFSET_ICR_LOW: retu = false; error = vlapic_icrlo_write_handler(vlapic, &retu); if (error != 0 || retu) handled = UNHANDLED; break; case APIC_OFFSET_CMCI_LVT: case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: vlapic_lvt_write_handler(vlapic, offset); break; case APIC_OFFSET_TIMER_ICR: vlapic_icrtmr_write_handler(vlapic); break; case APIC_OFFSET_TIMER_DCR: vlapic_dcr_write_handler(vlapic); break; default: handled = UNHANDLED; break; } return (handled); } static bool apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa) { if (apic_access_virtualization(vcpu) && (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) return (true); else return (false); } static int vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) { uint64_t qual; int access_type, offset, allowed; if (!apic_access_virtualization(vcpu)) return (UNHANDLED); qual = vmexit->u.vmx.exit_qualification; access_type = APIC_ACCESS_TYPE(qual); offset = APIC_ACCESS_OFFSET(qual); allowed = 0; if (access_type == 0) { /* * Read data access to the following registers is expected. */ switch (offset) { case APIC_OFFSET_APR: case APIC_OFFSET_PPR: case APIC_OFFSET_RRR: case APIC_OFFSET_CMCI_LVT: case APIC_OFFSET_TIMER_CCR: allowed = 1; break; default: break; } } else if (access_type == 1) { /* * Write data access to the following registers is expected. */ switch (offset) { case APIC_OFFSET_VER: case APIC_OFFSET_APR: case APIC_OFFSET_PPR: case APIC_OFFSET_RRR: case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: case APIC_OFFSET_CMCI_LVT: case APIC_OFFSET_TIMER_CCR: allowed = 1; break; default: break; } } if (allowed) { vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, VIE_INVALID_GLA); } /* * Regardless of whether the APIC-access is allowed this handler * always returns UNHANDLED: * - if the access is allowed then it is handled by emulating the * instruction that caused the VM-exit (outside the critical section) * - if the access is not allowed then it will be converted to an * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 
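 */

/*
 * Editor's sketch (illustrative, not part of this change): the
 * exit-qualification layout that APIC_ACCESS_TYPE() and
 * APIC_ACCESS_OFFSET() above decode.  Per the SDM ("Exit Qualification for
 * APIC-Access VM Exits"), bits 11:0 hold the offset into the APIC page and
 * bits 15:12 the access type (0 = linear read, 1 = linear write).  The
 * helper names are invented for illustration.
 */
static __inline int
apic_access_offset_sketch(uint64_t qual)
{
	return (qual & 0xfff);
}

static __inline int
apic_access_type_sketch(uint64_t qual)
{
	return ((qual >> 12) & 0xf);
}

/* End of editor's sketch; original code resumes below.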
*/ return (UNHANDLED); } static enum task_switch_reason vmx_task_switch_reason(uint64_t qual) { int reason; reason = (qual >> 30) & 0x3; switch (reason) { case 0: return (TSR_CALL); case 1: return (TSR_IRET); case 2: return (TSR_JMP); case 3: return (TSR_IDT_GATE); default: panic("%s: invalid reason %d", __func__, reason); } } static int emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu) { int error; if (lapic_msr(num)) error = lapic_wrmsr(vcpu->vcpu, num, val, retu); else error = vmx_wrmsr(vcpu, num, val, retu); return (error); } static int emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu) { struct vmxctx *vmxctx; uint64_t result; uint32_t eax, edx; int error; if (lapic_msr(num)) error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); else error = vmx_rdmsr(vcpu, num, &result, retu); if (error == 0) { eax = result; vmxctx = &vcpu->ctx; error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax); KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error)); edx = result >> 32; error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx); KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error)); } return (error); } static int vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit) { int error, errcode, errcode_valid, handled, in; struct vmxctx *vmxctx; struct vlapic *vlapic; struct vm_inout_str *vis; struct vm_task_switch *ts; uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; uint32_t intr_type, intr_vec, reason; uint64_t exitintinfo, qual, gpa; #ifdef KDTRACE_HOOKS int vcpuid; #endif bool retu; CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); handled = UNHANDLED; vmxctx = &vcpu->ctx; #ifdef KDTRACE_HOOKS vcpuid = vcpu->vcpuid; #endif qual = vmexit->u.vmx.exit_qualification; reason = vmexit->u.vmx.exit_reason; vmexit->exitcode = VM_EXITCODE_BOGUS; vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpuid, vmexit); /* * VM-entry failures during or after loading guest state. * * These VM-exits are uncommon but must be handled specially * as most VM-exit fields are not populated as usual. */ if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { VMX_CTR0(vcpu, "Handling MCE during VM-entry"); __asm __volatile("int $18"); return (1); } /* * VM exits that can be triggered during event delivery need to * be handled specially by re-injecting the event if the IDT * vectoring information field's valid bit is set. * * See "Information for VM Exits During Event Delivery" in Intel SDM * for details. */ idtvec_info = vmcs_idt_vectoring_info(); if (idtvec_info & VMCS_IDT_VEC_VALID) { idtvec_info &= ~(1 << 12); /* clear undefined bit */ exitintinfo = idtvec_info; if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { idtvec_err = vmcs_idt_vectoring_err(); exitintinfo |= (uint64_t)idtvec_err << 32; } error = vm_exit_intinfo(vcpu->vcpu, exitintinfo); KASSERT(error == 0, ("%s: vm_set_intinfo error %d", __func__, error)); /* * If 'virtual NMIs' are being used and the VM-exit * happened while injecting an NMI during the previous * VM-entry, then clear "blocking by NMI" in the * Guest Interruptibility-State so the NMI can be * reinjected on the subsequent VM-entry. * * However, if the NMI was being delivered through a task * gate, then the new task must start execution with NMIs * blocked so don't clear NMI blocking in this case. 
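 */

/*
 * Editor's sketch (illustrative, not part of this change): the packing of
 * the 'exitintinfo' value handed to vm_exit_intinfo() earlier in this
 * block -- the 32-bit IDT-vectoring information (with its undefined bit 12
 * cleared) in the low half and the IDT-vectoring error code, when valid,
 * in the high half.  The helper is invented for illustration.
 */
static __inline uint64_t
pack_exitintinfo(uint32_t vec_info, uint32_t vec_err)
{
	uint64_t info;

	info = vec_info & ~(1 << 12);		/* clear the undefined bit */
	if (vec_info & VMCS_IDT_VEC_ERRCODE_VALID)
		info |= (uint64_t)vec_err << 32;
	return (info);
}

/* End of editor's sketch; original code resumes below.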
*/ intr_type = idtvec_info & VMCS_INTR_T_MASK; if (intr_type == VMCS_INTR_T_NMI) { if (reason != EXIT_REASON_TASK_SWITCH) vmx_clear_nmi_blocking(vcpu); else vmx_assert_nmi_blocking(vcpu); } /* * Update VM-entry instruction length if the event being * delivered was a software interrupt or software exception. */ if (intr_type == VMCS_INTR_T_SWINTR || intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || intr_type == VMCS_INTR_T_SWEXCEPTION) { vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); } } switch (reason) { case EXIT_REASON_TASK_SWITCH: ts = &vmexit->u.task_switch; ts->tsssel = qual & 0xffff; ts->reason = vmx_task_switch_reason(qual); ts->ext = 0; ts->errcode_valid = 0; vmx_paging_info(&ts->paging); /* * If the task switch was due to a CALL, JMP, IRET, software * interrupt (INT n) or software exception (INT3, INTO), * then the saved %rip references the instruction that caused * the task switch. The instruction length field in the VMCS * is valid in this case. * * In all other cases (e.g., NMI, hardware exception) the * saved %rip is one that would have been saved in the old TSS * had the task switch completed normally so the instruction * length field is not needed in this case and is explicitly * set to 0. */ if (ts->reason == TSR_IDT_GATE) { KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, ("invalid idtvec_info %#x for IDT task switch", idtvec_info)); intr_type = idtvec_info & VMCS_INTR_T_MASK; if (intr_type != VMCS_INTR_T_SWINTR && intr_type != VMCS_INTR_T_SWEXCEPTION && intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { /* Task switch triggered by external event */ ts->ext = 1; vmexit->inst_length = 0; if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { ts->errcode_valid = 1; ts->errcode = vmcs_idt_vectoring_err(); } } } vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpuid, vmexit, ts); VMX_CTR4(vcpu, "task switch reason %d, tss 0x%04x, " "%s errcode 0x%016lx", ts->reason, ts->tsssel, ts->ext ? 
"external" : "internal", ((uint64_t)ts->errcode << 32) | ts->errcode_valid); break; case EXIT_REASON_CR_ACCESS: vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1); SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpuid, vmexit, qual); switch (qual & 0xf) { case 0: handled = vmx_emulate_cr0_access(vcpu, qual); break; case 4: handled = vmx_emulate_cr4_access(vcpu, qual); break; case 8: handled = vmx_emulate_cr8_access(vmx, vcpu, qual); break; } break; case EXIT_REASON_RDMSR: vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); retu = false; ecx = vmxctx->guest_rcx; VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx); SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx); error = emulate_rdmsr(vcpu, ecx, &retu); if (error) { vmexit->exitcode = VM_EXITCODE_RDMSR; vmexit->u.msr.code = ecx; } else if (!retu) { handled = HANDLED; } else { /* Return to userspace with a valid exitcode */ KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, ("emulate_rdmsr retu with bogus exitcode")); } break; case EXIT_REASON_WRMSR: vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); retu = false; eax = vmxctx->guest_rax; ecx = vmxctx->guest_rcx; edx = vmxctx->guest_rdx; VMX_CTR2(vcpu, "wrmsr 0x%08x value 0x%016lx", ecx, (uint64_t)edx << 32 | eax); SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx, (uint64_t)edx << 32 | eax); error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax, &retu); if (error) { vmexit->exitcode = VM_EXITCODE_WRMSR; vmexit->u.msr.code = ecx; vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; } else if (!retu) { handled = HANDLED; } else { /* Return to userspace with a valid exitcode */ KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, ("emulate_wrmsr retu with bogus exitcode")); } break; case EXIT_REASON_HLT: vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpuid, vmexit); vmexit->exitcode = VM_EXITCODE_HLT; vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); if (virtual_interrupt_delivery) vmexit->u.hlt.intr_status = vmcs_read(VMCS_GUEST_INTR_STATUS); else vmexit->u.hlt.intr_status = 0; break; case EXIT_REASON_MTF: vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1); SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpuid, vmexit); vmexit->exitcode = VM_EXITCODE_MTRAP; vmexit->inst_length = 0; break; case EXIT_REASON_PAUSE: vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpuid, vmexit); vmexit->exitcode = VM_EXITCODE_PAUSE; break; case EXIT_REASON_INTR_WINDOW: vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1); SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit); vmx_clear_int_window_exiting(vcpu); return (1); case EXIT_REASON_EXT_INTR: /* * External interrupts serve only to cause VM exits and allow * the host interrupt handler to run. * * If this external interrupt triggers a virtual interrupt * to a VM, then that state will be recorded by the * host interrupt handler in the VM's softc. We will inject * this virtual interrupt during the subsequent VM enter. */ intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); SDT_PROBE4(vmm, vmx, exit, interrupt, vmx, vcpuid, vmexit, intr_info); /* * XXX: Ignore this exit if VMCS_INTR_VALID is not set. * This appears to be a bug in VMware Fusion? */ if (!(intr_info & VMCS_INTR_VALID)) return (1); KASSERT((intr_info & VMCS_INTR_VALID) != 0 && (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, ("VM exit interruption info invalid: %#x", intr_info)); vmx_trigger_hostintr(intr_info & 0xff); /* * This is special. We want to treat this as an 'handled' * VM-exit but not increment the instruction pointer. 
*/ vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); return (1); case EXIT_REASON_NMI_WINDOW: SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit); /* Exit to allow the pending virtual NMI to be injected */ if (vm_nmi_pending(vcpu->vcpu)) vmx_inject_nmi(vcpu); vmx_clear_nmi_window_exiting(vcpu); vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1); return (1); case EXIT_REASON_INOUT: vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); vmexit->exitcode = VM_EXITCODE_INOUT; vmexit->u.inout.bytes = (qual & 0x7) + 1; vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; vmexit->u.inout.port = (uint16_t)(qual >> 16); vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); if (vmexit->u.inout.string) { inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); vmexit->exitcode = VM_EXITCODE_INOUT_STR; vis = &vmexit->u.inout_str; vmx_paging_info(&vis->paging); vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); vis->cr0 = vmcs_read(VMCS_GUEST_CR0); vis->index = inout_str_index(vcpu, in); vis->count = inout_str_count(vcpu, vis->inout.rep); vis->addrsize = inout_str_addrsize(inst_info); inout_str_seginfo(vcpu, inst_info, in, vis); } SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit); break; case EXIT_REASON_CPUID: vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit); handled = vmx_handle_cpuid(vcpu, vmxctx); break; case EXIT_REASON_EXCEPTION: vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); KASSERT((intr_info & VMCS_INTR_VALID) != 0, ("VM exit interruption info invalid: %#x", intr_info)); intr_vec = intr_info & 0xff; intr_type = intr_info & VMCS_INTR_T_MASK; /* * If Virtual NMIs control is 1 and the VM-exit is due to a * fault encountered during the execution of IRET then we must * restore the state of "virtual-NMI blocking" before resuming * the guest. * * See "Resuming Guest Software after Handling an Exception". * See "Information for VM Exits Due to Vectored Events". */ if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && (intr_vec != IDT_DF) && (intr_info & EXIT_QUAL_NMIUDTI) != 0) vmx_restore_nmi_blocking(vcpu); /* * The NMI has already been handled in vmx_exit_handle_nmi(). */ if (intr_type == VMCS_INTR_T_NMI) return (1); /* * Call the machine check handler by hand. Also don't reflect * the machine check back into the guest. */ if (intr_vec == IDT_MC) { VMX_CTR0(vcpu, "Vectoring to MCE handler"); __asm __volatile("int $18"); return (1); } /* * If the hypervisor has requested user exits for * debug exceptions, bounce them out to userland. */ if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP && (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) { vmexit->exitcode = VM_EXITCODE_BPT; vmexit->u.bpt.inst_length = vmexit->inst_length; vmexit->inst_length = 0; break; } if (intr_vec == IDT_PF) { error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual); KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d", __func__, error)); } /* * Software exceptions exhibit trap-like behavior. This in * turn requires populating the VM-entry instruction length * so that the %rip in the trap frame is past the INT3/INTO * instruction. 
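 */

/*
 * Editor's sketch (illustrative, not part of this change): the effect of
 * the VM-entry instruction length programmed just below.  For a trap-like
 * event the %rip saved in the guest's exception frame points past the
 * triggering instruction, while for a fault it points at the instruction
 * itself.  The helper is a conceptual model only, invented for
 * illustration.
 */
static __inline uint64_t
injected_frame_rip(uint64_t rip, uint32_t inst_length, bool trap_like)
{
	return (trap_like ? rip + inst_length : rip);
}

/* End of editor's sketch; original code resumes below.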
*/ if (intr_type == VMCS_INTR_T_SWEXCEPTION) vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); /* Reflect all other exceptions back into the guest */ errcode_valid = errcode = 0; if (intr_info & VMCS_INTR_DEL_ERRCODE) { errcode_valid = 1; errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); } VMX_CTR2(vcpu, "Reflecting exception %d/%#x into " "the guest", intr_vec, errcode); SDT_PROBE5(vmm, vmx, exit, exception, vmx, vcpuid, vmexit, intr_vec, errcode); error = vm_inject_exception(vcpu->vcpu, intr_vec, errcode_valid, errcode, 0); KASSERT(error == 0, ("%s: vm_inject_exception error %d", __func__, error)); return (1); case EXIT_REASON_EPT_FAULT: /* * If 'gpa' lies within the address space allocated to * memory then this must be a nested page fault otherwise * this must be an instruction that accesses MMIO space. */ gpa = vmcs_gpa(); if (vm_mem_allocated(vcpu->vcpu, gpa) || - apic_access_fault(vcpu, gpa)) { + ppt_is_mmio(vmx->vm, gpa) || apic_access_fault(vcpu, gpa)) { vmexit->exitcode = VM_EXITCODE_PAGING; vmexit->inst_length = 0; vmexit->u.paging.gpa = gpa; vmexit->u.paging.fault_type = ept_fault_type(qual); vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); SDT_PROBE5(vmm, vmx, exit, nestedfault, vmx, vcpuid, vmexit, gpa, qual); } else if (ept_emulation_fault(qual)) { vmexit_inst_emul(vmexit, gpa, vmcs_gla()); vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); SDT_PROBE4(vmm, vmx, exit, mmiofault, vmx, vcpuid, vmexit, gpa); } /* * If Virtual NMIs control is 1 and the VM-exit is due to an * EPT fault during the execution of IRET then we must restore * the state of "virtual-NMI blocking" before resuming. * * See description of "NMI unblocking due to IRET" in * "Exit Qualification for EPT Violations". */ if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && (qual & EXIT_QUAL_NMIUDTI) != 0) vmx_restore_nmi_blocking(vcpu); break; case EXIT_REASON_VIRTUALIZED_EOI: vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; vmexit->u.ioapic_eoi.vector = qual & 0xFF; SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpuid, vmexit); vmexit->inst_length = 0; /* trap-like */ break; case EXIT_REASON_APIC_ACCESS: SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpuid, vmexit); handled = vmx_handle_apic_access(vcpu, vmexit); break; case EXIT_REASON_APIC_WRITE: /* * APIC-write VM exit is trap-like so the %rip is already * pointing to the next instruction. 
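 */

/*
 * Editor's sketch (illustrative, not part of this change): the
 * EPT-violation exit-qualification bits consulted by ept_fault_type(),
 * ept_emulation_fault() and the EXIT_REASON_EPT_FAULT case above.  Per the
 * SDM ("Exit Qualification for EPT Violations"), bit 0 reports a data
 * read, bit 1 a data write, bit 2 an instruction fetch, bit 7 that a guest
 * linear address was involved and bit 8 that the access was to its
 * translation; a further bit (EXIT_QUAL_NMIUDTI in this file) reports NMI
 * unblocking due to IRET.  The helper is invented for illustration and
 * mirrors ept_emulation_fault().
 */
static __inline bool
ept_qual_is_mmio_candidate(uint64_t qual)
{
	return ((qual & (EPT_VIOLATION_DATA_READ |
	    EPT_VIOLATION_DATA_WRITE)) != 0 &&
	    (qual & EPT_VIOLATION_INST_FETCH) == 0 &&
	    (qual & EPT_VIOLATION_GLA_VALID) != 0 &&
	    (qual & EPT_VIOLATION_XLAT_VALID) != 0);
}

/* End of editor's sketch; original code resumes below.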
*/ vmexit->inst_length = 0; vlapic = vm_lapic(vcpu->vcpu); SDT_PROBE4(vmm, vmx, exit, apicwrite, vmx, vcpuid, vmexit, vlapic); handled = vmx_handle_apic_write(vcpu, vlapic, qual); break; case EXIT_REASON_XSETBV: SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpuid, vmexit); handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); break; case EXIT_REASON_MONITOR: SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpuid, vmexit); vmexit->exitcode = VM_EXITCODE_MONITOR; break; case EXIT_REASON_MWAIT: SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpuid, vmexit); vmexit->exitcode = VM_EXITCODE_MWAIT; break; case EXIT_REASON_TPR: vlapic = vm_lapic(vcpu->vcpu); vlapic_sync_tpr(vlapic); vmexit->inst_length = 0; handled = HANDLED; break; case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMXOFF: case EXIT_REASON_VMXON: SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpuid, vmexit); vmexit->exitcode = VM_EXITCODE_VMINSN; break; case EXIT_REASON_INVD: case EXIT_REASON_WBINVD: /* ignore exit */ handled = HANDLED; break; default: SDT_PROBE4(vmm, vmx, exit, unknown, vmx, vcpuid, vmexit, reason); vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); break; } if (handled) { /* * It is possible that control is returned to userland * even though we were able to handle the VM exit in the * kernel. * * In such a case we want to make sure that the userland * restarts guest execution at the instruction *after* * the one we just processed. Therefore we update the * guest rip in the VMCS and in 'vmexit'. */ vmexit->rip += vmexit->inst_length; vmexit->inst_length = 0; vmcs_write(VMCS_GUEST_RIP, vmexit->rip); } else { if (vmexit->exitcode == VM_EXITCODE_BOGUS) { /* * If this VM exit was not claimed by anybody then * treat it as a generic VMX exit. */ vmexit->exitcode = VM_EXITCODE_VMX; vmexit->u.vmx.status = VM_SUCCESS; vmexit->u.vmx.inst_type = 0; vmexit->u.vmx.inst_error = 0; } else { /* * The exitcode and collateral have been populated. * The VM exit will be processed further in userland. */ } } SDT_PROBE4(vmm, vmx, exit, return, vmx, vcpuid, vmexit, handled); return (handled); } static __inline void vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) { KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, ("vmx_exit_inst_error: invalid inst_fail_status %d", vmxctx->inst_fail_status)); vmexit->inst_length = 0; vmexit->exitcode = VM_EXITCODE_VMX; vmexit->u.vmx.status = vmxctx->inst_fail_status; vmexit->u.vmx.inst_error = vmcs_instruction_error(); vmexit->u.vmx.exit_reason = ~0; vmexit->u.vmx.exit_qualification = ~0; switch (rc) { case VMX_VMRESUME_ERROR: case VMX_VMLAUNCH_ERROR: vmexit->u.vmx.inst_type = rc; break; default: panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); } } /* * If the NMI-exiting VM execution control is set to '1' then an NMI in * non-root operation causes a VM-exit. NMI blocking is in effect so it is * sufficient to simply vector to the NMI handler via a software interrupt. * However, this must be done before maskable interrupts are enabled * otherwise the "iret" issued by an interrupt handler will incorrectly * clear NMI blocking. 
*/ static __inline void vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) { uint32_t intr_info; KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) return; intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); KASSERT((intr_info & VMCS_INTR_VALID) != 0, ("VM exit interruption info invalid: %#x", intr_info)); if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " "to NMI has invalid vector: %#x", intr_info)); VMX_CTR0(vcpu, "Vectoring to NMI handler"); __asm __volatile("int $2"); } } static __inline void vmx_dr_enter_guest(struct vmxctx *vmxctx) { register_t rflags; /* Save host control debug registers. */ vmxctx->host_dr7 = rdr7(); vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); /* * Disable debugging in DR7 and DEBUGCTL to avoid triggering * exceptions in the host based on the guest DRx values. The * guest DR7 and DEBUGCTL are saved/restored in the VMCS. */ load_dr7(0); wrmsr(MSR_DEBUGCTLMSR, 0); /* * Disable single stepping the kernel to avoid corrupting the * guest DR6. A debugger might still be able to corrupt the * guest DR6 by setting a breakpoint after this point and then * single stepping. */ rflags = read_rflags(); vmxctx->host_tf = rflags & PSL_T; write_rflags(rflags & ~PSL_T); /* Save host debug registers. */ vmxctx->host_dr0 = rdr0(); vmxctx->host_dr1 = rdr1(); vmxctx->host_dr2 = rdr2(); vmxctx->host_dr3 = rdr3(); vmxctx->host_dr6 = rdr6(); /* Restore guest debug registers. */ load_dr0(vmxctx->guest_dr0); load_dr1(vmxctx->guest_dr1); load_dr2(vmxctx->guest_dr2); load_dr3(vmxctx->guest_dr3); load_dr6(vmxctx->guest_dr6); } static __inline void vmx_dr_leave_guest(struct vmxctx *vmxctx) { /* Save guest debug registers. */ vmxctx->guest_dr0 = rdr0(); vmxctx->guest_dr1 = rdr1(); vmxctx->guest_dr2 = rdr2(); vmxctx->guest_dr3 = rdr3(); vmxctx->guest_dr6 = rdr6(); /* * Restore host debug registers. Restore DR7, DEBUGCTL, and * PSL_T last. */ load_dr0(vmxctx->host_dr0); load_dr1(vmxctx->host_dr1); load_dr2(vmxctx->host_dr2); load_dr3(vmxctx->host_dr3); load_dr6(vmxctx->host_dr6); wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); load_dr7(vmxctx->host_dr7); write_rflags(read_rflags() | vmxctx->host_tf); } static __inline void vmx_pmap_activate(struct vmx *vmx, pmap_t pmap) { long eptgen; int cpu; cpu = curcpu; CPU_SET_ATOMIC(cpu, &pmap->pm_active); smr_enter(pmap->pm_eptsmr); eptgen = atomic_load_long(&pmap->pm_eptgen); if (eptgen != vmx->eptgen[cpu]) { vmx->eptgen[cpu] = eptgen; invept(INVEPT_TYPE_SINGLE_CONTEXT, (struct invept_desc){ .eptp = vmx->eptp, ._res = 0 }); } } static __inline void vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap) { smr_exit(pmap->pm_eptsmr); CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); } static int vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) { int rc, handled, launched; struct vmx *vmx; struct vmx_vcpu *vcpu; struct vmxctx *vmxctx; struct vmcs *vmcs; struct vm_exit *vmexit; struct vlapic *vlapic; uint32_t exit_reason; struct region_descriptor gdtr, idtr; uint16_t ldt_sel; vcpu = vcpui; vmx = vcpu->vmx; vmcs = vcpu->vmcs; vmxctx = &vcpu->ctx; vlapic = vm_lapic(vcpu->vcpu); vmexit = vm_exitinfo(vcpu->vcpu); launched = 0; KASSERT(vmxctx->pmap == pmap, ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); vmx_msr_guest_enter(vcpu); VMPTRLD(vmcs); /* * XXX * We do this every time because we may setup the virtual machine * from a different process than the one that actually runs it. 
* * If the life of a virtual machine was spent entirely in the context * of a single process we could do this once in vmx_init(). */ vmcs_write(VMCS_HOST_CR3, rcr3()); vmcs_write(VMCS_GUEST_RIP, rip); vmx_set_pcpu_defaults(vmx, vcpu, pmap); do { KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " "%#lx/%#lx", __func__, vmcs_guest_rip(), rip)); handled = UNHANDLED; /* * Interrupts are disabled from this point on until the * guest starts executing. This is done for the following * reasons: * * If an AST is asserted on this thread after the check below, * then the IPI_AST notification will not be lost, because it * will cause a VM exit due to external interrupt as soon as * the guest state is loaded. * * A posted interrupt after 'vmx_inject_interrupts()' will * not be "lost" because it will be held pending in the host * APIC because interrupts are disabled. The pending interrupt * will be recognized as soon as the guest state is loaded. * * The same reasoning applies to the IPI generated by * pmap_invalidate_ept(). */ disable_intr(); vmx_inject_interrupts(vcpu, vlapic, rip); /* * Check for vcpu suspension after injecting events because * vmx_inject_interrupts() can suspend the vcpu due to a * triple fault. */ if (vcpu_suspended(evinfo)) { enable_intr(); vm_exit_suspended(vcpu->vcpu, rip); break; } if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { enable_intr(); vm_exit_rendezvous(vcpu->vcpu, rip); break; } if (vcpu_reqidle(evinfo)) { enable_intr(); vm_exit_reqidle(vcpu->vcpu, rip); break; } if (vcpu_should_yield(vcpu->vcpu)) { enable_intr(); vm_exit_astpending(vcpu->vcpu, rip); vmx_astpending_trace(vcpu, rip); handled = HANDLED; break; } if (vcpu_debugged(vcpu->vcpu)) { enable_intr(); vm_exit_debug(vcpu->vcpu, rip); break; } /* * If TPR Shadowing is enabled, the TPR Threshold * must be updated right before entering the guest. */ if (tpr_shadowing && !virtual_interrupt_delivery) { if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); } } /* * VM exits restore the base address but not the * limits of GDTR and IDTR. The VMCS only stores the * base address, so VM exits set the limits to 0xffff. * Save and restore the full GDTR and IDTR to restore * the limits. * * The VMCS does not save the LDTR at all, and VM * exits clear LDTR as if a NULL selector were loaded. * The userspace hypervisor probably doesn't use a * LDT, but save and restore it to be safe. */ sgdt(&gdtr); sidt(&idtr); ldt_sel = sldt(); /* * The TSC_AUX MSR must be saved/restored while interrupts * are disabled so that it is not possible for the guest * TSC_AUX MSR value to be overwritten by the resume * portion of the IPI_SUSPEND codepath. This is why the * transition of this MSR is handled separately from those * handled by vmx_msr_guest_{enter,exit}(), which are ok to * be transitioned with preemption disabled but interrupts * enabled. * * These vmx_msr_guest_{enter,exit}_tsc_aux() calls can be * anywhere in this loop so long as they happen with * interrupts disabled. This location is chosen for * simplicity. */ vmx_msr_guest_enter_tsc_aux(vmx, vcpu); vmx_dr_enter_guest(vmxctx); /* * Mark the EPT as active on this host CPU and invalidate * EPTP-tagged TLB entries if required. 
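 */

/*
 * Editor's sketch (illustrative, not part of this change): the
 * generation-number handshake behind vmx_pmap_activate().  The side that
 * modifies the EPT bumps a per-pmap generation; every host CPU remembers
 * the last generation it flushed for, and a mismatch on guest entry forces
 * a single-context INVEPT.  The struct and helper are simplified stand-ins
 * invented for illustration.
 */
struct eptgen_sketch {
	long	pm_eptgen;	/* bumped whenever the EPT is modified */
	long	seen[MAXCPU];	/* last generation flushed on each CPU */
};

static __inline bool
eptgen_needs_invept(struct eptgen_sketch *g, int cpu)
{
	long cur;

	cur = atomic_load_long(&g->pm_eptgen);
	if (g->seen[cpu] == cur)
		return (false);
	g->seen[cpu] = cur;	/* the caller performs the INVEPT */
	return (true);
}

/* End of editor's sketch; original code resumes below.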
*/ vmx_pmap_activate(vmx, pmap); vmx_run_trace(vcpu); rc = vmx_enter_guest(vmxctx, vmx, launched); vmx_pmap_deactivate(vmx, pmap); vmx_dr_leave_guest(vmxctx); vmx_msr_guest_exit_tsc_aux(vmx, vcpu); bare_lgdt(&gdtr); lidt(&idtr); lldt(ldt_sel); /* Collect some information for VM exit processing */ vmexit->rip = rip = vmcs_guest_rip(); vmexit->inst_length = vmexit_instruction_length(); vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); /* Update 'nextrip' */ vcpu->state.nextrip = rip; if (rc == VMX_GUEST_VMEXIT) { vmx_exit_handle_nmi(vcpu, vmexit); enable_intr(); handled = vmx_exit_process(vmx, vcpu, vmexit); } else { enable_intr(); vmx_exit_inst_error(vmxctx, rc, vmexit); } launched = 1; vmx_exit_trace(vcpu, rip, exit_reason, handled); rip = vmexit->rip; } while (handled); /* * If a VM exit has been handled then the exitcode must be BOGUS * If a VM exit is not handled then the exitcode must not be BOGUS */ if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { panic("Mismatch between handled (%d) and exitcode (%d)", handled, vmexit->exitcode); } VMX_CTR1(vcpu, "returning from vmx_run: exitcode %d", vmexit->exitcode); VMCLEAR(vmcs); vmx_msr_guest_exit(vcpu); return (0); } static void vmx_vcpu_cleanup(void *vcpui) { struct vmx_vcpu *vcpu = vcpui; vpid_free(vcpu->state.vpid); free(vcpu->pir_desc, M_VMX); free(vcpu->apic_page, M_VMX); free(vcpu->vmcs, M_VMX); free(vcpu, M_VMX); } static void vmx_cleanup(void *vmi) { struct vmx *vmx = vmi; if (virtual_interrupt_delivery) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); free(vmx->msr_bitmap, M_VMX); free(vmx, M_VMX); return; } static register_t * vmxctx_regptr(struct vmxctx *vmxctx, int reg) { switch (reg) { case VM_REG_GUEST_RAX: return (&vmxctx->guest_rax); case VM_REG_GUEST_RBX: return (&vmxctx->guest_rbx); case VM_REG_GUEST_RCX: return (&vmxctx->guest_rcx); case VM_REG_GUEST_RDX: return (&vmxctx->guest_rdx); case VM_REG_GUEST_RSI: return (&vmxctx->guest_rsi); case VM_REG_GUEST_RDI: return (&vmxctx->guest_rdi); case VM_REG_GUEST_RBP: return (&vmxctx->guest_rbp); case VM_REG_GUEST_R8: return (&vmxctx->guest_r8); case VM_REG_GUEST_R9: return (&vmxctx->guest_r9); case VM_REG_GUEST_R10: return (&vmxctx->guest_r10); case VM_REG_GUEST_R11: return (&vmxctx->guest_r11); case VM_REG_GUEST_R12: return (&vmxctx->guest_r12); case VM_REG_GUEST_R13: return (&vmxctx->guest_r13); case VM_REG_GUEST_R14: return (&vmxctx->guest_r14); case VM_REG_GUEST_R15: return (&vmxctx->guest_r15); case VM_REG_GUEST_CR2: return (&vmxctx->guest_cr2); case VM_REG_GUEST_DR0: return (&vmxctx->guest_dr0); case VM_REG_GUEST_DR1: return (&vmxctx->guest_dr1); case VM_REG_GUEST_DR2: return (&vmxctx->guest_dr2); case VM_REG_GUEST_DR3: return (&vmxctx->guest_dr3); case VM_REG_GUEST_DR6: return (&vmxctx->guest_dr6); default: break; } return (NULL); } static int vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) { register_t *regp; if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { *retval = *regp; return (0); } else return (EINVAL); } static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) { register_t *regp; if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { *regp = val; return (0); } else return (EINVAL); } static int vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval) { uint64_t gi; int error; error = vmcs_getreg(vcpu->vmcs, running, VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi); *retval = (gi & 
HWINTR_BLOCKING) ? 1 : 0; return (error); } static int vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val) { struct vmcs *vmcs; uint64_t gi; int error, ident; /* * Forcing the vcpu into an interrupt shadow is not supported. */ if (val) { error = EINVAL; goto done; } vmcs = vcpu->vmcs; ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY); error = vmcs_getreg(vmcs, running, ident, &gi); if (error == 0) { gi &= ~HWINTR_BLOCKING; error = vmcs_setreg(vmcs, running, ident, gi); } done: VMX_CTR2(vcpu, "Setting intr_shadow to %#lx %s", val, error ? "failed" : "succeeded"); return (error); } static int vmx_shadow_reg(int reg) { int shreg; shreg = -1; switch (reg) { case VM_REG_GUEST_CR0: shreg = VMCS_CR0_SHADOW; break; case VM_REG_GUEST_CR4: shreg = VMCS_CR4_SHADOW; break; default: break; } return (shreg); } static int vmx_getreg(void *vcpui, int reg, uint64_t *retval) { int running, hostcpu; struct vmx_vcpu *vcpu = vcpui; struct vmx *vmx = vcpu->vmx; running = vcpu_is_running(vcpu->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu->vcpuid); switch (reg) { case VM_REG_GUEST_INTR_SHADOW: return (vmx_get_intr_shadow(vcpu, running, retval)); case VM_REG_GUEST_KGS_BASE: *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE]; return (0); case VM_REG_GUEST_TPR: *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); return (0); } if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) return (0); return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); } static int vmx_setreg(void *vcpui, int reg, uint64_t val) { int error, hostcpu, running, shadow; uint64_t ctls; pmap_t pmap; struct vmx_vcpu *vcpu = vcpui; struct vmx *vmx = vcpu->vmx; running = vcpu_is_running(vcpu->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu->vcpuid); if (reg == VM_REG_GUEST_INTR_SHADOW) return (vmx_modify_intr_shadow(vcpu, running, val)); if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) return (0); /* Do not permit user write access to VMCS fields by offset. */ if (reg < 0) return (EINVAL); error = vmcs_setreg(vcpu->vmcs, running, reg, val); if (error == 0) { /* * If the "load EFER" VM-entry control is 1 then the * value of EFER.LMA must be identical to "IA-32e mode guest" * bit in the VM-entry control. */ if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && (reg == VM_REG_GUEST_EFER)) { vmcs_getreg(vcpu->vmcs, running, VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); if (val & EFER_LMA) ctls |= VM_ENTRY_GUEST_LMA; else ctls &= ~VM_ENTRY_GUEST_LMA; vmcs_setreg(vcpu->vmcs, running, VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); } shadow = vmx_shadow_reg(reg); if (shadow > 0) { /* * Store the unmodified value in the shadow */ error = vmcs_setreg(vcpu->vmcs, running, VMCS_IDENT(shadow), val); } if (reg == VM_REG_GUEST_CR3) { /* * Invalidate the guest vcpu's TLB mappings to emulate * the behavior of updating %cr3. * * XXX the processor retains global mappings when %cr3 * is updated but vmx_invvpid() does not. 
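 * For example, a guest mapping created with PG_G would survive a
 * real mov to %cr3 but is flushed here; that only costs the guest a
 * few extra TLB refills, it does not affect correctness.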
*/ pmap = vcpu->ctx.pmap; vmx_invvpid(vmx, vcpu, pmap, running); } } return (error); } static int vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc) { int hostcpu, running; struct vmx_vcpu *vcpu = vcpui; struct vmx *vmx = vcpu->vmx; running = vcpu_is_running(vcpu->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu->vcpuid); return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); } static int vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc) { int hostcpu, running; struct vmx_vcpu *vcpu = vcpui; struct vmx *vmx = vcpu->vmx; running = vcpu_is_running(vcpu->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu->vcpuid); return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); } static int vmx_getcap(void *vcpui, int type, int *retval) { struct vmx_vcpu *vcpu = vcpui; int vcap; int ret; ret = ENOENT; vcap = vcpu->cap.set; switch (type) { case VM_CAP_HALT_EXIT: if (cap_halt_exit) ret = 0; break; case VM_CAP_PAUSE_EXIT: if (cap_pause_exit) ret = 0; break; case VM_CAP_MTRAP_EXIT: if (cap_monitor_trap) ret = 0; break; case VM_CAP_RDPID: if (cap_rdpid) ret = 0; break; case VM_CAP_RDTSCP: if (cap_rdtscp) ret = 0; break; case VM_CAP_UNRESTRICTED_GUEST: if (cap_unrestricted_guest) ret = 0; break; case VM_CAP_ENABLE_INVPCID: if (cap_invpcid) ret = 0; break; case VM_CAP_BPT_EXIT: case VM_CAP_IPI_EXIT: ret = 0; break; default: break; } if (ret == 0) *retval = (vcap & (1 << type)) ? 1 : 0; return (ret); } static int vmx_setcap(void *vcpui, int type, int val) { struct vmx_vcpu *vcpu = vcpui; struct vmcs *vmcs = vcpu->vmcs; struct vlapic *vlapic; uint32_t baseval; uint32_t *pptr; int error; int flag; int reg; int retval; retval = ENOENT; pptr = NULL; switch (type) { case VM_CAP_HALT_EXIT: if (cap_halt_exit) { retval = 0; pptr = &vcpu->cap.proc_ctls; baseval = *pptr; flag = PROCBASED_HLT_EXITING; reg = VMCS_PRI_PROC_BASED_CTLS; } break; case VM_CAP_MTRAP_EXIT: if (cap_monitor_trap) { retval = 0; pptr = &vcpu->cap.proc_ctls; baseval = *pptr; flag = PROCBASED_MTF; reg = VMCS_PRI_PROC_BASED_CTLS; } break; case VM_CAP_PAUSE_EXIT: if (cap_pause_exit) { retval = 0; pptr = &vcpu->cap.proc_ctls; baseval = *pptr; flag = PROCBASED_PAUSE_EXITING; reg = VMCS_PRI_PROC_BASED_CTLS; } break; case VM_CAP_RDPID: case VM_CAP_RDTSCP: if (cap_rdpid || cap_rdtscp) /* * Choose not to support enabling/disabling * RDPID/RDTSCP via libvmmapi since, as per the * discussion in vmx_modinit(), RDPID/RDTSCP are * either always enabled or always disabled. */ error = EOPNOTSUPP; break; case VM_CAP_UNRESTRICTED_GUEST: if (cap_unrestricted_guest) { retval = 0; pptr = &vcpu->cap.proc_ctls2; baseval = *pptr; flag = PROCBASED2_UNRESTRICTED_GUEST; reg = VMCS_SEC_PROC_BASED_CTLS; } break; case VM_CAP_ENABLE_INVPCID: if (cap_invpcid) { retval = 0; pptr = &vcpu->cap.proc_ctls2; baseval = *pptr; flag = PROCBASED2_ENABLE_INVPCID; reg = VMCS_SEC_PROC_BASED_CTLS; } break; case VM_CAP_BPT_EXIT: retval = 0; /* Don't change the bitmap if we are tracing all exceptions. 
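 * With the hw.vmm.trace_guest_exceptions tunable set the bitmap is
 * 0xffffffff and #BP already causes an exit, so toggling this
 * capability must leave the bitmap untouched.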
*/ if (vcpu->cap.exc_bitmap != 0xffffffff) { pptr = &vcpu->cap.exc_bitmap; baseval = *pptr; flag = (1 << IDT_BP); reg = VMCS_EXCEPTION_BITMAP; } break; case VM_CAP_IPI_EXIT: retval = 0; vlapic = vm_lapic(vcpu->vcpu); vlapic->ipi_exit = val; break; case VM_CAP_MASK_HWINTR: retval = 0; break; default: break; } if (retval) return (retval); if (pptr != NULL) { if (val) { baseval |= flag; } else { baseval &= ~flag; } VMPTRLD(vmcs); error = vmwrite(reg, baseval); VMCLEAR(vmcs); if (error) return (error); /* * Update optional stored flags, and record * setting */ *pptr = baseval; } if (val) { vcpu->cap.set |= (1 << type); } else { vcpu->cap.set &= ~(1 << type); } return (0); } static struct vmspace * vmx_vmspace_alloc(vm_offset_t min, vm_offset_t max) { return (ept_vmspace_alloc(min, max)); } static void vmx_vmspace_free(struct vmspace *vmspace) { ept_vmspace_free(vmspace); } struct vlapic_vtx { struct vlapic vlapic; struct pir_desc *pir_desc; struct vmx_vcpu *vcpu; u_int pending_prio; }; #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) #define VMX_CTR_PIR(vlapic, pir_desc, notify, vector, level, msg) \ do { \ VLAPIC_CTR2(vlapic, msg " assert %s-triggered vector %d", \ level ? "level" : "edge", vector); \ VLAPIC_CTR1(vlapic, msg " pir0 0x%016lx", pir_desc->pir[0]); \ VLAPIC_CTR1(vlapic, msg " pir1 0x%016lx", pir_desc->pir[1]); \ VLAPIC_CTR1(vlapic, msg " pir2 0x%016lx", pir_desc->pir[2]); \ VLAPIC_CTR1(vlapic, msg " pir3 0x%016lx", pir_desc->pir[3]); \ VLAPIC_CTR1(vlapic, msg " notify: %s", notify ? "yes" : "no"); \ } while (0) /* * vlapic->ops handlers that utilize the APICv hardware assist described in * Chapter 29 of the Intel SDM. */ static int vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) { struct vlapic_vtx *vlapic_vtx; struct pir_desc *pir_desc; uint64_t mask; int idx, notify = 0; vlapic_vtx = (struct vlapic_vtx *)vlapic; pir_desc = vlapic_vtx->pir_desc; /* * Keep track of interrupt requests in the PIR descriptor. This is * because the virtual APIC page pointed to by the VMCS cannot be * modified if the vcpu is running. */ idx = vector / 64; mask = 1UL << (vector % 64); atomic_set_long(&pir_desc->pir[idx], mask); /* * A notification is required whenever the 'pending' bit makes a * transition from 0->1. * * Even if the 'pending' bit is already asserted, notification about * the incoming interrupt may still be necessary. For example, if a * vCPU is HLTed with a high PPR, a low priority interrupt would cause * the 0->1 'pending' transition with a notification, but the vCPU * would ignore the interrupt for the time being. The same vCPU would * need to then be notified if a high-priority interrupt arrived which * satisfied the PPR. * * The priorities of interrupts injected while 'pending' is asserted * are tracked in a custom bitfield 'pending_prio'. Should the * to-be-injected interrupt exceed the priorities already present, the * notification is sent. The priorities recorded in 'pending_prio' are * cleared whenever the 'pending' bit makes another 0->1 transition. 
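 * For example (assuming APIC_TPR_INT masks the priority class, bits
 * 7:4): with vector 0x2a already pending, pending_prio holds
 * VPR_PRIO_BIT(0x20) = (1 << 2); if vector 0x91 then arrives,
 * VPR_PRIO_BIT(0x90) = (1 << 9) is not yet recorded and exceeds the
 * old value, so another notification is sent.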
*/ if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { notify = 1; vlapic_vtx->pending_prio = 0; } else { const u_int old_prio = vlapic_vtx->pending_prio; const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); notify = 1; } } VMX_CTR_PIR(vlapic, pir_desc, notify, vector, level, "vmx_set_intr_ready"); return (notify); } static int vmx_pending_intr(struct vlapic *vlapic, int *vecptr) { struct vlapic_vtx *vlapic_vtx; struct pir_desc *pir_desc; struct LAPIC *lapic; uint64_t pending, pirval; uint8_t ppr, vpr, rvi; struct vm_exit *vmexit; int i; /* * This function is only expected to be called from the 'HLT' exit * handler which does not care about the vector that is pending. */ KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); vlapic_vtx = (struct vlapic_vtx *)vlapic; pir_desc = vlapic_vtx->pir_desc; lapic = vlapic->apic_page; /* * While a virtual interrupt may have already been * processed the actual delivery maybe pending the * interruptibility of the guest. Recognize a pending * interrupt by reevaluating virtual interrupts * following Section 30.2.1 in the Intel SDM Volume 3. */ vmexit = vm_exitinfo(vlapic->vcpu); KASSERT(vmexit->exitcode == VM_EXITCODE_HLT, ("vmx_pending_intr: exitcode not 'HLT'")); rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; ppr = lapic->ppr & APIC_TPR_INT; if (rvi > ppr) return (1); pending = atomic_load_acq_long(&pir_desc->pending); if (!pending) return (0); /* * If there is an interrupt pending then it will be recognized only * if its priority is greater than the processor priority. * * Special case: if the processor priority is zero then any pending * interrupt will be recognized. */ if (ppr == 0) return (1); VLAPIC_CTR1(vlapic, "HLT with non-zero PPR %d", lapic->ppr); vpr = 0; for (i = 3; i >= 0; i--) { pirval = pir_desc->pir[i]; if (pirval != 0) { vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; break; } } /* * If the highest-priority pending interrupt falls short of the * processor priority of this vCPU, ensure that 'pending_prio' does not * have any stale bits which would preclude a higher-priority interrupt * from incurring a notification later. 
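 * For example, with PPR 0x80 and only vector 0x35 pending, vpr is
 * 0x30 <= ppr, so the HLT is not interrupted; pending_prio is trimmed
 * to VPR_PRIO_BIT(0x30) = (1 << 3) so that a later, higher-class
 * interrupt still generates a notification.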
*/ if (vpr <= ppr) { const u_int prio_bit = VPR_PRIO_BIT(vpr); const u_int old = vlapic_vtx->pending_prio; if (old > prio_bit && (old & prio_bit) == 0) { vlapic_vtx->pending_prio = prio_bit; } return (0); } return (1); } static void vmx_intr_accepted(struct vlapic *vlapic, int vector) { panic("vmx_intr_accepted: not expected to be called"); } static void vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) { struct vlapic_vtx *vlapic_vtx; struct vmcs *vmcs; uint64_t mask, val; KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); KASSERT(!vcpu_is_running(vlapic->vcpu, NULL), ("vmx_set_tmr: vcpu cannot be running")); vlapic_vtx = (struct vlapic_vtx *)vlapic; vmcs = vlapic_vtx->vcpu->vmcs; mask = 1UL << (vector % 64); VMPTRLD(vmcs); val = vmcs_read(VMCS_EOI_EXIT(vector)); if (level) val |= mask; else val &= ~mask; vmcs_write(VMCS_EOI_EXIT(vector), val); VMCLEAR(vmcs); } static void vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) { struct vlapic_vtx *vlapic_vtx; struct vmx_vcpu *vcpu; struct vmcs *vmcs; uint32_t proc_ctls; vlapic_vtx = (struct vlapic_vtx *)vlapic; vcpu = vlapic_vtx->vcpu; vmcs = vcpu->vmcs; proc_ctls = vcpu->cap.proc_ctls; proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; proc_ctls |= PROCBASED_CR8_LOAD_EXITING; proc_ctls |= PROCBASED_CR8_STORE_EXITING; vcpu->cap.proc_ctls = proc_ctls; VMPTRLD(vmcs); vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); VMCLEAR(vmcs); } static void vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) { struct vlapic_vtx *vlapic_vtx; struct vmx *vmx; struct vmx_vcpu *vcpu; struct vmcs *vmcs; uint32_t proc_ctls2; int error __diagused; vlapic_vtx = (struct vlapic_vtx *)vlapic; vcpu = vlapic_vtx->vcpu; vmx = vcpu->vmx; vmcs = vcpu->vmcs; proc_ctls2 = vcpu->cap.proc_ctls2; KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; vcpu->cap.proc_ctls2 = proc_ctls2; VMPTRLD(vmcs); vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); VMCLEAR(vmcs); if (vlapic->vcpuid == 0) { /* * The nested page table mappings are shared by all vcpus * so unmap the APIC access page just once. */ error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", __func__, error)); /* * The MSR bitmap is shared by all vcpus so modify it only * once in the context of vcpu 0. */ error = vmx_allow_x2apic_msrs(vmx); KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", __func__, error)); } } static void vmx_post_intr(struct vlapic *vlapic, int hostcpu) { ipi_cpu(hostcpu, pirvec); } /* * Transfer the pending interrupts in the PIR descriptor to the IRR * in the virtual APIC page. 
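 * For example, posting vector 0x41 (65) sets bit 1 of pir[1]; below,
 * the low half of pir[1] is merged into irr2 (vectors 64-95) and the
 * high half into irr3 (96-127), and RVI is recomputed as
 * pirbase + flsl(pirval) - 1 = 64 + 2 - 1 = 65.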
*/ static void vmx_inject_pir(struct vlapic *vlapic) { struct vlapic_vtx *vlapic_vtx; struct pir_desc *pir_desc; struct LAPIC *lapic; uint64_t val, pirval; int rvi, pirbase = -1; uint16_t intr_status_old, intr_status_new; vlapic_vtx = (struct vlapic_vtx *)vlapic; pir_desc = vlapic_vtx->pir_desc; if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { VLAPIC_CTR0(vlapic, "vmx_inject_pir: " "no posted interrupt pending"); return; } pirval = 0; pirbase = -1; lapic = vlapic->apic_page; val = atomic_readandclear_long(&pir_desc->pir[0]); if (val != 0) { lapic->irr0 |= val; lapic->irr1 |= val >> 32; pirbase = 0; pirval = val; } val = atomic_readandclear_long(&pir_desc->pir[1]); if (val != 0) { lapic->irr2 |= val; lapic->irr3 |= val >> 32; pirbase = 64; pirval = val; } val = atomic_readandclear_long(&pir_desc->pir[2]); if (val != 0) { lapic->irr4 |= val; lapic->irr5 |= val >> 32; pirbase = 128; pirval = val; } val = atomic_readandclear_long(&pir_desc->pir[3]); if (val != 0) { lapic->irr6 |= val; lapic->irr7 |= val >> 32; pirbase = 192; pirval = val; } VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); /* * Update RVI so the processor can evaluate pending virtual * interrupts on VM-entry. * * It is possible for pirval to be 0 here, even though the * pending bit has been set. The scenario is: * CPU-Y is sending a posted interrupt to CPU-X, which * is running a guest and processing posted interrupts in h/w. * CPU-X will eventually exit and the state seen in s/w is * the pending bit set, but no PIR bits set. * * CPU-X CPU-Y * (vm running) (host running) * rx posted interrupt * CLEAR pending bit * SET PIR bit * READ/CLEAR PIR bits * SET pending bit * (vm exit) * pending bit set, PIR 0 */ if (pirval != 0) { rvi = pirbase + flsl(pirval) - 1; intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); intr_status_new = (intr_status_old & 0xFF00) | rvi; if (intr_status_new > intr_status_old) { vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); VLAPIC_CTR2(vlapic, "vmx_inject_pir: " "guest_intr_status changed from 0x%04x to 0x%04x", intr_status_old, intr_status_new); } } } static struct vlapic * vmx_vlapic_init(void *vcpui) { struct vmx *vmx; struct vmx_vcpu *vcpu; struct vlapic *vlapic; struct vlapic_vtx *vlapic_vtx; vcpu = vcpui; vmx = vcpu->vmx; vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); vlapic->vm = vmx->vm; vlapic->vcpu = vcpu->vcpu; vlapic->vcpuid = vcpu->vcpuid; vlapic->apic_page = (struct LAPIC *)vcpu->apic_page; vlapic_vtx = (struct vlapic_vtx *)vlapic; vlapic_vtx->pir_desc = vcpu->pir_desc; vlapic_vtx->vcpu = vcpu; if (tpr_shadowing) { vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; } if (virtual_interrupt_delivery) { vlapic->ops.set_intr_ready = vmx_set_intr_ready; vlapic->ops.pending_intr = vmx_pending_intr; vlapic->ops.intr_accepted = vmx_intr_accepted; vlapic->ops.set_tmr = vmx_set_tmr; vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; } if (posted_interrupts) vlapic->ops.post_intr = vmx_post_intr; vlapic_init(vlapic); return (vlapic); } static void vmx_vlapic_cleanup(struct vlapic *vlapic) { vlapic_cleanup(vlapic); free(vlapic, M_VLAPIC); } #ifdef BHYVE_SNAPSHOT static int vmx_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta) { struct vmcs *vmcs; struct vmx *vmx; struct vmx_vcpu *vcpu; struct vmxctx *vmxctx; int err, run, hostcpu; err = 0; vcpu = vcpui; vmx = vcpu->vmx; vmcs = vcpu->vmcs; run = vcpu_is_running(vcpu->vcpu, &hostcpu); if (run && hostcpu != curcpu) { printf("%s: %s%d is running", __func__, vm_name(vmx->vm), vcpu->vcpuid); return 
(EINVAL); } err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR0, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR3, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR4, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_DR7, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RSP, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RIP, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RFLAGS, meta); /* Guest segments */ err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_ES, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_ES, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CS, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_CS, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_SS, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_SS, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_DS, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_DS, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_FS, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_FS, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_GS, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_GS, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_TR, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_TR, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_LDTR, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_LDTR, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_EFER, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_IDTR, meta); err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_GDTR, meta); /* Guest page tables */ err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE0, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE1, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE2, meta); err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE3, meta); /* Other guest state */ err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_CS, meta); err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_ESP, meta); err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_EIP, meta); err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_INTERRUPTIBILITY, meta); err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_ACTIVITY, meta); err += vmcs_snapshot_any(vmcs, run, VMCS_ENTRY_CTLS, meta); err += vmcs_snapshot_any(vmcs, run, VMCS_EXIT_CTLS, meta); if (err != 0) goto done; SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs, sizeof(vcpu->guest_msrs), meta, err, done); SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc, sizeof(*vcpu->pir_desc), meta, err, done); SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); vmxctx = &vcpu->ctx; SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdi, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rsi, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdx, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rcx, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r8, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r9, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rax, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbx, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbp, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r10, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r11, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r12, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r13, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r14, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r15, meta, 
err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_cr2, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr0, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr1, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr2, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr3, meta, err, done); SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr6, meta, err, done); done: return (err); } static int vmx_restore_tsc(void *vcpui, uint64_t offset) { struct vmx_vcpu *vcpu = vcpui; struct vmcs *vmcs; struct vmx *vmx; int error, running, hostcpu; vmx = vcpu->vmx; vmcs = vcpu->vmcs; running = vcpu_is_running(vcpu->vcpu, &hostcpu); if (running && hostcpu != curcpu) { printf("%s: %s%d is running", __func__, vm_name(vmx->vm), vcpu->vcpuid); return (EINVAL); } if (!running) VMPTRLD(vmcs); error = vmx_set_tsc_offset(vcpu, offset); if (!running) VMCLEAR(vmcs); return (error); } #endif const struct vmm_ops vmm_ops_intel = { .modinit = vmx_modinit, .modcleanup = vmx_modcleanup, .modsuspend = vmx_modsuspend, .modresume = vmx_modresume, .init = vmx_init, .run = vmx_run, .cleanup = vmx_cleanup, .vcpu_init = vmx_vcpu_init, .vcpu_cleanup = vmx_vcpu_cleanup, .getreg = vmx_getreg, .setreg = vmx_setreg, .getdesc = vmx_getdesc, .setdesc = vmx_setdesc, .getcap = vmx_getcap, .setcap = vmx_setcap, .vmspace_alloc = vmx_vmspace_alloc, .vmspace_free = vmx_vmspace_free, .vlapic_init = vmx_vlapic_init, .vlapic_cleanup = vmx_vlapic_cleanup, #ifdef BHYVE_SNAPSHOT .vcpu_snapshot = vmx_vcpu_snapshot, .restore_tsc = vmx_restore_tsc, #endif }; diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c index aa13d506ac6a..1d410835be88 100644 --- a/sys/amd64/vmm/vmm.c +++ b/sys/amd64/vmm/vmm.c @@ -1,3069 +1,2694 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ -#include #include "opt_bhyve_snapshot.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include "vmm_ioport.h" #include "vmm_host.h" #include "vmm_mem.h" #include "vmm_util.h" #include "vatpic.h" #include "vatpit.h" #include "vhpet.h" #include "vioapic.h" #include "vlapic.h" #include "vpmtmr.h" #include "vrtc.h" #include "vmm_stat.h" #include "vmm_lapic.h" #include "io/ppt.h" #include "io/iommu.h" struct vlapic; /* * Initialization: * (a) allocated when vcpu is created * (i) initialized when vcpu is created and when it is reinitialized * (o) initialized the first time the vcpu is created * (x) initialized before use */ struct vcpu { struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */ enum vcpu_state state; /* (o) vcpu state */ int vcpuid; /* (o) */ int hostcpu; /* (o) vcpu's host cpu */ int reqidle; /* (i) request vcpu to idle */ struct vm *vm; /* (o) */ void *cookie; /* (i) cpu-specific data */ struct vlapic *vlapic; /* (i) APIC device model */ enum x2apic_state x2apic_state; /* (i) APIC mode */ uint64_t exitintinfo; /* (i) events pending at VM exit */ int nmi_pending; /* (i) NMI pending */ int extint_pending; /* (i) INTR pending */ int exception_pending; /* (i) exception pending */ int exc_vector; /* (x) exception collateral */ int exc_errcode_valid; uint32_t exc_errcode; struct savefpu *guestfpu; /* (a,i) guest fpu state */ uint64_t guest_xcr0; /* (i) guest %xcr0 register */ void *stats; /* (a,i) statistics */ struct vm_exit exitinfo; /* (x) exit reason and collateral */ cpuset_t exitinfo_cpuset; /* (x) storage for vmexit handlers */ uint64_t nextrip; /* (x) next instruction to execute */ uint64_t tsc_offset; /* (o) TSC offsetting */ }; #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx)) #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx)) #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx)) #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED) -struct mem_seg { - size_t len; - bool sysmem; - struct vm_object *object; -}; -#define VM_MAX_MEMSEGS 4 - -struct mem_map { - vm_paddr_t gpa; - size_t len; - vm_ooffset_t segoff; - int segid; - int prot; - int flags; -}; -#define VM_MAX_MEMMAPS 8 - /* * Initialization: * (o) initialized the first time the VM is created * (i) initialized when VM is created and when it is reinitialized * (x) initialized before use * * Locking: * [m] mem_segs_lock * [r] rendezvous_mtx * [v] reads require one frozen vcpu, writes require freezing all vcpus */ struct vm { void *cookie; /* (i) cpu-specific data */ void *iommu; /* (x) iommu-specific data */ struct vhpet *vhpet; /* (i) virtual HPET */ struct vioapic *vioapic; /* (i) virtual ioapic */ struct vatpic *vatpic; /* (i) virtual atpic */ struct vatpit *vatpit; /* (i) virtual atpit */ struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */ struct vrtc *vrtc; /* (o) virtual RTC */ volatile cpuset_t active_cpus; /* (i) active vcpus */ volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */ cpuset_t startup_cpus; /* (i) [r] waiting for startup */ int suspend; /* (i) stop VM execution */ bool dying; /* (o) is dying */ volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */ volatile cpuset_t halted_cpus; /* (x) 
cpus in a hard halt */ cpuset_t rendezvous_req_cpus; /* (x) [r] rendezvous requested */ cpuset_t rendezvous_done_cpus; /* (x) [r] rendezvous finished */ void *rendezvous_arg; /* (x) [r] rendezvous func/arg */ vm_rendezvous_func_t rendezvous_func; struct mtx rendezvous_mtx; /* (o) rendezvous lock */ - struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) [m+v] guest address space */ - struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) [m+v] guest memory regions */ struct vmspace *vmspace; /* (o) guest's address space */ + struct vm_mem mem; /* (i) [m+v] guest memory */ char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */ struct vcpu **vcpu; /* (o) guest vcpus */ /* The following describe the vm cpu topology */ uint16_t sockets; /* (o) num of sockets */ uint16_t cores; /* (o) num of cores/socket */ uint16_t threads; /* (o) num of threads/core */ uint16_t maxcpus; /* (o) max pluggable cpus */ - struct sx mem_segs_lock; /* (o) */ struct sx vcpus_init_lock; /* (o) */ }; #define VMM_CTR0(vcpu, format) \ VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format) #define VMM_CTR1(vcpu, format, p1) \ VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1) #define VMM_CTR2(vcpu, format, p1, p2) \ VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2) #define VMM_CTR3(vcpu, format, p1, p2, p3) \ VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3) #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \ VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4) static int vmm_initialized; static void vmmops_panic(void); static void vmmops_panic(void) { panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()"); } #define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \ DEFINE_IFUNC(static, ret_type, vmmops_##opname, args) \ { \ if (vmm_is_intel()) \ return (vmm_ops_intel.opname); \ else if (vmm_is_svm()) \ return (vmm_ops_amd.opname); \ else \ return ((ret_type (*)args)vmmops_panic); \ } DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum)) DEFINE_VMMOPS_IFUNC(int, modcleanup, (void)) DEFINE_VMMOPS_IFUNC(void, modsuspend, (void)) DEFINE_VMMOPS_IFUNC(void, modresume, (void)) DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap)) DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap, struct vm_eventinfo *info)) DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi)) DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu, int vcpu_id)) DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui)) DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval)) DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val)) DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc)) DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc)) DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval)) DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val)) DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min, vm_offset_t max)) DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace)) DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui)) DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic)) #ifdef BHYVE_SNAPSHOT DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui, struct vm_snapshot_meta *meta)) DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now)) #endif SDT_PROVIDER_DEFINE(vmm); static MALLOC_DEFINE(M_VM, "vm", "vm"); /* statistics */ static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime"); SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | 
CTLFLAG_MPSAFE, NULL, NULL); /* * Halt the guest if all vcpus are executing a HLT instruction with * interrupts disabled. */ static int halt_detection_enabled = 1; SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN, &halt_detection_enabled, 0, "Halt VM if all vcpus execute HLT with interrupts disabled"); static int vmm_ipinum; SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0, "IPI vector used for vcpu notifications"); static int trace_guest_exceptions; SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN, &trace_guest_exceptions, 0, "Trap into hypervisor on all guest exceptions and reflect them back"); static int trap_wbinvd; SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0, "WBINVD triggers a VM-exit"); u_int vm_maxcpu; SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &vm_maxcpu, 0, "Maximum number of vCPUs"); -static void vm_free_memmap(struct vm *vm, int ident); -static bool sysmem_mapping(struct vm *vm, struct mem_map *mm); static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr); /* global statistics */ VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus"); VMM_STAT(VMEXIT_COUNT, "total number of vm exits"); VMM_STAT(VMEXIT_EXTINT, "vm exits due to external interrupt"); VMM_STAT(VMEXIT_HLT, "number of times hlt was intercepted"); VMM_STAT(VMEXIT_CR_ACCESS, "number of times %cr access was intercepted"); VMM_STAT(VMEXIT_RDMSR, "number of times rdmsr was intercepted"); VMM_STAT(VMEXIT_WRMSR, "number of times wrmsr was intercepted"); VMM_STAT(VMEXIT_MTRAP, "number of monitor trap exits"); VMM_STAT(VMEXIT_PAUSE, "number of times pause was intercepted"); VMM_STAT(VMEXIT_INTR_WINDOW, "vm exits due to interrupt window opening"); VMM_STAT(VMEXIT_NMI_WINDOW, "vm exits due to nmi window opening"); VMM_STAT(VMEXIT_INOUT, "number of times in/out was intercepted"); VMM_STAT(VMEXIT_CPUID, "number of times cpuid was intercepted"); VMM_STAT(VMEXIT_NESTED_FAULT, "vm exits due to nested page fault"); VMM_STAT(VMEXIT_INST_EMUL, "vm exits for instruction emulation"); VMM_STAT(VMEXIT_UNKNOWN, "number of vm exits for unknown reason"); VMM_STAT(VMEXIT_ASTPENDING, "number of times astpending at exit"); VMM_STAT(VMEXIT_REQIDLE, "number of times idle requested at exit"); VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace"); VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit"); VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions"); /* * Upper limit on vm_maxcpu. Limited by use of uint16_t types for CPU * counts as well as range of vpid values for VT-x and by the capacity * of cpuset_t masks. The call to new_unrhdr() in vpid_init() in * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below. 
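 * Either term of the MIN() below may dominate: CPU_SETSIZE follows
 * the kernel's MAXCPU setting, while 0xffff - 1 (0xfffe) is the
 * ceiling imposed by the 16-bit vpid space.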
*/ #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE) #ifdef KTR static const char * vcpu_state2str(enum vcpu_state state) { switch (state) { case VCPU_IDLE: return ("idle"); case VCPU_FROZEN: return ("frozen"); case VCPU_RUNNING: return ("running"); case VCPU_SLEEPING: return ("sleeping"); default: return ("unknown"); } } #endif static void vcpu_cleanup(struct vcpu *vcpu, bool destroy) { vmmops_vlapic_cleanup(vcpu->vlapic); vmmops_vcpu_cleanup(vcpu->cookie); vcpu->cookie = NULL; if (destroy) { vmm_stat_free(vcpu->stats); fpu_save_area_free(vcpu->guestfpu); vcpu_lock_destroy(vcpu); free(vcpu, M_VM); } } static struct vcpu * vcpu_alloc(struct vm *vm, int vcpu_id) { struct vcpu *vcpu; KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, ("vcpu_init: invalid vcpu %d", vcpu_id)); vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO); vcpu_lock_init(vcpu); vcpu->state = VCPU_IDLE; vcpu->hostcpu = NOCPU; vcpu->vcpuid = vcpu_id; vcpu->vm = vm; vcpu->guestfpu = fpu_save_area_alloc(); vcpu->stats = vmm_stat_alloc(); vcpu->tsc_offset = 0; return (vcpu); } static void vcpu_init(struct vcpu *vcpu) { vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie); vm_set_x2apic_state(vcpu, X2APIC_DISABLED); vcpu->reqidle = 0; vcpu->exitintinfo = 0; vcpu->nmi_pending = 0; vcpu->extint_pending = 0; vcpu->exception_pending = 0; vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; fpu_save_area_reset(vcpu->guestfpu); vmm_stat_init(vcpu->stats); } int vcpu_trace_exceptions(struct vcpu *vcpu) { return (trace_guest_exceptions); } int vcpu_trap_wbinvd(struct vcpu *vcpu) { return (trap_wbinvd); } struct vm_exit * vm_exitinfo(struct vcpu *vcpu) { return (&vcpu->exitinfo); } cpuset_t * vm_exitinfo_cpuset(struct vcpu *vcpu) { return (&vcpu->exitinfo_cpuset); } static int vmm_init(void) { if (!vmm_is_hw_supported()) return (ENXIO); vm_maxcpu = mp_ncpus; TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu); if (vm_maxcpu > VM_MAXCPU) { printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU); vm_maxcpu = VM_MAXCPU; } if (vm_maxcpu == 0) vm_maxcpu = 1; vmm_host_state_init(); vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : &IDTVEC(justreturn)); if (vmm_ipinum < 0) vmm_ipinum = IPI_AST; vmm_suspend_p = vmmops_modsuspend; vmm_resume_p = vmmops_modresume; return (vmmops_modinit(vmm_ipinum)); } static int vmm_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if (vmm_is_hw_supported()) { error = vmmdev_init(); if (error != 0) break; error = vmm_init(); if (error == 0) vmm_initialized = 1; else (void)vmmdev_cleanup(); } else { error = ENXIO; } break; case MOD_UNLOAD: if (vmm_is_hw_supported()) { error = vmmdev_cleanup(); if (error == 0) { vmm_suspend_p = NULL; vmm_resume_p = NULL; iommu_cleanup(); if (vmm_ipinum != IPI_AST) lapic_ipi_free(vmm_ipinum); error = vmmops_modcleanup(); /* * Something bad happened - prevent new * VMs from being created */ if (error) vmm_initialized = 0; } } else { error = 0; } break; default: error = 0; break; } return (error); } static moduledata_t vmm_kmod = { "vmm", vmm_handler, NULL }; /* * vmm initialization has the following dependencies: * * - VT-x initialization requires smp_rendezvous() and therefore must happen * after SMP is fully functional (after SI_SUB_SMP). * - vmm device initialization requires an initialized devfs. 
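 * Hence the DECLARE_MODULE() below uses MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1
 * so the module initializes after both subsystems.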
*/ DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY); MODULE_VERSION(vmm, 1); static void vm_init(struct vm *vm, bool create) { vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); vm->iommu = NULL; vm->vioapic = vioapic_init(vm); vm->vhpet = vhpet_init(vm); vm->vatpic = vatpic_init(vm); vm->vatpit = vatpit_init(vm); vm->vpmtmr = vpmtmr_init(vm); if (create) vm->vrtc = vrtc_init(vm); CPU_ZERO(&vm->active_cpus); CPU_ZERO(&vm->debug_cpus); CPU_ZERO(&vm->startup_cpus); vm->suspend = 0; CPU_ZERO(&vm->suspended_cpus); if (!create) { for (int i = 0; i < vm->maxcpus; i++) { if (vm->vcpu[i] != NULL) vcpu_init(vm->vcpu[i]); } } } void vm_disable_vcpu_creation(struct vm *vm) { sx_xlock(&vm->vcpus_init_lock); vm->dying = true; sx_xunlock(&vm->vcpus_init_lock); } struct vcpu * vm_alloc_vcpu(struct vm *vm, int vcpuid) { struct vcpu *vcpu; if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) return (NULL); vcpu = (struct vcpu *) atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); if (__predict_true(vcpu != NULL)) return (vcpu); sx_xlock(&vm->vcpus_init_lock); vcpu = vm->vcpu[vcpuid]; if (vcpu == NULL && !vm->dying) { vcpu = vcpu_alloc(vm, vcpuid); vcpu_init(vcpu); /* * Ensure vCPU is fully created before updating pointer * to permit unlocked reads above. */ atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], (uintptr_t)vcpu); } sx_xunlock(&vm->vcpus_init_lock); return (vcpu); } void vm_slock_vcpus(struct vm *vm) { sx_slock(&vm->vcpus_init_lock); } void vm_unlock_vcpus(struct vm *vm) { sx_unlock(&vm->vcpus_init_lock); } /* * The default CPU topology is a single thread per package. */ u_int cores_per_package = 1; u_int threads_per_core = 1; int vm_create(const char *name, struct vm **retvm) { struct vm *vm; struct vmspace *vmspace; /* * If vmm.ko could not be successfully initialized then don't attempt * to create the virtual machine. */ if (!vmm_initialized) return (ENXIO); if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) == VM_MAX_NAMELEN + 1) return (EINVAL); vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48); if (vmspace == NULL) return (ENOMEM); vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); strcpy(vm->name, name); vm->vmspace = vmspace; + vm_mem_init(&vm->mem); mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); - sx_init(&vm->mem_segs_lock, "vm mem_segs"); sx_init(&vm->vcpus_init_lock, "vm vcpus"); vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | M_ZERO); vm->sockets = 1; vm->cores = cores_per_package; /* XXX backwards compatibility */ vm->threads = threads_per_core; /* XXX backwards compatibility */ vm->maxcpus = vm_maxcpu; vm_init(vm, true); *retvm = vm; return (0); } void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus) { *sockets = vm->sockets; *cores = vm->cores; *threads = vm->threads; *maxcpus = vm->maxcpus; } uint16_t vm_get_maxcpus(struct vm *vm) { return (vm->maxcpus); } int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus __unused) { /* Ignore maxcpus. 
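 * The VM's maxcpus was fixed at creation time from vm_maxcpu, so only
 * the product sockets * cores * threads is validated against it, e.g.
 * 2 sockets x 2 cores x 2 threads requires maxcpus >= 8.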
*/ if ((sockets * cores * threads) > vm->maxcpus) return (EINVAL); vm->sockets = sockets; vm->cores = cores; vm->threads = threads; return(0); } static void vm_cleanup(struct vm *vm, bool destroy) { - struct mem_map *mm; - int i; - if (destroy) vm_xlock_memsegs(vm); + else + vm_assert_memseg_xlocked(vm); ppt_unassign_all(vm); if (vm->iommu != NULL) iommu_destroy_domain(vm->iommu); if (destroy) vrtc_cleanup(vm->vrtc); else vrtc_reset(vm->vrtc); vpmtmr_cleanup(vm->vpmtmr); vatpit_cleanup(vm->vatpit); vhpet_cleanup(vm->vhpet); vatpic_cleanup(vm->vatpic); vioapic_cleanup(vm->vioapic); - for (i = 0; i < vm->maxcpus; i++) { + for (int i = 0; i < vm->maxcpus; i++) { if (vm->vcpu[i] != NULL) vcpu_cleanup(vm->vcpu[i], destroy); } vmmops_cleanup(vm->cookie); - /* - * System memory is removed from the guest address space only when - * the VM is destroyed. This is because the mapping remains the same - * across VM reset. - * - * Device memory can be relocated by the guest (e.g. using PCI BARs) - * so those mappings are removed on a VM reset. - */ - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (destroy || !sysmem_mapping(vm, mm)) - vm_free_memmap(vm, i); - } + vm_mem_cleanup(vm); if (destroy) { - for (i = 0; i < VM_MAX_MEMSEGS; i++) - vm_free_memseg(vm, i); - vm_unlock_memsegs(vm); + vm_mem_destroy(vm); vmmops_vmspace_free(vm->vmspace); vm->vmspace = NULL; free(vm->vcpu, M_VM); sx_destroy(&vm->vcpus_init_lock); - sx_destroy(&vm->mem_segs_lock); mtx_destroy(&vm->rendezvous_mtx); } } void vm_destroy(struct vm *vm) { vm_cleanup(vm, true); free(vm, M_VM); } int vm_reinit(struct vm *vm) { int error; /* * A virtual machine can be reset only if all vcpus are suspended. */ if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { vm_cleanup(vm, false); vm_init(vm, false); error = 0; } else { error = EBUSY; } return (error); } const char * vm_name(struct vm *vm) { return (vm->name); } -void -vm_slock_memsegs(struct vm *vm) -{ - sx_slock(&vm->mem_segs_lock); -} - -void -vm_xlock_memsegs(struct vm *vm) -{ - sx_xlock(&vm->mem_segs_lock); -} - -void -vm_unlock_memsegs(struct vm *vm) -{ - sx_unlock(&vm->mem_segs_lock); -} - int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) { vm_object_t obj; if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) return (ENOMEM); else return (0); } int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) { vmm_mmio_free(vm->vmspace, gpa, len); return (0); } -/* - * Return 'true' if 'gpa' is allocated in the guest address space. - * - * This function is called in the context of a running vcpu which acts as - * an implicit lock on 'vm->mem_maps[]'. 
- */ -bool -vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) -{ - struct vm *vm = vcpu->vm; - struct mem_map *mm; - int i; - -#ifdef INVARIANTS - int hostcpu, state; - state = vcpu_get_state(vcpu, &hostcpu); - KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, - ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); -#endif - - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) - return (true); /* 'gpa' is sysmem or devmem */ - } - - if (ppt_is_mmio(vm, gpa)) - return (true); /* 'gpa' is pci passthru mmio */ - - return (false); -} - -int -vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) -{ - struct mem_seg *seg; - vm_object_t obj; - - sx_assert(&vm->mem_segs_lock, SX_XLOCKED); - - if (ident < 0 || ident >= VM_MAX_MEMSEGS) - return (EINVAL); - - if (len == 0 || (len & PAGE_MASK)) - return (EINVAL); - - seg = &vm->mem_segs[ident]; - if (seg->object != NULL) { - if (seg->len == len && seg->sysmem == sysmem) - return (EEXIST); - else - return (EINVAL); - } - - obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT); - if (obj == NULL) - return (ENOMEM); - - seg->len = len; - seg->object = obj; - seg->sysmem = sysmem; - return (0); -} - -int -vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, - vm_object_t *objptr) -{ - struct mem_seg *seg; - - sx_assert(&vm->mem_segs_lock, SX_LOCKED); - - if (ident < 0 || ident >= VM_MAX_MEMSEGS) - return (EINVAL); - - seg = &vm->mem_segs[ident]; - if (len) - *len = seg->len; - if (sysmem) - *sysmem = seg->sysmem; - if (objptr) - *objptr = seg->object; - return (0); -} - -void -vm_free_memseg(struct vm *vm, int ident) -{ - struct mem_seg *seg; - - KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, - ("%s: invalid memseg ident %d", __func__, ident)); - - seg = &vm->mem_segs[ident]; - if (seg->object != NULL) { - vm_object_deallocate(seg->object); - bzero(seg, sizeof(struct mem_seg)); - } -} - -int -vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, - size_t len, int prot, int flags) -{ - struct mem_seg *seg; - struct mem_map *m, *map; - vm_ooffset_t last; - int i, error; - - if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) - return (EINVAL); - - if (flags & ~VM_MEMMAP_F_WIRED) - return (EINVAL); - - if (segid < 0 || segid >= VM_MAX_MEMSEGS) - return (EINVAL); - - seg = &vm->mem_segs[segid]; - if (seg->object == NULL) - return (EINVAL); - - last = first + len; - if (first < 0 || first >= last || last > seg->len) - return (EINVAL); - - if ((gpa | first | last) & PAGE_MASK) - return (EINVAL); - - map = NULL; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - m = &vm->mem_maps[i]; - if (m->len == 0) { - map = m; - break; - } - } - - if (map == NULL) - return (ENOSPC); - - error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, - len, 0, VMFS_NO_SPACE, prot, prot, 0); - if (error != KERN_SUCCESS) - return (EFAULT); - - vm_object_reference(seg->object); - - if (flags & VM_MEMMAP_F_WIRED) { - error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, - VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); - if (error != KERN_SUCCESS) { - vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); - return (error == KERN_RESOURCE_SHORTAGE ? 
ENOMEM : - EFAULT); - } - } - - map->gpa = gpa; - map->len = len; - map->segoff = first; - map->segid = segid; - map->prot = prot; - map->flags = flags; - return (0); -} - -int -vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) -{ - struct mem_map *m; - int i; - - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - m = &vm->mem_maps[i]; - if (m->gpa == gpa && m->len == len && - (m->flags & VM_MEMMAP_F_IOMMU) == 0) { - vm_free_memmap(vm, i); - return (0); - } - } - - return (EINVAL); -} - -int -vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, - vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) -{ - struct mem_map *mm, *mmnext; - int i; - - mmnext = NULL; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (mm->len == 0 || mm->gpa < *gpa) - continue; - if (mmnext == NULL || mm->gpa < mmnext->gpa) - mmnext = mm; - } - - if (mmnext != NULL) { - *gpa = mmnext->gpa; - if (segid) - *segid = mmnext->segid; - if (segoff) - *segoff = mmnext->segoff; - if (len) - *len = mmnext->len; - if (prot) - *prot = mmnext->prot; - if (flags) - *flags = mmnext->flags; - return (0); - } else { - return (ENOENT); - } -} - -static void -vm_free_memmap(struct vm *vm, int ident) -{ - struct mem_map *mm; - int error __diagused; - - mm = &vm->mem_maps[ident]; - if (mm->len) { - error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, - mm->gpa + mm->len); - KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d", - __func__, error)); - bzero(mm, sizeof(struct mem_map)); - } -} - -static __inline bool -sysmem_mapping(struct vm *vm, struct mem_map *mm) -{ - - if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) - return (true); - else - return (false); -} - -vm_paddr_t -vmm_sysmem_maxaddr(struct vm *vm) -{ - struct mem_map *mm; - vm_paddr_t maxaddr; - int i; - - maxaddr = 0; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (sysmem_mapping(vm, mm)) { - if (maxaddr < mm->gpa + mm->len) - maxaddr = mm->gpa + mm->len; - } - } - return (maxaddr); -} - static void vm_iommu_map(struct vm *vm) { vm_paddr_t gpa, hpa; - struct mem_map *mm; + struct vm_mem_map *mm; int i; - sx_assert(&vm->mem_segs_lock, SX_LOCKED); + sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (!sysmem_mapping(vm, mm)) + if (!vm_memseg_sysmem(vm, i)) continue; + mm = &vm->mem.mem_maps[i]; KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0, ("iommu map found invalid memmap %#lx/%#lx/%#x", mm->gpa, mm->len, mm->flags)); if ((mm->flags & VM_MEMMAP_F_WIRED) == 0) continue; mm->flags |= VM_MEMMAP_F_IOMMU; for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) { hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa); /* * All mappings in the vmm vmspace must be * present since they are managed by vmm in this way. * Because we are in pass-through mode, the * mappings must also be wired. This implies * that all pages must be mapped and wired, * allowing to use pmap_extract() and avoiding the * need to use vm_gpa_hold_global(). * * This could change if/when we start * supporting page faults on IOMMU maps. 
*/ KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)), ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired", vm, (uintmax_t)gpa, (uintmax_t)hpa)); iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE); } } iommu_invalidate_tlb(iommu_host_domain()); } static void vm_iommu_unmap(struct vm *vm) { vm_paddr_t gpa; - struct mem_map *mm; + struct vm_mem_map *mm; int i; - sx_assert(&vm->mem_segs_lock, SX_LOCKED); + sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (!sysmem_mapping(vm, mm)) + if (!vm_memseg_sysmem(vm, i)) continue; + mm = &vm->mem.mem_maps[i]; if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0) continue; mm->flags &= ~VM_MEMMAP_F_IOMMU; KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0, ("iommu unmap found invalid memmap %#lx/%#lx/%#x", mm->gpa, mm->len, mm->flags)); for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) { KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract( vmspace_pmap(vm->vmspace), gpa))), ("vm_iommu_unmap: vm %p gpa %jx not wired", vm, (uintmax_t)gpa)); iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE); } } /* * Invalidate the cached translations associated with the domain * from which pages were removed. */ iommu_invalidate_tlb(vm->iommu); } int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func) { int error; error = ppt_unassign_device(vm, bus, slot, func); if (error) return (error); if (ppt_assigned_devices(vm) == 0) vm_iommu_unmap(vm); return (0); } int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func) { int error; vm_paddr_t maxaddr; /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */ if (ppt_assigned_devices(vm) == 0) { KASSERT(vm->iommu == NULL, ("vm_assign_pptdev: iommu must be NULL")); maxaddr = vmm_sysmem_maxaddr(vm); vm->iommu = iommu_create_domain(maxaddr); if (vm->iommu == NULL) return (ENXIO); vm_iommu_map(vm); } error = ppt_assign_device(vm, bus, slot, func); return (error); } -static void * -_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ - int i, count, pageoff; - struct mem_map *mm; - vm_page_t m; - - pageoff = gpa & PAGE_MASK; - if (len > PAGE_SIZE - pageoff) - panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); - - count = 0; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) { - count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, - trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1); - break; - } - } - - if (count == 1) { - *cookie = m; - return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff)); - } else { - *cookie = NULL; - return (NULL); - } -} - -void * -vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ -#ifdef INVARIANTS - /* - * The current vcpu should be frozen to ensure 'vm_memmap[]' - * stability. 
- */ - int state = vcpu_get_state(vcpu, NULL); - KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", - __func__, state)); -#endif - return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); -} - -void * -vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ - sx_assert(&vm->mem_segs_lock, SX_LOCKED); - return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); -} - -void -vm_gpa_release(void *cookie) -{ - vm_page_t m = cookie; - - vm_page_unwire(m, PQ_ACTIVE); -} - int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) { if (reg >= VM_REG_LAST) return (EINVAL); return (vmmops_getreg(vcpu->cookie, reg, retval)); } int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) { int error; if (reg >= VM_REG_LAST) return (EINVAL); error = vmmops_setreg(vcpu->cookie, reg, val); if (error || reg != VM_REG_GUEST_RIP) return (error); /* Set 'nextrip' to match the value of %rip */ VMM_CTR1(vcpu, "Setting nextrip to %#lx", val); vcpu->nextrip = val; return (0); } static bool is_descriptor_table(int reg) { switch (reg) { case VM_REG_GUEST_IDTR: case VM_REG_GUEST_GDTR: return (true); default: return (false); } } static bool is_segment_register(int reg) { switch (reg) { case VM_REG_GUEST_ES: case VM_REG_GUEST_CS: case VM_REG_GUEST_SS: case VM_REG_GUEST_DS: case VM_REG_GUEST_FS: case VM_REG_GUEST_GS: case VM_REG_GUEST_TR: case VM_REG_GUEST_LDTR: return (true); default: return (false); } } int vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) { if (!is_segment_register(reg) && !is_descriptor_table(reg)) return (EINVAL); return (vmmops_getdesc(vcpu->cookie, reg, desc)); } int vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) { if (!is_segment_register(reg) && !is_descriptor_table(reg)) return (EINVAL); return (vmmops_setdesc(vcpu->cookie, reg, desc)); } static void restore_guest_fpustate(struct vcpu *vcpu) { /* flush host state to the pcb */ fpuexit(curthread); /* restore guest FPU state */ fpu_enable(); fpurestore(vcpu->guestfpu); /* restore guest XCR0 if XSAVE is enabled in the host */ if (rcr4() & CR4_XSAVE) load_xcr(0, vcpu->guest_xcr0); /* * The FPU is now "dirty" with the guest's state so disable * the FPU to trap any access by the host. */ fpu_disable(); } static void save_guest_fpustate(struct vcpu *vcpu) { if ((rcr0() & CR0_TS) == 0) panic("fpu emulation not enabled in host!"); /* save guest XCR0 and restore host XCR0 */ if (rcr4() & CR4_XSAVE) { vcpu->guest_xcr0 = rxcr(0); load_xcr(0, vmm_get_host_xcr0()); } /* save guest FPU state */ fpu_enable(); fpusave(vcpu->guestfpu); fpu_disable(); } static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle"); static int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) { int error; vcpu_assert_locked(vcpu); /* * State transitions from the vmmdev_ioctl() must always begin from * the VCPU_IDLE state. This guarantees that there is only a single * ioctl() operating on a vcpu at any point. 
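 * For example, an ioctl such as VM_GET_REGISTER moves the vcpu from
 * IDLE to FROZEN, does its work and returns the vcpu to IDLE; a
 * second ioctl arriving in the meantime waits in the loop below until
 * the vcpu is IDLE again.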
*/ if (from_idle) { while (vcpu->state != VCPU_IDLE) { vcpu->reqidle = 1; vcpu_notify_event_locked(vcpu, false); VMM_CTR1(vcpu, "vcpu state change from %s to " "idle requested", vcpu_state2str(vcpu->state)); msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); } } else { KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " "vcpu idle state")); } if (vcpu->state == VCPU_RUNNING) { KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " "mismatch for running vcpu", curcpu, vcpu->hostcpu)); } else { KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " "vcpu that is not running", vcpu->hostcpu)); } /* * The following state transitions are allowed: * IDLE -> FROZEN -> IDLE * FROZEN -> RUNNING -> FROZEN * FROZEN -> SLEEPING -> FROZEN */ switch (vcpu->state) { case VCPU_IDLE: case VCPU_RUNNING: case VCPU_SLEEPING: error = (newstate != VCPU_FROZEN); break; case VCPU_FROZEN: error = (newstate == VCPU_FROZEN); break; default: error = 1; break; } if (error) return (EBUSY); VMM_CTR2(vcpu, "vcpu state changed from %s to %s", vcpu_state2str(vcpu->state), vcpu_state2str(newstate)); vcpu->state = newstate; if (newstate == VCPU_RUNNING) vcpu->hostcpu = curcpu; else vcpu->hostcpu = NOCPU; if (newstate == VCPU_IDLE) wakeup(&vcpu->state); return (0); } static void vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) panic("Error %d setting state to %d\n", error, newstate); } static void vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) panic("Error %d setting state to %d", error, newstate); } static int vm_handle_rendezvous(struct vcpu *vcpu) { struct vm *vm = vcpu->vm; struct thread *td; int error, vcpuid; error = 0; vcpuid = vcpu->vcpuid; td = curthread; mtx_lock(&vm->rendezvous_mtx); while (vm->rendezvous_func != NULL) { /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus); if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { VMM_CTR0(vcpu, "Calling rendezvous func"); (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); CPU_SET(vcpuid, &vm->rendezvous_done_cpus); } if (CPU_CMP(&vm->rendezvous_req_cpus, &vm->rendezvous_done_cpus) == 0) { VMM_CTR0(vcpu, "Rendezvous completed"); CPU_ZERO(&vm->rendezvous_req_cpus); vm->rendezvous_func = NULL; wakeup(&vm->rendezvous_func); break; } VMM_CTR0(vcpu, "Wait for rendezvous completion"); mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, "vmrndv", hz); if (td_ast_pending(td, TDA_SUSPEND)) { mtx_unlock(&vm->rendezvous_mtx); error = thread_check_susp(td, true); if (error != 0) return (error); mtx_lock(&vm->rendezvous_mtx); } } mtx_unlock(&vm->rendezvous_mtx); return (0); } /* * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run. */ static int vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu) { struct vm *vm = vcpu->vm; const char *wmesg; struct thread *td; int error, t, vcpuid, vcpu_halted, vm_halted; vcpuid = vcpu->vcpuid; vcpu_halted = 0; vm_halted = 0; error = 0; td = curthread; KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); vcpu_lock(vcpu); while (1) { /* * Do a final check for pending NMI or interrupts before * really putting this thread to sleep. Also check for * software events that would cause this vcpu to wakeup. 
* * These interrupts/events could have happened after the * vcpu returned from vmmops_run() and before it acquired the * vcpu lock above. */ if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) break; if (vm_nmi_pending(vcpu)) break; if (!intr_disabled) { if (vm_extint_pending(vcpu) || vlapic_pending_intr(vcpu->vlapic, NULL)) { break; } } /* Don't go to sleep if the vcpu thread needs to yield */ if (vcpu_should_yield(vcpu)) break; if (vcpu_debugged(vcpu)) break; /* * Some Linux guests implement "halt" by having all vcpus * execute HLT with interrupts disabled. 'halted_cpus' keeps * track of the vcpus that have entered this state. When all * vcpus enter the halted state the virtual machine is halted. */ if (intr_disabled) { wmesg = "vmhalt"; VMM_CTR0(vcpu, "Halted"); if (!vcpu_halted && halt_detection_enabled) { vcpu_halted = 1; CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); } if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { vm_halted = 1; break; } } else { wmesg = "vmidle"; } t = ticks; vcpu_require_state_locked(vcpu, VCPU_SLEEPING); /* * XXX msleep_spin() cannot be interrupted by signals so * wake up periodically to check pending signals. */ msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); vcpu_require_state_locked(vcpu, VCPU_FROZEN); vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t); if (td_ast_pending(td, TDA_SUSPEND)) { vcpu_unlock(vcpu); error = thread_check_susp(td, false); if (error != 0) { if (vcpu_halted) { CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); } return (error); } vcpu_lock(vcpu); } } if (vcpu_halted) CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); vcpu_unlock(vcpu); if (vm_halted) vm_suspend(vm, VM_SUSPEND_HALT); return (0); } static int vm_handle_paging(struct vcpu *vcpu, bool *retu) { struct vm *vm = vcpu->vm; int rv, ftype; struct vm_map *map; struct vm_exit *vme; vme = &vcpu->exitinfo; KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", __func__, vme->inst_length)); ftype = vme->u.paging.fault_type; KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE, ("vm_handle_paging: invalid fault_type %d", ftype)); if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) { rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace), vme->u.paging.gpa, ftype); if (rv == 0) { VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx", ftype == VM_PROT_READ ? 
"accessed" : "dirty", vme->u.paging.gpa); goto done; } } map = &vm->vmspace->vm_map; rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, " "ftype = %d", rv, vme->u.paging.gpa, ftype); if (rv != KERN_SUCCESS) return (EFAULT); done: return (0); } static int vm_handle_inst_emul(struct vcpu *vcpu, bool *retu) { struct vie *vie; struct vm_exit *vme; uint64_t gla, gpa, cs_base; struct vm_guest_paging *paging; mem_region_read_t mread; mem_region_write_t mwrite; enum vm_cpu_mode cpu_mode; int cs_d, error, fault; vme = &vcpu->exitinfo; KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", __func__, vme->inst_length)); gla = vme->u.inst_emul.gla; gpa = vme->u.inst_emul.gpa; cs_base = vme->u.inst_emul.cs_base; cs_d = vme->u.inst_emul.cs_d; vie = &vme->u.inst_emul.vie; paging = &vme->u.inst_emul.paging; cpu_mode = paging->cpu_mode; VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa); /* Fetch, decode and emulate the faulting instruction */ if (vie->num_valid == 0) { error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base, VIE_INST_SIZE, vie, &fault); } else { /* * The instruction bytes have already been copied into 'vie' */ error = fault = 0; } if (error || fault) return (error); if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) { VMM_CTR1(vcpu, "Error decoding instruction at %#lx", vme->rip + cs_base); *retu = true; /* dump instruction bytes in userspace */ return (0); } /* * Update 'nextrip' based on the length of the emulated instruction. */ vme->inst_length = vie->num_processed; vcpu->nextrip += vie->num_processed; VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding", vcpu->nextrip); /* return to userland unless this is an in-kernel emulated device */ if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) { mread = lapic_mmio_read; mwrite = lapic_mmio_write; } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) { mread = vioapic_mmio_read; mwrite = vioapic_mmio_write; } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) { mread = vhpet_mmio_read; mwrite = vhpet_mmio_write; } else { *retu = true; return (0); } error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite, retu); return (error); } static int vm_handle_suspend(struct vcpu *vcpu, bool *retu) { struct vm *vm = vcpu->vm; int error, i; struct thread *td; error = 0; td = curthread; CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); /* * Wait until all 'active_cpus' have suspended themselves. * * Since a VM may be suspended at any time including when one or * more vcpus are doing a rendezvous we need to call the rendezvous * handler while we are waiting to prevent a deadlock. */ vcpu_lock(vcpu); while (error == 0) { if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { VMM_CTR0(vcpu, "All vcpus suspended"); break; } if (vm->rendezvous_func == NULL) { VMM_CTR0(vcpu, "Sleeping during suspend"); vcpu_require_state_locked(vcpu, VCPU_SLEEPING); msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); vcpu_require_state_locked(vcpu, VCPU_FROZEN); if (td_ast_pending(td, TDA_SUSPEND)) { vcpu_unlock(vcpu); error = thread_check_susp(td, false); vcpu_lock(vcpu); } } else { VMM_CTR0(vcpu, "Rendezvous during suspend"); vcpu_unlock(vcpu); error = vm_handle_rendezvous(vcpu); vcpu_lock(vcpu); } } vcpu_unlock(vcpu); /* * Wakeup the other sleeping vcpus and return to userspace. 
*/ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->suspended_cpus)) { vcpu_notify_event(vm_vcpu(vm, i), false); } } *retu = true; return (error); } static int vm_handle_reqidle(struct vcpu *vcpu, bool *retu) { vcpu_lock(vcpu); KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle)); vcpu->reqidle = 0; vcpu_unlock(vcpu); *retu = true; return (0); } static int vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) { int error, fault; uint64_t rsp; uint64_t rflags; struct vm_copyinfo copyinfo[2]; *retu = true; if (!vme->u.dbg.pushf_intercept || vme->u.dbg.tf_shadow_val != 0) { return (0); } vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp); error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t), VM_PROT_RW, copyinfo, nitems(copyinfo), &fault); if (error != 0 || fault != 0) { *retu = false; return (EINVAL); } /* Read pushed rflags value from top of stack. */ vm_copyin(copyinfo, &rflags, sizeof(uint64_t)); /* Clear TF bit. */ rflags &= ~(PSL_T); /* Write updated value back to memory. */ vm_copyout(&rflags, copyinfo, sizeof(uint64_t)); vm_copy_teardown(copyinfo, nitems(copyinfo)); return (0); } int vm_suspend(struct vm *vm, enum vm_suspend_how how) { int i; if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) return (EINVAL); if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { VM_CTR2(vm, "virtual machine already suspended %d/%d", vm->suspend, how); return (EALREADY); } VM_CTR1(vm, "virtual machine successfully suspended %d", how); /* * Notify all active vcpus that they are now suspended. */ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->active_cpus)) vcpu_notify_event(vm_vcpu(vm, i), false); } return (0); } void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip) { struct vm *vm = vcpu->vm; struct vm_exit *vmexit; KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); vmexit = vm_exitinfo(vcpu); vmexit->rip = rip; vmexit->inst_length = 0; vmexit->exitcode = VM_EXITCODE_SUSPENDED; vmexit->u.suspended.how = vm->suspend; } void vm_exit_debug(struct vcpu *vcpu, uint64_t rip) { struct vm_exit *vmexit; vmexit = vm_exitinfo(vcpu); vmexit->rip = rip; vmexit->inst_length = 0; vmexit->exitcode = VM_EXITCODE_DEBUG; } void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip) { struct vm_exit *vmexit; vmexit = vm_exitinfo(vcpu); vmexit->rip = rip; vmexit->inst_length = 0; vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1); } void vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip) { struct vm_exit *vmexit; vmexit = vm_exitinfo(vcpu); vmexit->rip = rip; vmexit->inst_length = 0; vmexit->exitcode = VM_EXITCODE_REQIDLE; vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1); } void vm_exit_astpending(struct vcpu *vcpu, uint64_t rip) { struct vm_exit *vmexit; vmexit = vm_exitinfo(vcpu); vmexit->rip = rip; vmexit->inst_length = 0; vmexit->exitcode = VM_EXITCODE_BOGUS; vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1); } int vm_run(struct vcpu *vcpu) { struct vm *vm = vcpu->vm; struct vm_eventinfo evinfo; int error, vcpuid; struct pcb *pcb; uint64_t tscval; struct vm_exit *vme; bool retu, intr_disabled; pmap_t pmap; vcpuid = vcpu->vcpuid; if (!CPU_ISSET(vcpuid, &vm->active_cpus)) return (EINVAL); if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) return (EINVAL); pmap = vmspace_pmap(vm->vmspace); vme = &vcpu->exitinfo; evinfo.rptr = &vm->rendezvous_req_cpus; evinfo.sptr = &vm->suspend; evinfo.iptr = &vcpu->reqidle; restart: critical_enter(); KASSERT(!CPU_ISSET(curcpu, 
&pmap->pm_active), ("vm_run: absurd pm_active")); tscval = rdtsc(); pcb = PCPU_GET(curpcb); set_pcb_flags(pcb, PCB_FULL_IRET); restore_guest_fpustate(vcpu); vcpu_require_state(vcpu, VCPU_RUNNING); error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo); vcpu_require_state(vcpu, VCPU_FROZEN); save_guest_fpustate(vcpu); vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); critical_exit(); if (error == 0) { retu = false; vcpu->nextrip = vme->rip + vme->inst_length; switch (vme->exitcode) { case VM_EXITCODE_REQIDLE: error = vm_handle_reqidle(vcpu, &retu); break; case VM_EXITCODE_SUSPENDED: error = vm_handle_suspend(vcpu, &retu); break; case VM_EXITCODE_IOAPIC_EOI: vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); break; case VM_EXITCODE_RENDEZVOUS: error = vm_handle_rendezvous(vcpu); break; case VM_EXITCODE_HLT: intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); error = vm_handle_hlt(vcpu, intr_disabled, &retu); break; case VM_EXITCODE_PAGING: error = vm_handle_paging(vcpu, &retu); break; case VM_EXITCODE_INST_EMUL: error = vm_handle_inst_emul(vcpu, &retu); break; case VM_EXITCODE_INOUT: case VM_EXITCODE_INOUT_STR: error = vm_handle_inout(vcpu, vme, &retu); break; case VM_EXITCODE_DB: error = vm_handle_db(vcpu, vme, &retu); break; case VM_EXITCODE_MONITOR: case VM_EXITCODE_MWAIT: case VM_EXITCODE_VMINSN: vm_inject_ud(vcpu); break; default: retu = true; /* handled in userland */ break; } } /* * VM_EXITCODE_INST_EMUL could access the apic which could transform the * exit code into VM_EXITCODE_IPI. */ if (error == 0 && vme->exitcode == VM_EXITCODE_IPI) error = vm_handle_ipi(vcpu, vme, &retu); if (error == 0 && retu == false) goto restart; vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1); VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode); return (error); } int vm_restart_instruction(struct vcpu *vcpu) { enum vcpu_state state; uint64_t rip; int error __diagused; state = vcpu_get_state(vcpu, NULL); if (state == VCPU_RUNNING) { /* * When a vcpu is "running" the next instruction is determined * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. * Thus setting 'inst_length' to zero will cause the current * instruction to be restarted. */ vcpu->exitinfo.inst_length = 0; VMM_CTR1(vcpu, "restarting instruction at %#lx by " "setting inst_length to zero", vcpu->exitinfo.rip); } else if (state == VCPU_FROZEN) { /* * When a vcpu is "frozen" it is outside the critical section * around vmmops_run() and 'nextrip' points to the next * instruction. Thus instruction restart is achieved by setting * 'nextrip' to the vcpu's %rip. 
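 *
 * For illustration, the "rep" string emulation in
 * vmm_instruction_emul.c reruns a partially completed iteration by
 * going through this function:
 *
 *	if ((rcx & vie_size2mask(vie->addrsize)) != 0)
 *		vm_restart_instruction(vcpu);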
*/ error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip); KASSERT(!error, ("%s: error %d getting rip", __func__, error)); VMM_CTR2(vcpu, "restarting instruction by updating " "nextrip from %#lx to %#lx", vcpu->nextrip, rip); vcpu->nextrip = rip; } else { panic("%s: invalid state %d", __func__, state); } return (0); } int vm_exit_intinfo(struct vcpu *vcpu, uint64_t info) { int type, vector; if (info & VM_INTINFO_VALID) { type = info & VM_INTINFO_TYPE; vector = info & 0xff; if (type == VM_INTINFO_NMI && vector != IDT_NMI) return (EINVAL); if (type == VM_INTINFO_HWEXCEPTION && vector >= 32) return (EINVAL); if (info & VM_INTINFO_RSVD) return (EINVAL); } else { info = 0; } VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info); vcpu->exitintinfo = info; return (0); } enum exc_class { EXC_BENIGN, EXC_CONTRIBUTORY, EXC_PAGEFAULT }; #define IDT_VE 20 /* Virtualization Exception (Intel specific) */ static enum exc_class exception_class(uint64_t info) { int type, vector; KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info)); type = info & VM_INTINFO_TYPE; vector = info & 0xff; /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ switch (type) { case VM_INTINFO_HWINTR: case VM_INTINFO_SWINTR: case VM_INTINFO_NMI: return (EXC_BENIGN); default: /* * Hardware exception. * * SVM and VT-x use identical type values to represent NMI, * hardware interrupt and software interrupt. * * SVM uses type '3' for all exceptions. VT-x uses type '3' * for exceptions except #BP and #OF. #BP and #OF use a type * value of '5' or '6'. Therefore we don't check for explicit * values of 'type' to classify 'intinfo' into a hardware * exception. */ break; } switch (vector) { case IDT_PF: case IDT_VE: return (EXC_PAGEFAULT); case IDT_DE: case IDT_TS: case IDT_NP: case IDT_SS: case IDT_GP: return (EXC_CONTRIBUTORY); default: return (EXC_BENIGN); } } static int nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2, uint64_t *retinfo) { enum exc_class exc1, exc2; int type1, vector1; KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1)); KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2)); /* * If an exception occurs while attempting to call the double-fault * handler the processor enters shutdown mode (aka triple fault). */ type1 = info1 & VM_INTINFO_TYPE; vector1 = info1 & 0xff; if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) { VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)", info1, info2); vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); *retinfo = 0; return (0); } /* * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3 */ exc1 = exception_class(info1); exc2 = exception_class(info2); if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { /* Convert nested fault into a double fault. 
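 *
 * For example (illustrative scenario), a #GP raised while delivering a
 * pending #PF is folded into the single value:
 *
 *	*retinfo = IDT_DF | VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION |
 *	    VM_INTINFO_DEL_ERRCODE;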
*/ *retinfo = IDT_DF; *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; *retinfo |= VM_INTINFO_DEL_ERRCODE; } else { /* Handle exceptions serially */ *retinfo = info2; } return (1); } static uint64_t vcpu_exception_intinfo(struct vcpu *vcpu) { uint64_t info = 0; if (vcpu->exception_pending) { info = vcpu->exc_vector & 0xff; info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; if (vcpu->exc_errcode_valid) { info |= VM_INTINFO_DEL_ERRCODE; info |= (uint64_t)vcpu->exc_errcode << 32; } } return (info); } int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo) { uint64_t info1, info2; int valid; info1 = vcpu->exitintinfo; vcpu->exitintinfo = 0; info2 = 0; if (vcpu->exception_pending) { info2 = vcpu_exception_intinfo(vcpu); vcpu->exception_pending = 0; VMM_CTR2(vcpu, "Exception %d delivered: %#lx", vcpu->exc_vector, info2); } if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) { valid = nested_fault(vcpu, info1, info2, retinfo); } else if (info1 & VM_INTINFO_VALID) { *retinfo = info1; valid = 1; } else if (info2 & VM_INTINFO_VALID) { *retinfo = info2; valid = 1; } else { valid = 0; } if (valid) { VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), " "retinfo(%#lx)", __func__, info1, info2, *retinfo); } return (valid); } int vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2) { *info1 = vcpu->exitintinfo; *info2 = vcpu_exception_intinfo(vcpu); return (0); } int vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid, uint32_t errcode, int restart_instruction) { uint64_t regval; int error __diagused; if (vector < 0 || vector >= 32) return (EINVAL); /* * A double fault exception should never be injected directly into * the guest. It is a derived exception that results from specific * combinations of nested faults. */ if (vector == IDT_DF) return (EINVAL); if (vcpu->exception_pending) { VMM_CTR2(vcpu, "Unable to inject exception %d due to " "pending exception %d", vector, vcpu->exc_vector); return (EBUSY); } if (errcode_valid) { /* * Exceptions don't deliver an error code in real mode. */ error = vm_get_register(vcpu, VM_REG_GUEST_CR0, ®val); KASSERT(!error, ("%s: error %d getting CR0", __func__, error)); if (!(regval & CR0_PE)) errcode_valid = 0; } /* * From section 26.6.1 "Interruptibility State" in Intel SDM: * * Event blocking by "STI" or "MOV SS" is cleared after guest executes * one instruction or incurs an exception. 
*/ error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0); KASSERT(error == 0, ("%s: error %d clearing interrupt shadow", __func__, error)); if (restart_instruction) vm_restart_instruction(vcpu); vcpu->exception_pending = 1; vcpu->exc_vector = vector; vcpu->exc_errcode = errcode; vcpu->exc_errcode_valid = errcode_valid; VMM_CTR1(vcpu, "Exception %d pending", vector); return (0); } void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode) { int error __diagused, restart_instruction; restart_instruction = 1; error = vm_inject_exception(vcpu, vector, errcode_valid, errcode, restart_instruction); KASSERT(error == 0, ("vm_inject_exception error %d", error)); } void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2) { int error __diagused; VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx", error_code, cr2); error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2); KASSERT(error == 0, ("vm_set_register(cr2) error %d", error)); vm_inject_fault(vcpu, IDT_PF, 1, error_code); } static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); int vm_inject_nmi(struct vcpu *vcpu) { vcpu->nmi_pending = 1; vcpu_notify_event(vcpu, false); return (0); } int vm_nmi_pending(struct vcpu *vcpu) { return (vcpu->nmi_pending); } void vm_nmi_clear(struct vcpu *vcpu) { if (vcpu->nmi_pending == 0) panic("vm_nmi_clear: inconsistent nmi_pending state"); vcpu->nmi_pending = 0; vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1); } static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); int vm_inject_extint(struct vcpu *vcpu) { vcpu->extint_pending = 1; vcpu_notify_event(vcpu, false); return (0); } int vm_extint_pending(struct vcpu *vcpu) { return (vcpu->extint_pending); } void vm_extint_clear(struct vcpu *vcpu) { if (vcpu->extint_pending == 0) panic("vm_extint_clear: inconsistent extint_pending state"); vcpu->extint_pending = 0; vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1); } int vm_get_capability(struct vcpu *vcpu, int type, int *retval) { if (type < 0 || type >= VM_CAP_MAX) return (EINVAL); return (vmmops_getcap(vcpu->cookie, type, retval)); } int vm_set_capability(struct vcpu *vcpu, int type, int val) { if (type < 0 || type >= VM_CAP_MAX) return (EINVAL); return (vmmops_setcap(vcpu->cookie, type, val)); } struct vm * vcpu_vm(struct vcpu *vcpu) { return (vcpu->vm); } int vcpu_vcpuid(struct vcpu *vcpu) { return (vcpu->vcpuid); } struct vcpu * vm_vcpu(struct vm *vm, int vcpuid) { return (vm->vcpu[vcpuid]); } struct vlapic * vm_lapic(struct vcpu *vcpu) { return (vcpu->vlapic); } struct vioapic * vm_ioapic(struct vm *vm) { return (vm->vioapic); } struct vhpet * vm_hpet(struct vm *vm) { return (vm->vhpet); } bool vmm_is_pptdev(int bus, int slot, int func) { int b, f, i, n, s; char *val, *cp, *cp2; bool found; /* * XXX * The length of an environment variable is limited to 128 bytes which * puts an upper limit on the number of passthru devices that may be * specified using a single environment variable. * * Work around this by scanning multiple environment variable * names instead of a single one - yuck! 
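 *
 * Illustrative host configuration (bus/slot/func triples; the values
 * are made up):
 *
 *	pptdevs="2/0/0 4/10/0"
 *	pptdevs2="9/0/0"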
*/ const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL }; /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */ found = false; for (i = 0; names[i] != NULL && !found; i++) { cp = val = kern_getenv(names[i]); while (cp != NULL && *cp != '\0') { if ((cp2 = strchr(cp, ' ')) != NULL) *cp2 = '\0'; n = sscanf(cp, "%d/%d/%d", &b, &s, &f); if (n == 3 && bus == b && slot == s && func == f) { found = true; break; } if (cp2 != NULL) *cp2++ = ' '; cp = cp2; } freeenv(val); } return (found); } void * vm_iommu_domain(struct vm *vm) { return (vm->iommu); } int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) { int error; vcpu_lock(vcpu); error = vcpu_set_state_locked(vcpu, newstate, from_idle); vcpu_unlock(vcpu); return (error); } enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu) { enum vcpu_state state; vcpu_lock(vcpu); state = vcpu->state; if (hostcpu != NULL) *hostcpu = vcpu->hostcpu; vcpu_unlock(vcpu); return (state); } int vm_activate_cpu(struct vcpu *vcpu) { struct vm *vm = vcpu->vm; if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) return (EBUSY); VMM_CTR0(vcpu, "activated"); CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); return (0); } int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) { if (vcpu == NULL) { vm->debug_cpus = vm->active_cpus; for (int i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->active_cpus)) vcpu_notify_event(vm_vcpu(vm, i), false); } } else { if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) return (EINVAL); CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); vcpu_notify_event(vcpu, false); } return (0); } int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) { if (vcpu == NULL) { CPU_ZERO(&vm->debug_cpus); } else { if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) return (EINVAL); CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); } return (0); } int vcpu_debugged(struct vcpu *vcpu) { return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); } cpuset_t vm_active_cpus(struct vm *vm) { return (vm->active_cpus); } cpuset_t vm_debug_cpus(struct vm *vm) { return (vm->debug_cpus); } cpuset_t vm_suspended_cpus(struct vm *vm) { return (vm->suspended_cpus); } /* * Returns the subset of vCPUs in tostart that are awaiting startup. * These vCPUs are also marked as no longer awaiting startup. */ cpuset_t vm_start_cpus(struct vm *vm, const cpuset_t *tostart) { cpuset_t set; mtx_lock(&vm->rendezvous_mtx); CPU_AND(&set, &vm->startup_cpus, tostart); CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set); mtx_unlock(&vm->rendezvous_mtx); return (set); } void vm_await_start(struct vm *vm, const cpuset_t *waiting) { mtx_lock(&vm->rendezvous_mtx); CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting); mtx_unlock(&vm->rendezvous_mtx); } void * vcpu_stats(struct vcpu *vcpu) { return (vcpu->stats); } int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state) { *state = vcpu->x2apic_state; return (0); } int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state) { if (state >= X2APIC_STATE_LAST) return (EINVAL); vcpu->x2apic_state = state; vlapic_set_x2apic_state(vcpu, state); return (0); } /* * This function is called to ensure that a vcpu "sees" a pending event * as soon as possible: * - If the vcpu thread is sleeping then it is woken up. * - If the vcpu is running on a different host_cpu then an IPI will be directed * to the host_cpu to cause the vcpu to trap into the hypervisor. 
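 *
 * A typical caller (mirroring vm_inject_nmi() above) is simply:
 *
 *	vcpu->nmi_pending = 1;
 *	vcpu_notify_event(vcpu, false);
 *
 * 'lapic_intr' is passed as true only when the pending event is a local
 * APIC interrupt, in which case vlapic_post_intr() is used instead of a
 * plain IPI when the vcpu is running on another host cpu.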
*/ static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr) { int hostcpu; hostcpu = vcpu->hostcpu; if (vcpu->state == VCPU_RUNNING) { KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); if (hostcpu != curcpu) { if (lapic_intr) { vlapic_post_intr(vcpu->vlapic, hostcpu, vmm_ipinum); } else { ipi_cpu(hostcpu, vmm_ipinum); } } else { /* * If the 'vcpu' is running on 'curcpu' then it must * be sending a notification to itself (e.g. SELF_IPI). * The pending event will be picked up when the vcpu * transitions back to guest context. */ } } else { KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " "with hostcpu %d", vcpu->state, hostcpu)); if (vcpu->state == VCPU_SLEEPING) wakeup_one(vcpu); } } void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr) { vcpu_lock(vcpu); vcpu_notify_event_locked(vcpu, lapic_intr); vcpu_unlock(vcpu); } struct vmspace * vm_vmspace(struct vm *vm) { return (vm->vmspace); } +struct vm_mem * +vm_mem(struct vm *vm) +{ + return (&vm->mem); +} + int vm_apicid2vcpuid(struct vm *vm, int apicid) { /* * XXX apic id is assumed to be numerically identical to vcpu id */ return (apicid); } int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest, vm_rendezvous_func_t func, void *arg) { struct vm *vm = vcpu->vm; int error, i; /* * Enforce that this function is called without any locks */ WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); restart: mtx_lock(&vm->rendezvous_mtx); if (vm->rendezvous_func != NULL) { /* * If a rendezvous is already in progress then we need to * call the rendezvous handler in case this 'vcpu' is one * of the targets of the rendezvous. */ VMM_CTR0(vcpu, "Rendezvous already in progress"); mtx_unlock(&vm->rendezvous_mtx); error = vm_handle_rendezvous(vcpu); if (error != 0) return (error); goto restart; } KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " "rendezvous is still in progress")); VMM_CTR0(vcpu, "Initiating rendezvous"); vm->rendezvous_req_cpus = dest; CPU_ZERO(&vm->rendezvous_done_cpus); vm->rendezvous_arg = arg; vm->rendezvous_func = func; mtx_unlock(&vm->rendezvous_mtx); /* * Wake up any sleeping vcpus and trigger a VM-exit in any running * vcpus so they handle the rendezvous as soon as possible. 
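 *
 * Caller-side sketch (the handler name is hypothetical):
 *
 *	cpuset_t dest = vm_active_cpus(vm);
 *	error = vm_smp_rendezvous(vcpu, dest, my_rendezvous_fn, arg);
 *
 * The call normally returns only after every targeted active vcpu has run
 * the handler, since the initiator finishes by calling
 * vm_handle_rendezvous() itself.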
*/ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &dest)) vcpu_notify_event(vm_vcpu(vm, i), false); } return (vm_handle_rendezvous(vcpu)); } struct vatpic * vm_atpic(struct vm *vm) { return (vm->vatpic); } struct vatpit * vm_atpit(struct vm *vm) { return (vm->vatpit); } struct vpmtmr * vm_pmtmr(struct vm *vm) { return (vm->vpmtmr); } struct vrtc * vm_rtc(struct vm *vm) { return (vm->vrtc); } enum vm_reg_name vm_segment_name(int seg) { static enum vm_reg_name seg_names[] = { VM_REG_GUEST_ES, VM_REG_GUEST_CS, VM_REG_GUEST_SS, VM_REG_GUEST_DS, VM_REG_GUEST_FS, VM_REG_GUEST_GS }; KASSERT(seg >= 0 && seg < nitems(seg_names), ("%s: invalid segment encoding %d", __func__, seg)); return (seg_names[seg]); } void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo) { int idx; for (idx = 0; idx < num_copyinfo; idx++) { if (copyinfo[idx].cookie != NULL) vm_gpa_release(copyinfo[idx].cookie); } bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo)); } int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, int num_copyinfo, int *fault) { int error, idx, nused; size_t n, off, remaining; void *hva, *cookie; uint64_t gpa; bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo); nused = 0; remaining = len; while (remaining > 0) { if (nused >= num_copyinfo) return (EFAULT); error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); if (error || *fault) return (error); off = gpa & PAGE_MASK; n = min(remaining, PAGE_SIZE - off); copyinfo[nused].gpa = gpa; copyinfo[nused].len = n; remaining -= n; gla += n; nused++; } for (idx = 0; idx < nused; idx++) { hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa, copyinfo[idx].len, prot, &cookie); if (hva == NULL) break; copyinfo[idx].hva = hva; copyinfo[idx].cookie = cookie; } if (idx != nused) { vm_copy_teardown(copyinfo, num_copyinfo); return (EFAULT); } else { *fault = 0; return (0); } } void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len) { char *dst; int idx; dst = kaddr; idx = 0; while (len > 0) { bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len); len -= copyinfo[idx].len; dst += copyinfo[idx].len; idx++; } } void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len) { const char *src; int idx; src = kaddr; idx = 0; while (len > 0) { bcopy(src, copyinfo[idx].hva, copyinfo[idx].len); len -= copyinfo[idx].len; src += copyinfo[idx].len; idx++; } } /* * Return the amount of in-use and wired memory for the VM. 
Since * these are global stats, only return the values with for vCPU 0 */ VMM_STAT_DECLARE(VMM_MEM_RESIDENT); VMM_STAT_DECLARE(VMM_MEM_WIRED); static void vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat) { if (vcpu->vcpuid == 0) { vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE * vmspace_resident_count(vcpu->vm->vmspace)); } } static void vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat) { if (vcpu->vcpuid == 0) { vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE * pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace))); } } VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt); #ifdef BHYVE_SNAPSHOT static int vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta) { uint64_t tsc, now; int ret; struct vcpu *vcpu; uint16_t i, maxcpus; now = rdtsc(); maxcpus = vm_get_maxcpus(vm); for (i = 0; i < maxcpus; i++) { vcpu = vm->vcpu[i]; if (vcpu == NULL) continue; SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done); SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); /* * Save the absolute TSC value by adding now to tsc_offset. * * It will be turned turned back into an actual offset when the * TSC restore function is called */ tsc = now + vcpu->tsc_offset; SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done); if (meta->op == VM_SNAPSHOT_RESTORE) vcpu->tsc_offset = tsc; } done: return (ret); } static int vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta) { int ret; ret = vm_snapshot_vcpus(vm, meta); if (ret != 0) goto done; SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done); done: return (ret); } static int vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta) { int error; struct vcpu *vcpu; uint16_t i, maxcpus; error = 0; maxcpus = vm_get_maxcpus(vm); for (i = 0; i < maxcpus; i++) { vcpu = vm->vcpu[i]; if (vcpu == NULL) continue; error = vmmops_vcpu_snapshot(vcpu->cookie, meta); if (error != 0) { printf("%s: failed to snapshot vmcs/vmcb data for " "vCPU: %d; error: %d\n", __func__, i, error); goto done; } } done: return (error); } /* * Save kernel-side structures to user-space for snapshotting. 
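 *
 * Each handler serializes its fields with the snapshot helpers used in
 * vm_snapshot_vcpus() above, e.g.:
 *
 *	SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);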
*/ int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta) { int ret = 0; switch (meta->dev_req) { case STRUCT_VMCX: ret = vm_snapshot_vcpu(vm, meta); break; case STRUCT_VM: ret = vm_snapshot_vm(vm, meta); break; case STRUCT_VIOAPIC: ret = vioapic_snapshot(vm_ioapic(vm), meta); break; case STRUCT_VLAPIC: ret = vlapic_snapshot(vm, meta); break; case STRUCT_VHPET: ret = vhpet_snapshot(vm_hpet(vm), meta); break; case STRUCT_VATPIC: ret = vatpic_snapshot(vm_atpic(vm), meta); break; case STRUCT_VATPIT: ret = vatpit_snapshot(vm_atpit(vm), meta); break; case STRUCT_VPMTMR: ret = vpmtmr_snapshot(vm_pmtmr(vm), meta); break; case STRUCT_VRTC: ret = vrtc_snapshot(vm_rtc(vm), meta); break; default: printf("%s: failed to find the requested type %#x\n", __func__, meta->dev_req); ret = (EINVAL); } return (ret); } void vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset) { vcpu->tsc_offset = offset; } int vm_restore_time(struct vm *vm) { int error; uint64_t now; struct vcpu *vcpu; uint16_t i, maxcpus; now = rdtsc(); error = vhpet_restore_time(vm_hpet(vm)); if (error) return (error); maxcpus = vm_get_maxcpus(vm); for (i = 0; i < maxcpus; i++) { vcpu = vm->vcpu[i]; if (vcpu == NULL) continue; error = vmmops_restore_tsc(vcpu->cookie, vcpu->tsc_offset - now); if (error) return (error); } return (0); } #endif diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c index 6e1501493082..51769faf5893 100644 --- a/sys/amd64/vmm/vmm_instruction_emul.c +++ b/sys/amd64/vmm/vmm_instruction_emul.c @@ -1,2940 +1,2942 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 Sandvine, Inc. * Copyright (c) 2012 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef _KERNEL #include #include #include #include #include #include #include #include + +#include #else /* !_KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include #define __diagused #define KASSERT(exp,msg) assert((exp)) #define panic(...) 
errx(4, __VA_ARGS__) #endif /* _KERNEL */ #include #include #include /* struct vie_op.op_type */ enum { VIE_OP_TYPE_NONE = 0, VIE_OP_TYPE_MOV, VIE_OP_TYPE_MOVSX, VIE_OP_TYPE_MOVZX, VIE_OP_TYPE_AND, VIE_OP_TYPE_OR, VIE_OP_TYPE_SUB, VIE_OP_TYPE_TWO_BYTE, VIE_OP_TYPE_PUSH, VIE_OP_TYPE_CMP, VIE_OP_TYPE_POP, VIE_OP_TYPE_MOVS, VIE_OP_TYPE_GROUP1, VIE_OP_TYPE_STOS, VIE_OP_TYPE_BITTEST, VIE_OP_TYPE_TWOB_GRP15, VIE_OP_TYPE_ADD, VIE_OP_TYPE_TEST, VIE_OP_TYPE_BEXTR, VIE_OP_TYPE_LAST }; /* struct vie_op.op_flags */ #define VIE_OP_F_IMM (1 << 0) /* 16/32-bit immediate operand */ #define VIE_OP_F_IMM8 (1 << 1) /* 8-bit immediate operand */ #define VIE_OP_F_MOFFSET (1 << 2) /* 16/32/64-bit immediate moffset */ #define VIE_OP_F_NO_MODRM (1 << 3) #define VIE_OP_F_NO_GLA_VERIFICATION (1 << 4) static const struct vie_op three_byte_opcodes_0f38[256] = { [0xF7] = { .op_byte = 0xF7, .op_type = VIE_OP_TYPE_BEXTR, }, }; static const struct vie_op two_byte_opcodes[256] = { [0xAE] = { .op_byte = 0xAE, .op_type = VIE_OP_TYPE_TWOB_GRP15, }, [0xB6] = { .op_byte = 0xB6, .op_type = VIE_OP_TYPE_MOVZX, }, [0xB7] = { .op_byte = 0xB7, .op_type = VIE_OP_TYPE_MOVZX, }, [0xBA] = { .op_byte = 0xBA, .op_type = VIE_OP_TYPE_BITTEST, .op_flags = VIE_OP_F_IMM8, }, [0xBE] = { .op_byte = 0xBE, .op_type = VIE_OP_TYPE_MOVSX, }, }; static const struct vie_op one_byte_opcodes[256] = { [0x03] = { .op_byte = 0x03, .op_type = VIE_OP_TYPE_ADD, }, [0x0F] = { .op_byte = 0x0F, .op_type = VIE_OP_TYPE_TWO_BYTE }, [0x0B] = { .op_byte = 0x0B, .op_type = VIE_OP_TYPE_OR, }, [0x2B] = { .op_byte = 0x2B, .op_type = VIE_OP_TYPE_SUB, }, [0x39] = { .op_byte = 0x39, .op_type = VIE_OP_TYPE_CMP, }, [0x3B] = { .op_byte = 0x3B, .op_type = VIE_OP_TYPE_CMP, }, [0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV, }, [0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV, }, [0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV, }, [0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV, }, [0xA1] = { .op_byte = 0xA1, .op_type = VIE_OP_TYPE_MOV, .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM, }, [0xA3] = { .op_byte = 0xA3, .op_type = VIE_OP_TYPE_MOV, .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM, }, [0xA4] = { .op_byte = 0xA4, .op_type = VIE_OP_TYPE_MOVS, .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION }, [0xA5] = { .op_byte = 0xA5, .op_type = VIE_OP_TYPE_MOVS, .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION }, [0xAA] = { .op_byte = 0xAA, .op_type = VIE_OP_TYPE_STOS, .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION }, [0xAB] = { .op_byte = 0xAB, .op_type = VIE_OP_TYPE_STOS, .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION }, [0xC6] = { /* XXX Group 11 extended opcode - not just MOV */ .op_byte = 0xC6, .op_type = VIE_OP_TYPE_MOV, .op_flags = VIE_OP_F_IMM8, }, [0xC7] = { .op_byte = 0xC7, .op_type = VIE_OP_TYPE_MOV, .op_flags = VIE_OP_F_IMM, }, [0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND, }, [0x80] = { /* Group 1 extended opcode */ .op_byte = 0x80, .op_type = VIE_OP_TYPE_GROUP1, .op_flags = VIE_OP_F_IMM8, }, [0x81] = { /* Group 1 extended opcode */ .op_byte = 0x81, .op_type = VIE_OP_TYPE_GROUP1, .op_flags = VIE_OP_F_IMM, }, [0x83] = { /* Group 1 extended opcode */ .op_byte = 0x83, .op_type = VIE_OP_TYPE_GROUP1, .op_flags = VIE_OP_F_IMM8, }, [0x8F] = { /* XXX Group 1A extended opcode - not just POP */ .op_byte = 0x8F, .op_type = VIE_OP_TYPE_POP, }, [0xF7] = { /* XXX Group 3 extended opcode - not just TEST */ .op_byte = 0xF7, .op_type = VIE_OP_TYPE_TEST, .op_flags = VIE_OP_F_IMM, }, [0xFF] = { /* XXX 
Group 5 extended opcode - not just PUSH */ .op_byte = 0xFF, .op_type = VIE_OP_TYPE_PUSH, } }; /* struct vie.mod */ #define VIE_MOD_INDIRECT 0 #define VIE_MOD_INDIRECT_DISP8 1 #define VIE_MOD_INDIRECT_DISP32 2 #define VIE_MOD_DIRECT 3 /* struct vie.rm */ #define VIE_RM_SIB 4 #define VIE_RM_DISP32 5 #define GB (1024 * 1024 * 1024) static enum vm_reg_name gpr_map[16] = { VM_REG_GUEST_RAX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX, VM_REG_GUEST_RBX, VM_REG_GUEST_RSP, VM_REG_GUEST_RBP, VM_REG_GUEST_RSI, VM_REG_GUEST_RDI, VM_REG_GUEST_R8, VM_REG_GUEST_R9, VM_REG_GUEST_R10, VM_REG_GUEST_R11, VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14, VM_REG_GUEST_R15 }; static uint64_t size2mask[] = { [1] = 0xff, [2] = 0xffff, [4] = 0xffffffff, [8] = 0xffffffffffffffff, }; static int vie_read_register(struct vcpu *vcpu, enum vm_reg_name reg, uint64_t *rval) { int error; error = vm_get_register(vcpu, reg, rval); return (error); } static void vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr) { *lhbr = 0; *reg = gpr_map[vie->reg]; /* * 64-bit mode imposes limitations on accessing legacy high byte * registers (lhbr). * * The legacy high-byte registers cannot be addressed if the REX * prefix is present. In this case the values 4, 5, 6 and 7 of the * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively. * * If the REX prefix is not present then the values 4, 5, 6 and 7 * of the 'ModRM:reg' field address the legacy high-byte registers, * %ah, %ch, %dh and %bh respectively. */ if (!vie->rex_present) { if (vie->reg & 0x4) { *lhbr = 1; *reg = gpr_map[vie->reg & 0x3]; } } } static int vie_read_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t *rval) { uint64_t val; int error, lhbr; enum vm_reg_name reg; vie_calc_bytereg(vie, ®, &lhbr); error = vm_get_register(vcpu, reg, &val); /* * To obtain the value of a legacy high byte register shift the * base register right by 8 bits (%ah = %rax >> 8). */ if (lhbr) *rval = val >> 8; else *rval = val; return (error); } static int vie_write_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t byte) { uint64_t origval, val, mask; int error, lhbr; enum vm_reg_name reg; vie_calc_bytereg(vie, ®, &lhbr); error = vm_get_register(vcpu, reg, &origval); if (error == 0) { val = byte; mask = 0xff; if (lhbr) { /* * Shift left by 8 to store 'byte' in a legacy high * byte register. */ val <<= 8; mask <<= 8; } val |= origval & ~mask; error = vm_set_register(vcpu, reg, val); } return (error); } int vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg, uint64_t val, int size) { int error; uint64_t origval; switch (size) { case 1: case 2: error = vie_read_register(vcpu, reg, &origval); if (error) return (error); val &= size2mask[size]; val |= origval & ~size2mask[size]; break; case 4: val &= 0xffffffffUL; break; case 8: break; default: return (EINVAL); } error = vm_set_register(vcpu, reg, val); return (error); } #define RFLAGS_STATUS_BITS (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V) /* * Return the status flags that would result from doing (x - y). 
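 *
 * The GETCC() macro below generates one helper per operand size;
 * GETCC(32), for instance, expands to (illustrative expansion):
 *
 *	static u_long
 *	getcc32(uint32_t x, uint32_t y)
 *	{
 *		u_long rflags;
 *
 *		__asm __volatile("sub %2,%1; pushfq; popq %0" :
 *		    "=r" (rflags), "+r" (x) : "m" (y));
 *		return (rflags);
 *	}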
*/ #define GETCC(sz) \ static u_long \ getcc##sz(uint##sz##_t x, uint##sz##_t y) \ { \ u_long rflags; \ \ __asm __volatile("sub %2,%1; pushfq; popq %0" : \ "=r" (rflags), "+r" (x) : "m" (y)); \ return (rflags); \ } struct __hack GETCC(8); GETCC(16); GETCC(32); GETCC(64); static u_long getcc(int opsize, uint64_t x, uint64_t y) { KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8, ("getcc: invalid operand size %d", opsize)); if (opsize == 1) return (getcc8(x, y)); else if (opsize == 2) return (getcc16(x, y)); else if (opsize == 4) return (getcc32(x, y)); else return (getcc64(x, y)); } /* * Macro creation of functions getaddflags{8,16,32,64} */ #define GETADDFLAGS(sz) \ static u_long \ getaddflags##sz(uint##sz##_t x, uint##sz##_t y) \ { \ u_long rflags; \ \ __asm __volatile("add %2,%1; pushfq; popq %0" : \ "=r" (rflags), "+r" (x) : "m" (y)); \ return (rflags); \ } struct __hack GETADDFLAGS(8); GETADDFLAGS(16); GETADDFLAGS(32); GETADDFLAGS(64); static u_long getaddflags(int opsize, uint64_t x, uint64_t y) { KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8, ("getaddflags: invalid operand size %d", opsize)); if (opsize == 1) return (getaddflags8(x, y)); else if (opsize == 2) return (getaddflags16(x, y)); else if (opsize == 4) return (getaddflags32(x, y)); else return (getaddflags64(x, y)); } /* * Return the status flags that would result from doing (x & y). */ #define GETANDFLAGS(sz) \ static u_long \ getandflags##sz(uint##sz##_t x, uint##sz##_t y) \ { \ u_long rflags; \ \ __asm __volatile("and %2,%1; pushfq; popq %0" : \ "=r" (rflags), "+r" (x) : "m" (y)); \ return (rflags); \ } struct __hack GETANDFLAGS(8); GETANDFLAGS(16); GETANDFLAGS(32); GETANDFLAGS(64); static u_long getandflags(int opsize, uint64_t x, uint64_t y) { KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8, ("getandflags: invalid operand size %d", opsize)); if (opsize == 1) return (getandflags8(x, y)); else if (opsize == 2) return (getandflags16(x, y)); else if (opsize == 4) return (getandflags32(x, y)); else return (getandflags64(x, y)); } static int emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { int error, size; enum vm_reg_name reg; uint8_t byte; uint64_t val; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0x88: /* * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m) * 88/r: mov r/m8, r8 * REX + 88/r: mov r/m8, r8 (%ah, %ch, %dh, %bh not available) */ size = 1; /* override for byte operation */ error = vie_read_bytereg(vcpu, vie, &byte); if (error == 0) error = memwrite(vcpu, gpa, byte, size, arg); break; case 0x89: /* * MOV from reg (ModRM:reg) to mem (ModRM:r/m) * 89/r: mov r/m16, r16 * 89/r: mov r/m32, r32 * REX.W + 89/r mov r/m64, r64 */ reg = gpr_map[vie->reg]; error = vie_read_register(vcpu, reg, &val); if (error == 0) { val &= size2mask[size]; error = memwrite(vcpu, gpa, val, size, arg); } break; case 0x8A: /* * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg) * 8A/r: mov r8, r/m8 * REX + 8A/r: mov r8, r/m8 */ size = 1; /* override for byte operation */ error = memread(vcpu, gpa, &val, size, arg); if (error == 0) error = vie_write_bytereg(vcpu, vie, val); break; case 0x8B: /* * MOV from mem (ModRM:r/m) to reg (ModRM:reg) * 8B/r: mov r16, r/m16 * 8B/r: mov r32, r/m32 * REX.W 8B/r: mov r64, r/m64 */ error = memread(vcpu, gpa, &val, size, arg); if (error == 0) { reg = gpr_map[vie->reg]; error = vie_update_register(vcpu, reg, val, size); } break; case 0xA1: /* * MOV from 
seg:moffset to AX/EAX/RAX * A1: mov AX, moffs16 * A1: mov EAX, moffs32 * REX.W + A1: mov RAX, moffs64 */ error = memread(vcpu, gpa, &val, size, arg); if (error == 0) { reg = VM_REG_GUEST_RAX; error = vie_update_register(vcpu, reg, val, size); } break; case 0xA3: /* * MOV from AX/EAX/RAX to seg:moffset * A3: mov moffs16, AX * A3: mov moffs32, EAX * REX.W + A3: mov moffs64, RAX */ error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val); if (error == 0) { val &= size2mask[size]; error = memwrite(vcpu, gpa, val, size, arg); } break; case 0xC6: /* * MOV from imm8 to mem (ModRM:r/m) * C6/0 mov r/m8, imm8 * REX + C6/0 mov r/m8, imm8 */ size = 1; /* override for byte operation */ error = memwrite(vcpu, gpa, vie->immediate, size, arg); break; case 0xC7: /* * MOV from imm16/imm32 to mem (ModRM:r/m) * C7/0 mov r/m16, imm16 * C7/0 mov r/m32, imm32 * REX.W + C7/0 mov r/m64, imm32 (sign-extended to 64-bits) */ val = vie->immediate & size2mask[size]; error = memwrite(vcpu, gpa, val, size, arg); break; default: break; } return (error); } static int emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg) { int error, size; enum vm_reg_name reg; uint64_t val; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0xB6: /* * MOV and zero extend byte from mem (ModRM:r/m) to * reg (ModRM:reg). * * 0F B6/r movzx r16, r/m8 * 0F B6/r movzx r32, r/m8 * REX.W + 0F B6/r movzx r64, r/m8 */ /* get the first operand */ error = memread(vcpu, gpa, &val, 1, arg); if (error) break; /* get the second operand */ reg = gpr_map[vie->reg]; /* zero-extend byte */ val = (uint8_t)val; /* write the result */ error = vie_update_register(vcpu, reg, val, size); break; case 0xB7: /* * MOV and zero extend word from mem (ModRM:r/m) to * reg (ModRM:reg). * * 0F B7/r movzx r32, r/m16 * REX.W + 0F B7/r movzx r64, r/m16 */ error = memread(vcpu, gpa, &val, 2, arg); if (error) return (error); reg = gpr_map[vie->reg]; /* zero-extend word */ val = (uint16_t)val; error = vie_update_register(vcpu, reg, val, size); break; case 0xBE: /* * MOV and sign extend byte from mem (ModRM:r/m) to * reg (ModRM:reg). * * 0F BE/r movsx r16, r/m8 * 0F BE/r movsx r32, r/m8 * REX.W + 0F BE/r movsx r64, r/m8 */ /* get the first operand */ error = memread(vcpu, gpa, &val, 1, arg); if (error) break; /* get the second operand */ reg = gpr_map[vie->reg]; /* sign extend byte */ val = (int8_t)val; /* write the result */ error = vie_update_register(vcpu, reg, val, size); break; default: break; } return (error); } /* * Helper function to calculate and validate a linear address. 
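 *
 * Example use (taken from the MOVS emulation below):
 *
 *	error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
 *	    PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
 *	if (error || fault)
 *		goto done;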
*/ static int get_gla(struct vcpu *vcpu, struct vie *vie __unused, struct vm_guest_paging *paging, int opsize, int addrsize, int prot, enum vm_reg_name seg, enum vm_reg_name gpr, uint64_t *gla, int *fault) { struct seg_desc desc; uint64_t cr0, val, rflags; int error __diagused; error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0); KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error)); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error)); error = vm_get_seg_desc(vcpu, seg, &desc); KASSERT(error == 0, ("%s: error %d getting segment descriptor %d", __func__, error, seg)); error = vie_read_register(vcpu, gpr, &val); KASSERT(error == 0, ("%s: error %d getting register %d", __func__, error, gpr)); if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize, addrsize, prot, gla)) { if (seg == VM_REG_GUEST_SS) vm_inject_ss(vcpu, 0); else vm_inject_gp(vcpu); goto guest_fault; } if (vie_canonical_check(paging->cpu_mode, *gla)) { if (seg == VM_REG_GUEST_SS) vm_inject_ss(vcpu, 0); else vm_inject_gp(vcpu); goto guest_fault; } if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) { vm_inject_ac(vcpu, 0); goto guest_fault; } *fault = 0; return (0); guest_fault: *fault = 1; return (0); } static int emulate_movs(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, struct vm_guest_paging *paging, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { #ifdef _KERNEL struct vm_copyinfo copyinfo[2]; #else struct iovec copyinfo[2]; #endif uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val; uint64_t rcx, rdi, rsi, rflags; int error, fault, opsize, seg, repeat; opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize; val = 0; error = 0; /* * XXX although the MOVS instruction is only supposed to be used with * the "rep" prefix some guests like FreeBSD will use "repnz" instead. * * Empirically the "repnz" prefix has identical behavior to "rep" * and the zero flag does not make a difference. */ repeat = vie->repz_present | vie->repnz_present; if (repeat) { error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx); KASSERT(!error, ("%s: error %d getting rcx", __func__, error)); /* * The count register is %rcx, %ecx or %cx depending on the * address size of the instruction. */ if ((rcx & vie_size2mask(vie->addrsize)) == 0) { error = 0; goto done; } } /* * Source Destination Comments * -------------------------------------------- * (1) memory memory n/a * (2) memory mmio emulated * (3) mmio memory emulated * (4) mmio mmio emulated * * At this point we don't have sufficient information to distinguish * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this * out because it will succeed only when operating on regular memory. * * XXX the emulation doesn't properly handle the case where 'gpa' * is straddling the boundary between the normal memory and MMIO. */ seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS; error = get_gla(vcpu, vie, paging, opsize, vie->addrsize, PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault); if (error || fault) goto done; error = vm_copy_setup(vcpu, paging, srcaddr, opsize, PROT_READ, copyinfo, nitems(copyinfo), &fault); if (error == 0) { if (fault) goto done; /* Resume guest to handle fault */ /* * case (2): read from system memory and write to mmio. 
*/ vm_copyin(copyinfo, &val, opsize); vm_copy_teardown(copyinfo, nitems(copyinfo)); error = memwrite(vcpu, gpa, val, opsize, arg); if (error) goto done; } else { /* * 'vm_copy_setup()' is expected to fail for cases (3) and (4) * if 'srcaddr' is in the mmio space. */ error = get_gla(vcpu, vie, paging, opsize, vie->addrsize, PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr, &fault); if (error || fault) goto done; error = vm_copy_setup(vcpu, paging, dstaddr, opsize, PROT_WRITE, copyinfo, nitems(copyinfo), &fault); if (error == 0) { if (fault) goto done; /* Resume guest to handle fault */ /* * case (3): read from MMIO and write to system memory. * * A MMIO read can have side-effects so we * commit to it only after vm_copy_setup() is * successful. If a page-fault needs to be * injected into the guest then it will happen * before the MMIO read is attempted. */ error = memread(vcpu, gpa, &val, opsize, arg); if (error) goto done; vm_copyout(&val, copyinfo, opsize); vm_copy_teardown(copyinfo, nitems(copyinfo)); } else { /* * Case (4): read from and write to mmio. * * Commit to the MMIO read/write (with potential * side-effects) only after we are sure that the * instruction is not going to be restarted due * to address translation faults. */ error = vm_gla2gpa(vcpu, paging, srcaddr, PROT_READ, &srcgpa, &fault); if (error || fault) goto done; error = vm_gla2gpa(vcpu, paging, dstaddr, PROT_WRITE, &dstgpa, &fault); if (error || fault) goto done; error = memread(vcpu, srcgpa, &val, opsize, arg); if (error) goto done; error = memwrite(vcpu, dstgpa, val, opsize, arg); if (error) goto done; } } error = vie_read_register(vcpu, VM_REG_GUEST_RSI, &rsi); KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error)); error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi); KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error)); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error)); if (rflags & PSL_D) { rsi -= opsize; rdi -= opsize; } else { rsi += opsize; rdi += opsize; } error = vie_update_register(vcpu, VM_REG_GUEST_RSI, rsi, vie->addrsize); KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error)); error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi, vie->addrsize); KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error)); if (repeat) { rcx = rcx - 1; error = vie_update_register(vcpu, VM_REG_GUEST_RCX, rcx, vie->addrsize); KASSERT(!error, ("%s: error %d updating rcx", __func__, error)); /* * Repeat the instruction if the count register is not zero. */ if ((rcx & vie_size2mask(vie->addrsize)) != 0) vm_restart_instruction(vcpu); } done: KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d", __func__, error)); return (error); } static int emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, struct vm_guest_paging *paging __unused, mem_region_read_t memread __unused, mem_region_write_t memwrite, void *arg) { int error, opsize, repeat; uint64_t val; uint64_t rcx, rdi, rflags; opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize; repeat = vie->repz_present | vie->repnz_present; if (repeat) { error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx); KASSERT(!error, ("%s: error %d getting rcx", __func__, error)); /* * The count register is %rcx, %ecx or %cx depending on the * address size of the instruction. 
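 *
 * For instance, with a 32-bit address size only the low 32 bits are
 * significant (vie_size2mask(4) == 0xffffffff), so an %ecx of zero
 * terminates the "rep" even if the upper half of %rcx is non-zero.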
*/ if ((rcx & vie_size2mask(vie->addrsize)) == 0) return (0); } error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val); KASSERT(!error, ("%s: error %d getting rax", __func__, error)); error = memwrite(vcpu, gpa, val, opsize, arg); if (error) return (error); error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi); KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error)); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error)); if (rflags & PSL_D) rdi -= opsize; else rdi += opsize; error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi, vie->addrsize); KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error)); if (repeat) { rcx = rcx - 1; error = vie_update_register(vcpu, VM_REG_GUEST_RCX, rcx, vie->addrsize); KASSERT(!error, ("%s: error %d updating rcx", __func__, error)); /* * Repeat the instruction if the count register is not zero. */ if ((rcx & vie_size2mask(vie->addrsize)) != 0) vm_restart_instruction(vcpu); } return (0); } static int emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { int error, size; enum vm_reg_name reg; uint64_t result, rflags, rflags2, val1, val2; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0x23: /* * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the * result in reg. * * 23/r and r16, r/m16 * 23/r and r32, r/m32 * REX.W + 23/r and r64, r/m64 */ /* get the first operand */ reg = gpr_map[vie->reg]; error = vie_read_register(vcpu, reg, &val1); if (error) break; /* get the second operand */ error = memread(vcpu, gpa, &val2, size, arg); if (error) break; /* perform the operation and write the result */ result = val1 & val2; error = vie_update_register(vcpu, reg, result, size); break; case 0x81: case 0x83: /* * AND mem (ModRM:r/m) with immediate and store the * result in mem. * * 81 /4 and r/m16, imm16 * 81 /4 and r/m32, imm32 * REX.W + 81 /4 and r/m64, imm32 sign-extended to 64 * * 83 /4 and r/m16, imm8 sign-extended to 16 * 83 /4 and r/m32, imm8 sign-extended to 32 * REX.W + 83/4 and r/m64, imm8 sign-extended to 64 */ /* get the first operand */ error = memread(vcpu, gpa, &val1, size, arg); if (error) break; /* * perform the operation with the pre-fetched immediate * operand and write the result */ result = val1 & vie->immediate; error = memwrite(vcpu, gpa, result, size, arg); break; default: break; } if (error) return (error); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); /* * OF and CF are cleared; the SF, ZF and PF flags are set according * to the result; AF is undefined. * * The updated status flags are obtained by subtracting 0 from 'result'. */ rflags2 = getcc(size, result, 0); rflags &= ~RFLAGS_STATUS_BITS; rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N); error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); return (error); } static int emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { int error, size; enum vm_reg_name reg; uint64_t result, rflags, rflags2, val1, val2; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0x0B: /* * OR reg (ModRM:reg) and mem (ModRM:r/m) and store the * result in reg. 
* * 0b/r or r16, r/m16 * 0b/r or r32, r/m32 * REX.W + 0b/r or r64, r/m64 */ /* get the first operand */ reg = gpr_map[vie->reg]; error = vie_read_register(vcpu, reg, &val1); if (error) break; /* get the second operand */ error = memread(vcpu, gpa, &val2, size, arg); if (error) break; /* perform the operation and write the result */ result = val1 | val2; error = vie_update_register(vcpu, reg, result, size); break; case 0x81: case 0x83: /* * OR mem (ModRM:r/m) with immediate and store the * result in mem. * * 81 /1 or r/m16, imm16 * 81 /1 or r/m32, imm32 * REX.W + 81 /1 or r/m64, imm32 sign-extended to 64 * * 83 /1 or r/m16, imm8 sign-extended to 16 * 83 /1 or r/m32, imm8 sign-extended to 32 * REX.W + 83/1 or r/m64, imm8 sign-extended to 64 */ /* get the first operand */ error = memread(vcpu, gpa, &val1, size, arg); if (error) break; /* * perform the operation with the pre-fetched immediate * operand and write the result */ result = val1 | vie->immediate; error = memwrite(vcpu, gpa, result, size, arg); break; default: break; } if (error) return (error); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); /* * OF and CF are cleared; the SF, ZF and PF flags are set according * to the result; AF is undefined. * * The updated status flags are obtained by subtracting 0 from 'result'. */ rflags2 = getcc(size, result, 0); rflags &= ~RFLAGS_STATUS_BITS; rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N); error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); return (error); } static int emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg) { int error, size; uint64_t regop, memop, op1, op2, rflags, rflags2; enum vm_reg_name reg; size = vie->opsize; switch (vie->op.op_byte) { case 0x39: case 0x3B: /* * 39/r CMP r/m16, r16 * 39/r CMP r/m32, r32 * REX.W 39/r CMP r/m64, r64 * * 3B/r CMP r16, r/m16 * 3B/r CMP r32, r/m32 * REX.W + 3B/r CMP r64, r/m64 * * Compare the first operand with the second operand and * set status flags in EFLAGS register. The comparison is * performed by subtracting the second operand from the first * operand and then setting the status flags. */ /* Get the register operand */ reg = gpr_map[vie->reg]; error = vie_read_register(vcpu, reg, ®op); if (error) return (error); /* Get the memory operand */ error = memread(vcpu, gpa, &memop, size, arg); if (error) return (error); if (vie->op.op_byte == 0x3B) { op1 = regop; op2 = memop; } else { op1 = memop; op2 = regop; } rflags2 = getcc(size, op1, op2); break; case 0x80: case 0x81: case 0x83: /* * 80 /7 cmp r/m8, imm8 * REX + 80 /7 cmp r/m8, imm8 * * 81 /7 cmp r/m16, imm16 * 81 /7 cmp r/m32, imm32 * REX.W + 81 /7 cmp r/m64, imm32 sign-extended to 64 * * 83 /7 cmp r/m16, imm8 sign-extended to 16 * 83 /7 cmp r/m32, imm8 sign-extended to 32 * REX.W + 83 /7 cmp r/m64, imm8 sign-extended to 64 * * Compare mem (ModRM:r/m) with immediate and set * status flags according to the results. The * comparison is performed by subtracting the * immediate from the first operand and then setting * the status flags. 
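 * Opcode 0x80 always operates on a single byte, so the operand size
 * is forced to 1 below regardless of any operand-size prefix.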
* */ if (vie->op.op_byte == 0x80) size = 1; /* get the first operand */ error = memread(vcpu, gpa, &op1, size, arg); if (error) return (error); rflags2 = getcc(size, op1, vie->immediate); break; default: return (EINVAL); } error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); rflags &= ~RFLAGS_STATUS_BITS; rflags |= rflags2 & RFLAGS_STATUS_BITS; error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); return (error); } static int emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg) { int error, size; uint64_t op1, rflags, rflags2; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0xF7: /* * F7 /0 test r/m16, imm16 * F7 /0 test r/m32, imm32 * REX.W + F7 /0 test r/m64, imm32 sign-extended to 64 * * Test mem (ModRM:r/m) with immediate and set status * flags according to the results. The comparison is * performed by anding the immediate from the first * operand and then setting the status flags. */ if ((vie->reg & 7) != 0) return (EINVAL); error = memread(vcpu, gpa, &op1, size, arg); if (error) return (error); rflags2 = getandflags(size, op1, vie->immediate); break; default: return (EINVAL); } error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); /* * OF and CF are cleared; the SF, ZF and PF flags are set according * to the result; AF is undefined. */ rflags &= ~RFLAGS_STATUS_BITS; rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N); error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); return (error); } static int emulate_bextr(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, struct vm_guest_paging *paging, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg) { uint64_t src1, src2, dst, rflags; unsigned start, len, size; int error; size = vie->opsize; error = EINVAL; /* * VEX.LZ.0F38.W0 F7 /r BEXTR r32a, r/m32, r32b * VEX.LZ.0F38.W1 F7 /r BEXTR r64a, r/m64, r64b * * Destination operand is ModRM:reg. Source operands are ModRM:r/m and * Vex.vvvv. * * Operand size is always 32-bit if not in 64-bit mode (W1 is ignored). */ if (size != 4 && paging->cpu_mode != CPU_MODE_64BIT) size = 4; /* * Extracts contiguous bits from the first /source/ operand (second * operand) using an index and length specified in the second /source/ * operand (third operand). */ error = memread(vcpu, gpa, &src1, size, arg); if (error) return (error); error = vie_read_register(vcpu, gpr_map[vie->vex_reg], &src2); if (error) return (error); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); start = (src2 & 0xff); len = (src2 & 0xff00) >> 8; /* If no bits are extracted, the destination register is cleared. */ dst = 0; /* If START exceeds the operand size, no bits are extracted. */ if (start > size * 8) goto done; /* Length is bounded by both the destination size and start offset. */ if (start + len > size * 8) len = (size * 8) - start; if (len == 0) goto done; if (start > 0) src1 = (src1 >> start); if (len < 64) src1 = src1 & ((1ull << len) - 1); dst = src1; done: error = vie_update_register(vcpu, gpr_map[vie->reg], dst, size); if (error) return (error); /* * AMD: OF, CF cleared; SF/AF/PF undefined; ZF set by result. * Intel: ZF is set by result; AF/SF/PF undefined; all others cleared. 
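 * Clearing every status bit and then setting ZF for a zero result
 * satisfies both definitions.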
*/ rflags &= ~RFLAGS_STATUS_BITS; if (dst == 0) rflags |= PSL_Z; error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); return (error); } static int emulate_add(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg) { int error, size; uint64_t nval, rflags, rflags2, val1, val2; enum vm_reg_name reg; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0x03: /* * ADD r/m to r and store the result in r * * 03/r ADD r16, r/m16 * 03/r ADD r32, r/m32 * REX.W + 03/r ADD r64, r/m64 */ /* get the first operand */ reg = gpr_map[vie->reg]; error = vie_read_register(vcpu, reg, &val1); if (error) break; /* get the second operand */ error = memread(vcpu, gpa, &val2, size, arg); if (error) break; /* perform the operation and write the result */ nval = val1 + val2; error = vie_update_register(vcpu, reg, nval, size); break; default: break; } if (!error) { rflags2 = getaddflags(size, val1, val2); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); rflags &= ~RFLAGS_STATUS_BITS; rflags |= rflags2 & RFLAGS_STATUS_BITS; error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); } return (error); } static int emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg) { int error, size; uint64_t nval, rflags, rflags2, val1, val2; enum vm_reg_name reg; size = vie->opsize; error = EINVAL; switch (vie->op.op_byte) { case 0x2B: /* * SUB r/m from r and store the result in r * * 2B/r SUB r16, r/m16 * 2B/r SUB r32, r/m32 * REX.W + 2B/r SUB r64, r/m64 */ /* get the first operand */ reg = gpr_map[vie->reg]; error = vie_read_register(vcpu, reg, &val1); if (error) break; /* get the second operand */ error = memread(vcpu, gpa, &val2, size, arg); if (error) break; /* perform the operation and write the result */ nval = val1 - val2; error = vie_update_register(vcpu, reg, nval, size); break; default: break; } if (!error) { rflags2 = getcc(size, val1, val2); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); if (error) return (error); rflags &= ~RFLAGS_STATUS_BITS; rflags |= rflags2 & RFLAGS_STATUS_BITS; error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); } return (error); } static int emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie, struct vm_guest_paging *paging, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { #ifdef _KERNEL struct vm_copyinfo copyinfo[2]; #else struct iovec copyinfo[2]; #endif struct seg_desc ss_desc; uint64_t cr0, rflags, rsp, stack_gla, val; int error, fault, size, stackaddrsize, pushop; val = 0; size = vie->opsize; pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0; /* * From "Address-Size Attributes for Stack Accesses", Intel SDL, Vol 1 */ if (paging->cpu_mode == CPU_MODE_REAL) { stackaddrsize = 2; } else if (paging->cpu_mode == CPU_MODE_64BIT) { /* * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3 * - Stack pointer size is always 64-bits. * - PUSH/POP of 32-bit values is not possible in 64-bit mode. * - 16-bit PUSH/POP is supported by using the operand size * override prefix (66H). */ stackaddrsize = 8; size = vie->opsize_override ? 2 : 8; } else { /* * In protected or compatibility mode the 'B' flag in the * stack-segment descriptor determines the size of the * stack pointer. 
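 * SEG_DESC_DEF32() extracts that flag from the cached %ss descriptor:
 * a 32-bit stack pointer when set, 16-bit otherwise.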
*/ error = vm_get_seg_desc(vcpu, VM_REG_GUEST_SS, &ss_desc); KASSERT(error == 0, ("%s: error %d getting SS descriptor", __func__, error)); if (SEG_DESC_DEF32(ss_desc.access)) stackaddrsize = 4; else stackaddrsize = 2; } error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0); KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error)); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error)); error = vie_read_register(vcpu, VM_REG_GUEST_RSP, &rsp); KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error)); if (pushop) { rsp -= size; } if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc, rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ, &stack_gla)) { vm_inject_ss(vcpu, 0); return (0); } if (vie_canonical_check(paging->cpu_mode, stack_gla)) { vm_inject_ss(vcpu, 0); return (0); } if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) { vm_inject_ac(vcpu, 0); return (0); } error = vm_copy_setup(vcpu, paging, stack_gla, size, pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo), &fault); if (error || fault) return (error); if (pushop) { error = memread(vcpu, mmio_gpa, &val, size, arg); if (error == 0) vm_copyout(&val, copyinfo, size); } else { vm_copyin(copyinfo, &val, size); error = memwrite(vcpu, mmio_gpa, val, size, arg); rsp += size; } vm_copy_teardown(copyinfo, nitems(copyinfo)); if (error == 0) { error = vie_update_register(vcpu, VM_REG_GUEST_RSP, rsp, stackaddrsize); KASSERT(error == 0, ("error %d updating rsp", error)); } return (error); } static int emulate_push(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie, struct vm_guest_paging *paging, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { int error; /* * Table A-6, "Opcode Extensions", Intel SDM, Vol 2. * * PUSH is part of the group 5 extended opcodes and is identified * by ModRM:reg = b110. */ if ((vie->reg & 7) != 6) return (EINVAL); error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread, memwrite, arg); return (error); } static int emulate_pop(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie, struct vm_guest_paging *paging, mem_region_read_t memread, mem_region_write_t memwrite, void *arg) { int error; /* * Table A-6, "Opcode Extensions", Intel SDM, Vol 2. * * POP is part of the group 1A extended opcodes and is identified * by ModRM:reg = b000. */ if ((vie->reg & 7) != 0) return (EINVAL); error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread, memwrite, arg); return (error); } static int emulate_group1(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, struct vm_guest_paging *paging __unused, mem_region_read_t memread, mem_region_write_t memwrite, void *memarg) { int error; switch (vie->reg & 7) { case 0x1: /* OR */ error = emulate_or(vcpu, gpa, vie, memread, memwrite, memarg); break; case 0x4: /* AND */ error = emulate_and(vcpu, gpa, vie, memread, memwrite, memarg); break; case 0x7: /* CMP */ error = emulate_cmp(vcpu, gpa, vie, memread, memwrite, memarg); break; default: error = EINVAL; break; } return (error); } static int emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *memarg) { uint64_t val, rflags; int error, bitmask, bitoff; /* * 0F BA is a Group 8 extended opcode. * * Currently we only emulate the 'Bit Test' instruction which is * identified by a ModR/M:reg encoding of 100b. 
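 * The modifying forms BTS (/5), BTR (/6) and BTC (/7) are rejected
 * with EINVAL.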
*/ if ((vie->reg & 7) != 4) return (EINVAL); error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags); KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error)); error = memread(vcpu, gpa, &val, vie->opsize, memarg); if (error) return (error); /* * Intel SDM, Vol 2, Table 3-2: * "Range of Bit Positions Specified by Bit Offset Operands" */ bitmask = vie->opsize * 8 - 1; bitoff = vie->immediate & bitmask; /* Copy the bit into the Carry flag in %rflags */ if (val & (1UL << bitoff)) rflags |= PSL_C; else rflags &= ~PSL_C; error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8); KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error)); return (0); } static int emulate_twob_group15(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, mem_region_read_t memread, mem_region_write_t memwrite __unused, void *memarg) { int error; uint64_t buf; switch (vie->reg & 7) { case 0x7: /* CLFLUSH, CLFLUSHOPT, and SFENCE */ if (vie->mod == 0x3) { /* * SFENCE. Ignore it, VM exit provides enough * barriers on its own. */ error = 0; } else { /* * CLFLUSH, CLFLUSHOPT. Only check for access * rights. */ error = memread(vcpu, gpa, &buf, 1, memarg); } break; default: error = EINVAL; break; } return (error); } int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie, struct vm_guest_paging *paging, mem_region_read_t memread, mem_region_write_t memwrite, void *memarg) { int error; if (!vie->decoded) return (EINVAL); switch (vie->op.op_type) { case VIE_OP_TYPE_GROUP1: error = emulate_group1(vcpu, gpa, vie, paging, memread, memwrite, memarg); break; case VIE_OP_TYPE_POP: error = emulate_pop(vcpu, gpa, vie, paging, memread, memwrite, memarg); break; case VIE_OP_TYPE_PUSH: error = emulate_push(vcpu, gpa, vie, paging, memread, memwrite, memarg); break; case VIE_OP_TYPE_CMP: error = emulate_cmp(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_MOV: error = emulate_mov(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_MOVSX: case VIE_OP_TYPE_MOVZX: error = emulate_movx(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_MOVS: error = emulate_movs(vcpu, gpa, vie, paging, memread, memwrite, memarg); break; case VIE_OP_TYPE_STOS: error = emulate_stos(vcpu, gpa, vie, paging, memread, memwrite, memarg); break; case VIE_OP_TYPE_AND: error = emulate_and(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_OR: error = emulate_or(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_SUB: error = emulate_sub(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_BITTEST: error = emulate_bittest(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_TWOB_GRP15: error = emulate_twob_group15(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_ADD: error = emulate_add(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_TEST: error = emulate_test(vcpu, gpa, vie, memread, memwrite, memarg); break; case VIE_OP_TYPE_BEXTR: error = emulate_bextr(vcpu, gpa, vie, paging, memread, memwrite, memarg); break; default: error = EINVAL; break; } return (error); } int vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla) { KASSERT(size == 1 || size == 2 || size == 4 || size == 8, ("%s: invalid size %d", __func__, size)); KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl)); if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0) return (0); return ((gla & (size - 1)) ? 
1 : 0); } int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla) { uint64_t mask; if (cpu_mode != CPU_MODE_64BIT) return (0); /* * The value of the bit 47 in the 'gla' should be replicated in the * most significant 16 bits. */ mask = ~((1UL << 48) - 1); if (gla & (1UL << 47)) return ((gla & mask) != mask); else return ((gla & mask) != 0); } uint64_t vie_size2mask(int size) { KASSERT(size == 1 || size == 2 || size == 4 || size == 8, ("vie_size2mask: invalid size %d", size)); return (size2mask[size]); } int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg, struct seg_desc *desc, uint64_t offset, int length, int addrsize, int prot, uint64_t *gla) { uint64_t firstoff, low_limit, high_limit, segbase; int glasize, type; KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS, ("%s: invalid segment %d", __func__, seg)); KASSERT(length == 1 || length == 2 || length == 4 || length == 8, ("%s: invalid operand size %d", __func__, length)); KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0, ("%s: invalid prot %#x", __func__, prot)); firstoff = offset; if (cpu_mode == CPU_MODE_64BIT) { KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address " "size %d for cpu_mode %d", __func__, addrsize, cpu_mode)); glasize = 8; } else { KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address " "size %d for cpu mode %d", __func__, addrsize, cpu_mode)); glasize = 4; /* * If the segment selector is loaded with a NULL selector * then the descriptor is unusable and attempting to use * it results in a #GP(0). */ if (SEG_DESC_UNUSABLE(desc->access)) return (-1); /* * The processor generates a #NP exception when a segment * register is loaded with a selector that points to a * descriptor that is not present. If this was the case then * it would have been checked before the VM-exit. */ KASSERT(SEG_DESC_PRESENT(desc->access), ("segment %d not present: %#x", seg, desc->access)); /* * The descriptor type must indicate a code/data segment. */ type = SEG_DESC_TYPE(desc->access); KASSERT(type >= 16 && type <= 31, ("segment %d has invalid " "descriptor type %#x", seg, type)); if (prot & PROT_READ) { /* #GP on a read access to a exec-only code segment */ if ((type & 0xA) == 0x8) return (-1); } if (prot & PROT_WRITE) { /* * #GP on a write access to a code segment or a * read-only data segment. */ if (type & 0x8) /* code segment */ return (-1); if ((type & 0xA) == 0) /* read-only data seg */ return (-1); } /* * 'desc->limit' is fully expanded taking granularity into * account. */ if ((type & 0xC) == 0x4) { /* expand-down data segment */ low_limit = desc->limit + 1; high_limit = SEG_DESC_DEF32(desc->access) ? 0xffffffff : 0xffff; } else { /* code segment or expand-up data segment */ low_limit = 0; high_limit = desc->limit; } while (length > 0) { offset &= vie_size2mask(addrsize); if (offset < low_limit || offset > high_limit) return (-1); offset++; length--; } } /* * In 64-bit mode all segments except %fs and %gs have a segment * base address of 0. */ if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS && seg != VM_REG_GUEST_GS) { segbase = 0; } else { segbase = desc->base; } /* * Truncate 'firstoff' to the effective address size before adding * it to the segment base. */ firstoff &= vie_size2mask(addrsize); *gla = (segbase + firstoff) & vie_size2mask(glasize); return (0); } /* * Prepare a partially decoded vie for a 2nd attempt. 
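 * Everything from 'vie_startzero' onwards is cleared; the previously
 * fetched instruction bytes and their length are preserved.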
*/ void vie_restart(struct vie *vie) { _Static_assert( offsetof(struct vie, inst) < offsetof(struct vie, vie_startzero) && offsetof(struct vie, num_valid) < offsetof(struct vie, vie_startzero), "restart should not erase instruction length or contents"); memset((char *)vie + offsetof(struct vie, vie_startzero), 0, sizeof(*vie) - offsetof(struct vie, vie_startzero)); vie->base_register = VM_REG_LAST; vie->index_register = VM_REG_LAST; vie->segment_register = VM_REG_LAST; } void vie_init(struct vie *vie, const char *inst_bytes, int inst_length) { KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE, ("%s: invalid instruction length (%d)", __func__, inst_length)); vie_restart(vie); memset(vie->inst, 0, sizeof(vie->inst)); if (inst_length != 0) memcpy(vie->inst, inst_bytes, inst_length); vie->num_valid = inst_length; } #ifdef _KERNEL static int pf_error_code(int usermode, int prot, int rsvd, uint64_t pte) { int error_code = 0; if (pte & PG_V) error_code |= PGEX_P; if (prot & VM_PROT_WRITE) error_code |= PGEX_W; if (usermode) error_code |= PGEX_U; if (rsvd) error_code |= PGEX_RSV; if (prot & VM_PROT_EXECUTE) error_code |= PGEX_I; return (error_code); } static void ptp_release(void **cookie) { if (*cookie != NULL) { vm_gpa_release(*cookie); *cookie = NULL; } } static void * ptp_hold(struct vcpu *vcpu, vm_paddr_t ptpphys, size_t len, void **cookie) { void *ptr; ptp_release(cookie); ptr = vm_gpa_hold(vcpu, ptpphys, len, VM_PROT_RW, cookie); return (ptr); } static int _vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *guest_fault, bool check_only) { int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable; u_int retries; uint64_t *ptpbase, ptpphys, pte, pgsize; uint32_t *ptpbase32, pte32; void *cookie; *guest_fault = 0; usermode = (paging->cpl == 3 ? 1 : 0); writable = prot & VM_PROT_WRITE; cookie = NULL; retval = 0; retries = 0; restart: ptpphys = paging->cr3; /* root of the page tables */ ptp_release(&cookie); if (retries++ > 0) maybe_yield(); if (vie_canonical_check(paging->cpu_mode, gla)) { /* * XXX assuming a non-stack reference otherwise a stack fault * should be generated. */ if (!check_only) vm_inject_gp(vcpu); goto fault; } if (paging->paging_mode == PAGING_MODE_FLAT) { *gpa = gla; goto done; } if (paging->paging_mode == PAGING_MODE_32) { nlevels = 2; while (--nlevels >= 0) { /* Zero out the lower 12 bits. */ ptpphys &= ~0xfff; ptpbase32 = ptp_hold(vcpu, ptpphys, PAGE_SIZE, &cookie); if (ptpbase32 == NULL) goto error; ptpshift = PAGE_SHIFT + nlevels * 10; ptpindex = (gla >> ptpshift) & 0x3FF; pgsize = 1UL << ptpshift; pte32 = ptpbase32[ptpindex]; if ((pte32 & PG_V) == 0 || (usermode && (pte32 & PG_U) == 0) || (writable && (pte32 & PG_RW) == 0)) { if (!check_only) { pfcode = pf_error_code(usermode, prot, 0, pte32); vm_inject_pf(vcpu, pfcode, gla); } goto fault; } /* * Emulate the x86 MMU's management of the accessed * and dirty flags. While the accessed flag is set * at every level of the page table, the dirty flag * is only set at the last level providing the guest * physical address. 
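 * The bits are set with compare-and-swap; losing the race to another
 * update simply restarts the walk from %cr3.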
*/ if (!check_only && (pte32 & PG_A) == 0) { if (atomic_cmpset_32(&ptpbase32[ptpindex], pte32, pte32 | PG_A) == 0) { goto restart; } } /* XXX must be ignored if CR4.PSE=0 */ if (nlevels > 0 && (pte32 & PG_PS) != 0) break; ptpphys = pte32; } /* Set the dirty bit in the page table entry if necessary */ if (!check_only && writable && (pte32 & PG_M) == 0) { if (atomic_cmpset_32(&ptpbase32[ptpindex], pte32, pte32 | PG_M) == 0) { goto restart; } } /* Zero out the lower 'ptpshift' bits */ pte32 >>= ptpshift; pte32 <<= ptpshift; *gpa = pte32 | (gla & (pgsize - 1)); goto done; } if (paging->paging_mode == PAGING_MODE_PAE) { /* Zero out the lower 5 bits and the upper 32 bits */ ptpphys &= 0xffffffe0UL; ptpbase = ptp_hold(vcpu, ptpphys, sizeof(*ptpbase) * 4, &cookie); if (ptpbase == NULL) goto error; ptpindex = (gla >> 30) & 0x3; pte = ptpbase[ptpindex]; if ((pte & PG_V) == 0) { if (!check_only) { pfcode = pf_error_code(usermode, prot, 0, pte); vm_inject_pf(vcpu, pfcode, gla); } goto fault; } ptpphys = pte; nlevels = 2; } else if (paging->paging_mode == PAGING_MODE_64_LA57) { nlevels = 5; } else { nlevels = 4; } while (--nlevels >= 0) { /* Zero out the lower 12 bits and the upper 12 bits */ ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12; ptpbase = ptp_hold(vcpu, ptpphys, PAGE_SIZE, &cookie); if (ptpbase == NULL) goto error; ptpshift = PAGE_SHIFT + nlevels * 9; ptpindex = (gla >> ptpshift) & 0x1FF; pgsize = 1UL << ptpshift; pte = ptpbase[ptpindex]; if ((pte & PG_V) == 0 || (usermode && (pte & PG_U) == 0) || (writable && (pte & PG_RW) == 0)) { if (!check_only) { pfcode = pf_error_code(usermode, prot, 0, pte); vm_inject_pf(vcpu, pfcode, gla); } goto fault; } /* Set the accessed bit in the page table entry */ if (!check_only && (pte & PG_A) == 0) { if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_A) == 0) { goto restart; } } if (nlevels > 0 && (pte & PG_PS) != 0) { if (pgsize > 1 * GB) { if (!check_only) { pfcode = pf_error_code(usermode, prot, 1, pte); vm_inject_pf(vcpu, pfcode, gla); } goto fault; } break; } ptpphys = pte; } /* Set the dirty bit in the page table entry if necessary */ if (!check_only && writable && (pte & PG_M) == 0) { if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0) goto restart; } /* Zero out the lower 'ptpshift' bits and the upper 12 bits */ pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12; *gpa = pte | (gla & (pgsize - 1)); done: ptp_release(&cookie); KASSERT(retval == 0 || retval == EFAULT, ("%s: unexpected retval %d", __func__, retval)); return (retval); error: retval = EFAULT; goto done; fault: *guest_fault = 1; goto done; } int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *guest_fault) { return (_vm_gla2gpa(vcpu, paging, gla, prot, gpa, guest_fault, false)); } int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *guest_fault) { return (_vm_gla2gpa(vcpu, paging, gla, prot, gpa, guest_fault, true)); } int vmm_fetch_instruction(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t rip, int inst_length, struct vie *vie, int *faultptr) { struct vm_copyinfo copyinfo[2]; int error, prot; if (inst_length > VIE_INST_SIZE) panic("vmm_fetch_instruction: invalid length %d", inst_length); prot = PROT_READ | PROT_EXEC; error = vm_copy_setup(vcpu, paging, rip, inst_length, prot, copyinfo, nitems(copyinfo), faultptr); if (error || *faultptr) return (error); vm_copyin(copyinfo, vie->inst, inst_length); vm_copy_teardown(copyinfo, 
nitems(copyinfo)); vie->num_valid = inst_length; return (0); } #endif /* _KERNEL */ static int vie_peek(struct vie *vie, uint8_t *x) { if (vie->num_processed < vie->num_valid) { *x = vie->inst[vie->num_processed]; return (0); } else return (-1); } static void vie_advance(struct vie *vie) { vie->num_processed++; } static bool segment_override(uint8_t x, int *seg) { switch (x) { case 0x2E: *seg = VM_REG_GUEST_CS; break; case 0x36: *seg = VM_REG_GUEST_SS; break; case 0x3E: *seg = VM_REG_GUEST_DS; break; case 0x26: *seg = VM_REG_GUEST_ES; break; case 0x64: *seg = VM_REG_GUEST_FS; break; case 0x65: *seg = VM_REG_GUEST_GS; break; default: return (false); } return (true); } static int decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d) { uint8_t x; while (1) { if (vie_peek(vie, &x)) return (-1); if (x == 0x66) vie->opsize_override = 1; else if (x == 0x67) vie->addrsize_override = 1; else if (x == 0xF3) vie->repz_present = 1; else if (x == 0xF2) vie->repnz_present = 1; else if (segment_override(x, &vie->segment_register)) vie->segment_override = 1; else break; vie_advance(vie); } /* * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2: * - Only one REX prefix is allowed per instruction. * - The REX prefix must immediately precede the opcode byte or the * escape opcode byte. * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3) * the mandatory prefix must come before the REX prefix. */ if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) { vie->rex_present = 1; vie->rex_w = x & 0x8 ? 1 : 0; vie->rex_r = x & 0x4 ? 1 : 0; vie->rex_x = x & 0x2 ? 1 : 0; vie->rex_b = x & 0x1 ? 1 : 0; vie_advance(vie); } /* * § 2.3.5, "The VEX Prefix", SDM Vol 2. */ if ((cpu_mode == CPU_MODE_64BIT || cpu_mode == CPU_MODE_COMPATIBILITY) && x == 0xC4) { const struct vie_op *optab; /* 3-byte VEX prefix. */ vie->vex_present = 1; vie_advance(vie); if (vie_peek(vie, &x)) return (-1); /* * 2nd byte: [R', X', B', mmmmm[4:0]]. Bits are inverted * relative to REX encoding. */ vie->rex_r = x & 0x80 ? 0 : 1; vie->rex_x = x & 0x40 ? 0 : 1; vie->rex_b = x & 0x20 ? 0 : 1; switch (x & 0x1F) { case 0x2: /* 0F 38. */ optab = three_byte_opcodes_0f38; break; case 0x1: /* 0F class - nothing handled here yet. */ /* FALLTHROUGH */ case 0x3: /* 0F 3A class - nothing handled here yet. */ /* FALLTHROUGH */ default: /* Reserved (#UD). */ return (-1); } vie_advance(vie); if (vie_peek(vie, &x)) return (-1); /* 3rd byte: [W, vvvv[6:3], L, pp[1:0]]. */ vie->rex_w = x & 0x80 ? 1 : 0; vie->vex_reg = ((~(unsigned)x & 0x78u) >> 3); vie->vex_l = !!(x & 0x4); vie->vex_pp = (x & 0x3); /* PP: 1=66 2=F3 3=F2 prefixes. */ switch (vie->vex_pp) { case 0x1: vie->opsize_override = 1; break; case 0x2: vie->repz_present = 1; break; case 0x3: vie->repnz_present = 1; break; } vie_advance(vie); /* Opcode, sans literal prefix prefix. */ if (vie_peek(vie, &x)) return (-1); vie->op = optab[x]; if (vie->op.op_type == VIE_OP_TYPE_NONE) return (-1); vie_advance(vie); } /* * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1 */ if (cpu_mode == CPU_MODE_64BIT) { /* * Default address size is 64-bits and default operand size * is 32-bits. */ vie->addrsize = vie->addrsize_override ? 4 : 8; if (vie->rex_w) vie->opsize = 8; else if (vie->opsize_override) vie->opsize = 2; else vie->opsize = 4; } else if (cs_d) { /* Default address and operand sizes are 32-bits */ vie->addrsize = vie->addrsize_override ? 2 : 4; vie->opsize = vie->opsize_override ? 
2 : 4; } else { /* Default address and operand sizes are 16-bits */ vie->addrsize = vie->addrsize_override ? 4 : 2; vie->opsize = vie->opsize_override ? 4 : 2; } return (0); } static int decode_two_byte_opcode(struct vie *vie) { uint8_t x; if (vie_peek(vie, &x)) return (-1); vie->op = two_byte_opcodes[x]; if (vie->op.op_type == VIE_OP_TYPE_NONE) return (-1); vie_advance(vie); return (0); } static int decode_opcode(struct vie *vie) { uint8_t x; if (vie_peek(vie, &x)) return (-1); /* Already did this via VEX prefix. */ if (vie->op.op_type != VIE_OP_TYPE_NONE) return (0); vie->op = one_byte_opcodes[x]; if (vie->op.op_type == VIE_OP_TYPE_NONE) return (-1); vie_advance(vie); if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE) return (decode_two_byte_opcode(vie)); return (0); } static int decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode) { uint8_t x; if (vie->op.op_flags & VIE_OP_F_NO_MODRM) return (0); if (cpu_mode == CPU_MODE_REAL) return (-1); if (vie_peek(vie, &x)) return (-1); vie->mod = (x >> 6) & 0x3; vie->rm = (x >> 0) & 0x7; vie->reg = (x >> 3) & 0x7; /* * A direct addressing mode makes no sense in the context of an EPT * fault. There has to be a memory access involved to cause the * EPT fault. */ if (vie->mod == VIE_MOD_DIRECT) return (-1); if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) || (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) { /* * Table 2-5: Special Cases of REX Encodings * * mod=0, r/m=5 is used in the compatibility mode to * indicate a disp32 without a base register. * * mod!=3, r/m=4 is used in the compatibility mode to * indicate that the SIB byte is present. * * The 'b' bit in the REX prefix is don't care in * this case. */ } else { vie->rm |= (vie->rex_b << 3); } vie->reg |= (vie->rex_r << 3); /* SIB */ if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB) goto done; vie->base_register = gpr_map[vie->rm]; switch (vie->mod) { case VIE_MOD_INDIRECT_DISP8: vie->disp_bytes = 1; break; case VIE_MOD_INDIRECT_DISP32: vie->disp_bytes = 4; break; case VIE_MOD_INDIRECT: if (vie->rm == VIE_RM_DISP32) { vie->disp_bytes = 4; /* * Table 2-7. RIP-Relative Addressing * * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32 * whereas in compatibility mode it just implies disp32. */ if (cpu_mode == CPU_MODE_64BIT) vie->base_register = VM_REG_GUEST_RIP; else vie->base_register = VM_REG_LAST; } break; } done: vie_advance(vie); return (0); } static int decode_sib(struct vie *vie) { uint8_t x; /* Proceed only if SIB byte is present */ if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB) return (0); if (vie_peek(vie, &x)) return (-1); /* De-construct the SIB byte */ vie->ss = (x >> 6) & 0x3; vie->index = (x >> 3) & 0x7; vie->base = (x >> 0) & 0x7; /* Apply the REX prefix modifiers */ vie->index |= vie->rex_x << 3; vie->base |= vie->rex_b << 3; switch (vie->mod) { case VIE_MOD_INDIRECT_DISP8: vie->disp_bytes = 1; break; case VIE_MOD_INDIRECT_DISP32: vie->disp_bytes = 4; break; } if (vie->mod == VIE_MOD_INDIRECT && (vie->base == 5 || vie->base == 13)) { /* * Special case when base register is unused if mod = 0 * and base = %rbp or %r13. * * Documented in: * Table 2-3: 32-bit Addressing Forms with the SIB Byte * Table 2-5: Special Cases of REX Encodings */ vie->disp_bytes = 4; } else { vie->base_register = gpr_map[vie->base]; } /* * All encodings of 'index' are valid except for %rsp (4). 
* * Documented in: * Table 2-3: 32-bit Addressing Forms with the SIB Byte * Table 2-5: Special Cases of REX Encodings */ if (vie->index != 4) vie->index_register = gpr_map[vie->index]; /* 'scale' makes sense only in the context of an index register */ if (vie->index_register < VM_REG_LAST) vie->scale = 1 << vie->ss; vie_advance(vie); return (0); } static int decode_displacement(struct vie *vie) { int n, i; uint8_t x; union { char buf[4]; int8_t signed8; int32_t signed32; } u; if ((n = vie->disp_bytes) == 0) return (0); if (n != 1 && n != 4) panic("decode_displacement: invalid disp_bytes %d", n); for (i = 0; i < n; i++) { if (vie_peek(vie, &x)) return (-1); u.buf[i] = x; vie_advance(vie); } if (n == 1) vie->displacement = u.signed8; /* sign-extended */ else vie->displacement = u.signed32; /* sign-extended */ return (0); } static int decode_immediate(struct vie *vie) { int i, n; uint8_t x; union { char buf[4]; int8_t signed8; int16_t signed16; int32_t signed32; } u; /* Figure out immediate operand size (if any) */ if (vie->op.op_flags & VIE_OP_F_IMM) { /* * Section 2.2.1.5 "Immediates", Intel SDM: * In 64-bit mode the typical size of immediate operands * remains 32-bits. When the operand size if 64-bits, the * processor sign-extends all immediates to 64-bits prior * to their use. */ if (vie->opsize == 4 || vie->opsize == 8) vie->imm_bytes = 4; else vie->imm_bytes = 2; } else if (vie->op.op_flags & VIE_OP_F_IMM8) { vie->imm_bytes = 1; } if ((n = vie->imm_bytes) == 0) return (0); KASSERT(n == 1 || n == 2 || n == 4, ("%s: invalid number of immediate bytes: %d", __func__, n)); for (i = 0; i < n; i++) { if (vie_peek(vie, &x)) return (-1); u.buf[i] = x; vie_advance(vie); } /* sign-extend the immediate value before use */ if (n == 1) vie->immediate = u.signed8; else if (n == 2) vie->immediate = u.signed16; else vie->immediate = u.signed32; return (0); } static int decode_moffset(struct vie *vie) { int i, n; uint8_t x; union { char buf[8]; uint64_t u64; } u; if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0) return (0); /* * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM: * The memory offset size follows the address-size of the instruction. */ n = vie->addrsize; KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n)); u.u64 = 0; for (i = 0; i < n; i++) { if (vie_peek(vie, &x)) return (-1); u.buf[i] = x; vie_advance(vie); } vie->displacement = u.u64; return (0); } #ifdef _KERNEL /* * Verify that the 'guest linear address' provided as collateral of the nested * page table fault matches with our instruction decoding. 
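 * A mismatch means the decoded base, index, scale or displacement is
 * wrong and the caller treats the instruction as undecodable.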
*/ static int verify_gla(struct vcpu *vcpu, uint64_t gla, struct vie *vie, enum vm_cpu_mode cpu_mode) { int error; uint64_t base, segbase, idx, gla2; enum vm_reg_name seg; struct seg_desc desc; /* Skip 'gla' verification */ if (gla == VIE_INVALID_GLA) return (0); base = 0; if (vie->base_register != VM_REG_LAST) { error = vm_get_register(vcpu, vie->base_register, &base); if (error) { printf("verify_gla: error %d getting base reg %d\n", error, vie->base_register); return (-1); } /* * RIP-relative addressing starts from the following * instruction */ if (vie->base_register == VM_REG_GUEST_RIP) base += vie->num_processed; } idx = 0; if (vie->index_register != VM_REG_LAST) { error = vm_get_register(vcpu, vie->index_register, &idx); if (error) { printf("verify_gla: error %d getting index reg %d\n", error, vie->index_register); return (-1); } } /* * From "Specifying a Segment Selector", Intel SDM, Vol 1 * * In 64-bit mode, segmentation is generally (but not * completely) disabled. The exceptions are the FS and GS * segments. * * In legacy IA-32 mode, when the ESP or EBP register is used * as the base, the SS segment is the default segment. For * other data references, except when relative to stack or * string destination the DS segment is the default. These * can be overridden to allow other segments to be accessed. */ if (vie->segment_override) seg = vie->segment_register; else if (vie->base_register == VM_REG_GUEST_RSP || vie->base_register == VM_REG_GUEST_RBP) seg = VM_REG_GUEST_SS; else seg = VM_REG_GUEST_DS; if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS && seg != VM_REG_GUEST_GS) { segbase = 0; } else { error = vm_get_seg_desc(vcpu, seg, &desc); if (error) { printf("verify_gla: error %d getting segment" " descriptor %d", error, vie->segment_register); return (-1); } segbase = desc.base; } gla2 = segbase + base + vie->scale * idx + vie->displacement; gla2 &= size2mask[vie->addrsize]; if (gla != gla2) { printf("verify_gla mismatch: segbase(0x%0lx)" "base(0x%0lx), scale(%d), index(0x%0lx), " "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n", segbase, base, vie->scale, idx, vie->displacement, gla, gla2); return (-1); } return (0); } #endif /* _KERNEL */ int #ifdef _KERNEL vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla, enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie) #else vmm_decode_instruction(enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie) #endif { if (decode_prefixes(vie, cpu_mode, cs_d)) return (-1); if (decode_opcode(vie)) return (-1); if (decode_modrm(vie, cpu_mode)) return (-1); if (decode_sib(vie)) return (-1); if (decode_displacement(vie)) return (-1); if (decode_immediate(vie)) return (-1); if (decode_moffset(vie)) return (-1); #ifdef _KERNEL if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) { if (verify_gla(vcpu, gla, vie, cpu_mode)) return (-1); } #endif vie->decoded = 1; /* success */ return (0); } diff --git a/sys/amd64/vmm/vmm_mem.c b/sys/amd64/vmm/vmm_mem_machdep.c similarity index 100% rename from sys/amd64/vmm/vmm_mem.c rename to sys/amd64/vmm/vmm_mem_machdep.c diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h index acbd8f5cbcb9..1d783cdacb0d 100644 --- a/sys/arm64/include/vmm.h +++ b/sys/arm64/include/vmm.h @@ -1,376 +1,346 @@ /* * Copyright (C) 2015 Mihai Carabas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _VMM_H_ #define _VMM_H_ #include #include #include #include #include "pte.h" #include "pmap.h" struct vcpu; enum vm_suspend_how { VM_SUSPEND_NONE, VM_SUSPEND_RESET, VM_SUSPEND_POWEROFF, VM_SUSPEND_HALT, VM_SUSPEND_LAST }; /* * Identifiers for architecturally defined registers. */ enum vm_reg_name { VM_REG_GUEST_X0 = 0, VM_REG_GUEST_X1, VM_REG_GUEST_X2, VM_REG_GUEST_X3, VM_REG_GUEST_X4, VM_REG_GUEST_X5, VM_REG_GUEST_X6, VM_REG_GUEST_X7, VM_REG_GUEST_X8, VM_REG_GUEST_X9, VM_REG_GUEST_X10, VM_REG_GUEST_X11, VM_REG_GUEST_X12, VM_REG_GUEST_X13, VM_REG_GUEST_X14, VM_REG_GUEST_X15, VM_REG_GUEST_X16, VM_REG_GUEST_X17, VM_REG_GUEST_X18, VM_REG_GUEST_X19, VM_REG_GUEST_X20, VM_REG_GUEST_X21, VM_REG_GUEST_X22, VM_REG_GUEST_X23, VM_REG_GUEST_X24, VM_REG_GUEST_X25, VM_REG_GUEST_X26, VM_REG_GUEST_X27, VM_REG_GUEST_X28, VM_REG_GUEST_X29, VM_REG_GUEST_LR, VM_REG_GUEST_SP, VM_REG_GUEST_PC, VM_REG_GUEST_CPSR, VM_REG_GUEST_SCTLR_EL1, VM_REG_GUEST_TTBR0_EL1, VM_REG_GUEST_TTBR1_EL1, VM_REG_GUEST_TCR_EL1, VM_REG_GUEST_TCR2_EL1, VM_REG_LAST }; #define VM_INTINFO_VECTOR(info) ((info) & 0xff) #define VM_INTINFO_DEL_ERRCODE 0x800 #define VM_INTINFO_RSVD 0x7ffff000 #define VM_INTINFO_VALID 0x80000000 #define VM_INTINFO_TYPE 0x700 #define VM_INTINFO_HWINTR (0 << 8) #define VM_INTINFO_NMI (2 << 8) #define VM_INTINFO_HWEXCEPTION (3 << 8) #define VM_INTINFO_SWINTR (4 << 8) #define VM_GUEST_BASE_IPA 0x80000000UL /* Guest kernel start ipa */ /* * The VM name has to fit into the pathname length constraints of devfs, * governed primarily by SPECNAMELEN. The length is the total number of * characters in the full path, relative to the mount point and not * including any leading '/' characters. * A prefix and a suffix are added to the name specified by the user. * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters * longer for future use. * The suffix is a string that identifies a bootrom image or some similar * image that is attached to the VM. A separator character gets added to * the suffix automatically when generating the full path, so it must be * accounted for, reducing the effective length by 1. * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37 * bytes for FreeBSD 12. A minimum length is set for safety and supports * a SPECNAMELEN as small as 32 on old systems. 
*/ #define VM_MAX_PREFIXLEN 10 #define VM_MAX_SUFFIXLEN 15 #define VM_MAX_NAMELEN \ (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1) #ifdef _KERNEL struct vm; struct vm_exception; struct vm_exit; struct vm_run; struct vm_object; struct vm_guest_paging; struct vm_vgic_descr; struct pmap; struct vm_eventinfo { void *rptr; /* rendezvous cookie */ int *sptr; /* suspend cookie */ int *iptr; /* reqidle cookie */ }; int vm_create(const char *name, struct vm **retvm); struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid); void vm_disable_vcpu_creation(struct vm *vm); void vm_slock_vcpus(struct vm *vm); void vm_unlock_vcpus(struct vm *vm); void vm_destroy(struct vm *vm); int vm_reinit(struct vm *vm); const char *vm_name(struct vm *vm); -/* - * APIs that modify the guest memory map require all vcpus to be frozen. - */ -void vm_slock_memsegs(struct vm *vm); -void vm_xlock_memsegs(struct vm *vm); -void vm_unlock_memsegs(struct vm *vm); -int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, - size_t len, int prot, int flags); -int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); -int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); -void vm_free_memseg(struct vm *vm, int ident); - -/* - * APIs that inspect the guest memory map require only a *single* vcpu to - * be frozen. This acts like a read lock on the guest memory map since any - * modification requires *all* vcpus to be frozen. - */ -int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, - vm_ooffset_t *segoff, size_t *len, int *prot, int *flags); -int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, - struct vm_object **objptr); -vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm); -void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, - int prot, void **cookie); -void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, - int prot, void **cookie); -void vm_gpa_release(void *cookie); -bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa); - -int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, - uint64_t gla, int prot, uint64_t *gpa, int *is_fault); - uint16_t vm_get_maxcpus(struct vm *vm); void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus); int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus); int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval); int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val); int vm_run(struct vcpu *vcpu); int vm_suspend(struct vm *vm, enum vm_suspend_how how); void* vm_get_cookie(struct vm *vm); int vcpu_vcpuid(struct vcpu *vcpu); void *vcpu_get_cookie(struct vcpu *vcpu); struct vm *vcpu_vm(struct vcpu *vcpu); struct vcpu *vm_vcpu(struct vm *vm, int cpu); int vm_get_capability(struct vcpu *vcpu, int type, int *val); int vm_set_capability(struct vcpu *vcpu, int type, int val); int vm_activate_cpu(struct vcpu *vcpu); int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu); int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu); int vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far); int vm_attach_vgic(struct vm *vm, struct vm_vgic_descr *descr); int vm_assert_irq(struct vm *vm, uint32_t irq); int vm_deassert_irq(struct vm *vm, uint32_t irq); int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, int func); struct vm_exit *vm_exitinfo(struct vcpu *vcpu); void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc); void 
vm_exit_debug(struct vcpu *vcpu, uint64_t pc); void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc); void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc); cpuset_t vm_active_cpus(struct vm *vm); cpuset_t vm_debug_cpus(struct vm *vm); cpuset_t vm_suspended_cpus(struct vm *vm); static __inline int vcpu_rendezvous_pending(struct vm_eventinfo *info) { return (*((uintptr_t *)(info->rptr)) != 0); } static __inline int vcpu_suspended(struct vm_eventinfo *info) { return (*info->sptr); } int vcpu_debugged(struct vcpu *vcpu); enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING, }; int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle); enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu); static int __inline vcpu_is_running(struct vcpu *vcpu, int *hostcpu) { return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING); } #ifdef _SYS_PROC_H_ static int __inline vcpu_should_yield(struct vcpu *vcpu) { struct thread *td; td = curthread; return (td->td_ast != 0 || td->td_owepreempt != 0); } #endif void *vcpu_stats(struct vcpu *vcpu); void vcpu_notify_event(struct vcpu *vcpu); +struct vmspace *vm_vmspace(struct vm *vm); +struct vm_mem *vm_mem(struct vm *vm); enum vm_reg_name vm_segment_name(int seg_encoding); struct vm_copyinfo { uint64_t gpa; size_t len; void *hva; void *cookie; }; #endif /* _KERNEL */ #define VM_DIR_READ 0 #define VM_DIR_WRITE 1 #define VM_GP_M_MASK 0x1f #define VM_GP_MMU_ENABLED (1 << 5) struct vm_guest_paging { uint64_t ttbr0_addr; uint64_t ttbr1_addr; uint64_t tcr_el1; uint64_t tcr2_el1; int flags; int padding; }; struct vie { uint8_t access_size:4, sign_extend:1, dir:1, unused:2; enum vm_reg_name reg; }; struct vre { uint32_t inst_syndrome; uint8_t dir:1, unused:7; enum vm_reg_name reg; }; /* * Identifiers for optional vmm capabilities */ enum vm_cap_type { VM_CAP_HALT_EXIT, VM_CAP_PAUSE_EXIT, VM_CAP_UNRESTRICTED_GUEST, VM_CAP_BRK_EXIT, VM_CAP_SS_EXIT, VM_CAP_MASK_HWINTR, VM_CAP_MAX }; enum vm_exitcode { VM_EXITCODE_BOGUS, VM_EXITCODE_INST_EMUL, VM_EXITCODE_REG_EMUL, VM_EXITCODE_HVC, VM_EXITCODE_SUSPENDED, VM_EXITCODE_HYP, VM_EXITCODE_WFI, VM_EXITCODE_PAGING, VM_EXITCODE_SMCCC, VM_EXITCODE_DEBUG, VM_EXITCODE_BRK, VM_EXITCODE_SS, VM_EXITCODE_MAX }; struct vm_exit { enum vm_exitcode exitcode; int inst_length; uint64_t pc; union { /* * ARM specific payload. */ struct { uint32_t exception_nr; uint32_t pad; uint64_t esr_el2; /* Exception Syndrome Register */ uint64_t far_el2; /* Fault Address Register */ uint64_t hpfar_el2; /* Hypervisor IPA Fault Address Register */ } hyp; struct { struct vre vre; } reg_emul; struct { uint64_t gpa; uint64_t esr; } paging; struct { uint64_t gpa; struct vm_guest_paging paging; struct vie vie; } inst_emul; /* * A SMCCC call, e.g. starting a core via PSCI. * Further arguments can be read by asking the kernel for * all register values. */ struct { uint64_t func_id; uint64_t args[7]; } smccc_call; struct { enum vm_suspend_how how; } suspended; } u; }; #endif /* _VMM_H_ */ diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c index 77c565e37264..ad82e6dbd432 100644 --- a/sys/arm64/vmm/vmm.c +++ b/sys/arm64/vmm/vmm.c @@ -1,1891 +1,1523 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2015 Mihai Carabas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include "arm64.h" #include "mmu.h" #include "io/vgic.h" #include "io/vtimer.h" struct vcpu { int flags; enum vcpu_state state; struct mtx mtx; int hostcpu; /* host cpuid this vcpu last ran on */ int vcpuid; void *stats; struct vm_exit exitinfo; uint64_t nextpc; /* (x) next instruction to execute */ struct vm *vm; /* (o) */ void *cookie; /* (i) cpu-specific data */ struct vfpstate *guestfpu; /* (a,i) guest fpu state */ }; #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx)) #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx)) #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx)) #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx)) #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED) -struct mem_seg { - uint64_t gpa; - size_t len; - bool wired; - bool sysmem; - vm_object_t object; -}; -#define VM_MAX_MEMSEGS 3 - -struct mem_map { - vm_paddr_t gpa; - size_t len; - vm_ooffset_t segoff; - int segid; - int prot; - int flags; -}; -#define VM_MAX_MEMMAPS 4 - struct vmm_mmio_region { uint64_t start; uint64_t end; mem_region_read_t read; mem_region_write_t write; }; #define VM_MAX_MMIO_REGIONS 4 struct vmm_special_reg { uint32_t esr_iss; uint32_t esr_mask; reg_read_t reg_read; reg_write_t reg_write; void *arg; }; #define VM_MAX_SPECIAL_REGS 16 /* * Initialization: * (o) initialized the first time the VM is created * (i) initialized when VM is created and when it is reinitialized * (x) initialized before use */ struct vm { void *cookie; /* (i) cpu-specific data */ volatile cpuset_t active_cpus; /* (i) active vcpus */ volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */ int suspend; /* (i) stop VM execution */ bool dying; /* (o) is dying */ volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */ volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */ - struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */ - struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */ struct vmspace *vmspace; /* (o) guest's address space */ + struct vm_mem mem; /* (i) guest 
memory */ char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */ struct vcpu **vcpu; /* (i) guest vcpus */ struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS]; /* (o) guest MMIO regions */ struct vmm_special_reg special_reg[VM_MAX_SPECIAL_REGS]; /* The following describe the vm cpu topology */ uint16_t sockets; /* (o) num of sockets */ uint16_t cores; /* (o) num of cores/socket */ uint16_t threads; /* (o) num of threads/core */ uint16_t maxcpus; /* (o) max pluggable cpus */ - struct sx mem_segs_lock; /* (o) */ struct sx vcpus_init_lock; /* (o) */ }; static bool vmm_initialized = false; static int vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu); static MALLOC_DEFINE(M_VMM, "vmm", "vmm"); /* statistics */ static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime"); SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL); static int vmm_ipinum; SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0, "IPI vector used for vcpu notifications"); struct vmm_regs { uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64isar2; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; }; static const struct vmm_regs vmm_arch_regs_masks = { .id_aa64dfr0 = ID_AA64DFR0_CTX_CMPs_MASK | ID_AA64DFR0_WRPs_MASK | ID_AA64DFR0_BRPs_MASK | ID_AA64DFR0_PMUVer_3 | ID_AA64DFR0_DebugVer_8, .id_aa64isar0 = ID_AA64ISAR0_TLB_TLBIOSR | ID_AA64ISAR0_SHA3_IMPL | ID_AA64ISAR0_RDM_IMPL | ID_AA64ISAR0_Atomic_IMPL | ID_AA64ISAR0_CRC32_BASE | ID_AA64ISAR0_SHA2_512 | ID_AA64ISAR0_SHA1_BASE | ID_AA64ISAR0_AES_PMULL, .id_aa64mmfr0 = ID_AA64MMFR0_TGran4_IMPL | ID_AA64MMFR0_TGran64_IMPL | ID_AA64MMFR0_TGran16_IMPL | ID_AA64MMFR0_ASIDBits_16 | ID_AA64MMFR0_PARange_4P, .id_aa64mmfr1 = ID_AA64MMFR1_SpecSEI_IMPL | ID_AA64MMFR1_PAN_ATS1E1 | ID_AA64MMFR1_HAFDBS_AF, .id_aa64pfr0 = ID_AA64PFR0_GIC_CPUIF_NONE | ID_AA64PFR0_AdvSIMD_HP | ID_AA64PFR0_FP_HP | ID_AA64PFR0_EL3_64 | ID_AA64PFR0_EL2_64 | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64, }; /* Host registers masked by vmm_arch_regs_masks. 
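 *
 * Only the ID register fields named in vmm_arch_regs_masks are passed
 * through from the host, so a guest is never advertised a feature bit
 * that vmm does not know about.  A simplified sketch of what
 * _FETCH_KERN_REG() and get_kernel_reg_masked() do in vmm_regs_init()
 * below, where host_val stands in for the raw host register read:
 *
 *	guest_val = host_val & mask;
 *	if the register could not be read at all:
 *		guest_val = 0;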
*/ static struct vmm_regs vmm_arch_regs; u_int vm_maxcpu; SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &vm_maxcpu, 0, "Maximum number of vCPUs"); -static void vm_free_memmap(struct vm *vm, int ident); -static bool sysmem_mapping(struct vm *vm, struct mem_map *mm); static void vcpu_notify_event_locked(struct vcpu *vcpu); /* global statistics */ VMM_STAT(VMEXIT_COUNT, "total number of vm exits"); VMM_STAT(VMEXIT_UNKNOWN, "number of vmexits for the unknown exception"); VMM_STAT(VMEXIT_WFI, "number of times wfi was intercepted"); VMM_STAT(VMEXIT_WFE, "number of times wfe was intercepted"); VMM_STAT(VMEXIT_HVC, "number of times hvc was intercepted"); VMM_STAT(VMEXIT_MSR, "number of times msr/mrs was intercepted"); VMM_STAT(VMEXIT_DATA_ABORT, "number of vmexits for a data abort"); VMM_STAT(VMEXIT_INSN_ABORT, "number of vmexits for an instruction abort"); VMM_STAT(VMEXIT_UNHANDLED_SYNC, "number of vmexits for an unhandled synchronous exception"); VMM_STAT(VMEXIT_IRQ, "number of vmexits for an irq"); VMM_STAT(VMEXIT_FIQ, "number of vmexits for an interrupt"); VMM_STAT(VMEXIT_BRK, "number of vmexits for a breakpoint exception"); VMM_STAT(VMEXIT_SS, "number of vmexits for a single-step exception"); VMM_STAT(VMEXIT_UNHANDLED_EL2, "number of vmexits for an unhandled EL2 exception"); VMM_STAT(VMEXIT_UNHANDLED, "number of vmexits for an unhandled exception"); /* * Upper limit on vm_maxcpu. We could increase this to 28 bits, but this * is a safe value for now. */ #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE) static int vmm_regs_init(struct vmm_regs *regs, const struct vmm_regs *masks) { #define _FETCH_KERN_REG(reg, field) do { \ regs->field = vmm_arch_regs_masks.field; \ if (!get_kernel_reg_masked(reg, ®s->field, masks->field)) \ regs->field = 0; \ } while (0) _FETCH_KERN_REG(ID_AA64AFR0_EL1, id_aa64afr0); _FETCH_KERN_REG(ID_AA64AFR1_EL1, id_aa64afr1); _FETCH_KERN_REG(ID_AA64DFR0_EL1, id_aa64dfr0); _FETCH_KERN_REG(ID_AA64DFR1_EL1, id_aa64dfr1); _FETCH_KERN_REG(ID_AA64ISAR0_EL1, id_aa64isar0); _FETCH_KERN_REG(ID_AA64ISAR1_EL1, id_aa64isar1); _FETCH_KERN_REG(ID_AA64ISAR2_EL1, id_aa64isar2); _FETCH_KERN_REG(ID_AA64MMFR0_EL1, id_aa64mmfr0); _FETCH_KERN_REG(ID_AA64MMFR1_EL1, id_aa64mmfr1); _FETCH_KERN_REG(ID_AA64MMFR2_EL1, id_aa64mmfr2); _FETCH_KERN_REG(ID_AA64PFR0_EL1, id_aa64pfr0); _FETCH_KERN_REG(ID_AA64PFR1_EL1, id_aa64pfr1); #undef _FETCH_KERN_REG return (0); } static void vcpu_cleanup(struct vcpu *vcpu, bool destroy) { vmmops_vcpu_cleanup(vcpu->cookie); vcpu->cookie = NULL; if (destroy) { vmm_stat_free(vcpu->stats); fpu_save_area_free(vcpu->guestfpu); vcpu_lock_destroy(vcpu); } } static struct vcpu * vcpu_alloc(struct vm *vm, int vcpu_id) { struct vcpu *vcpu; KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, ("vcpu_alloc: invalid vcpu %d", vcpu_id)); vcpu = malloc(sizeof(*vcpu), M_VMM, M_WAITOK | M_ZERO); vcpu_lock_init(vcpu); vcpu->state = VCPU_IDLE; vcpu->hostcpu = NOCPU; vcpu->vcpuid = vcpu_id; vcpu->vm = vm; vcpu->guestfpu = fpu_save_area_alloc(); vcpu->stats = vmm_stat_alloc(); return (vcpu); } static void vcpu_init(struct vcpu *vcpu) { vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); MPASS(vcpu->cookie != NULL); fpu_save_area_reset(vcpu->guestfpu); vmm_stat_init(vcpu->stats); } struct vm_exit * vm_exitinfo(struct vcpu *vcpu) { return (&vcpu->exitinfo); } static int vmm_init(void) { int error; vm_maxcpu = mp_ncpus; TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu); if (vm_maxcpu > VM_MAXCPU) { printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU); vm_maxcpu 
= VM_MAXCPU; } if (vm_maxcpu == 0) vm_maxcpu = 1; error = vmm_regs_init(&vmm_arch_regs, &vmm_arch_regs_masks); if (error != 0) return (error); return (vmmops_modinit(0)); } static int vmm_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: error = vmmdev_init(); if (error != 0) break; error = vmm_init(); if (error == 0) vmm_initialized = true; else (void)vmmdev_cleanup(); break; case MOD_UNLOAD: error = vmmdev_cleanup(); if (error == 0 && vmm_initialized) { error = vmmops_modcleanup(); if (error) { /* * Something bad happened - prevent new * VMs from being created */ vmm_initialized = false; } } break; default: error = 0; break; } return (error); } static moduledata_t vmm_kmod = { "vmm", vmm_handler, NULL }; /* * vmm initialization has the following dependencies: * * - HYP initialization requires smp_rendezvous() and therefore must happen * after SMP is fully functional (after SI_SUB_SMP). * - vmm device initialization requires an initialized devfs. */ DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY); MODULE_VERSION(vmm, 1); static void vm_init(struct vm *vm, bool create) { int i; vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); MPASS(vm->cookie != NULL); CPU_ZERO(&vm->active_cpus); CPU_ZERO(&vm->debug_cpus); vm->suspend = 0; CPU_ZERO(&vm->suspended_cpus); memset(vm->mmio_region, 0, sizeof(vm->mmio_region)); memset(vm->special_reg, 0, sizeof(vm->special_reg)); if (!create) { for (i = 0; i < vm->maxcpus; i++) { if (vm->vcpu[i] != NULL) vcpu_init(vm->vcpu[i]); } } } void vm_disable_vcpu_creation(struct vm *vm) { sx_xlock(&vm->vcpus_init_lock); vm->dying = true; sx_xunlock(&vm->vcpus_init_lock); } struct vcpu * vm_alloc_vcpu(struct vm *vm, int vcpuid) { struct vcpu *vcpu; if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) return (NULL); /* Some interrupt controllers may have a CPU limit */ if (vcpuid >= vgic_max_cpu_count(vm->cookie)) return (NULL); vcpu = (struct vcpu *) atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); if (__predict_true(vcpu != NULL)) return (vcpu); sx_xlock(&vm->vcpus_init_lock); vcpu = vm->vcpu[vcpuid]; if (vcpu == NULL && !vm->dying) { vcpu = vcpu_alloc(vm, vcpuid); vcpu_init(vcpu); /* * Ensure vCPU is fully created before updating pointer * to permit unlocked reads above. */ atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], (uintptr_t)vcpu); } sx_xunlock(&vm->vcpus_init_lock); return (vcpu); } void vm_slock_vcpus(struct vm *vm) { sx_slock(&vm->vcpus_init_lock); } void vm_unlock_vcpus(struct vm *vm) { sx_unlock(&vm->vcpus_init_lock); } int vm_create(const char *name, struct vm **retvm) { struct vm *vm; struct vmspace *vmspace; /* * If vmm.ko could not be successfully initialized then don't attempt * to create the virtual machine. 
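 *
 * (vmm_initialized is only set once vmm_init(), and therefore
 * vmmops_modinit(), has succeeded, so returning ENXIO here is the
 * expected outcome after a failed or partial module load.)
 *
 * The guest physical address space allocated below spans
 *
 *	1ul << 39 bytes == 512 GiB
 *
 * which corresponds to the largest IPA width (39 bits) that
 * vmmops_modinit() configures for the stage 2 translation tables.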
*/ if (!vmm_initialized) return (ENXIO); if (name == NULL || strlen(name) >= VM_MAX_NAMELEN) return (EINVAL); vmspace = vmmops_vmspace_alloc(0, 1ul << 39); if (vmspace == NULL) return (ENOMEM); vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO); strcpy(vm->name, name); vm->vmspace = vmspace; - sx_init(&vm->mem_segs_lock, "vm mem_segs"); + vm_mem_init(&vm->mem); sx_init(&vm->vcpus_init_lock, "vm vcpus"); vm->sockets = 1; vm->cores = 1; /* XXX backwards compatibility */ vm->threads = 1; /* XXX backwards compatibility */ vm->maxcpus = vm_maxcpu; vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, M_WAITOK | M_ZERO); vm_init(vm, true); *retvm = vm; return (0); } void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus) { *sockets = vm->sockets; *cores = vm->cores; *threads = vm->threads; *maxcpus = vm->maxcpus; } uint16_t vm_get_maxcpus(struct vm *vm) { return (vm->maxcpus); } int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus) { /* Ignore maxcpus. */ if ((sockets * cores * threads) > vm->maxcpus) return (EINVAL); vm->sockets = sockets; vm->cores = cores; vm->threads = threads; return(0); } static void vm_cleanup(struct vm *vm, bool destroy) { - struct mem_map *mm; pmap_t pmap __diagused; int i; if (destroy) { + vm_xlock_memsegs(vm); pmap = vmspace_pmap(vm->vmspace); sched_pin(); PCPU_SET(curvmpmap, NULL); sched_unpin(); CPU_FOREACH(i) { MPASS(cpuid_to_pcpu[i]->pc_curvmpmap != pmap); } - } + } else + vm_assert_memseg_xlocked(vm); + vgic_detach_from_vm(vm->cookie); for (i = 0; i < vm->maxcpus; i++) { if (vm->vcpu[i] != NULL) vcpu_cleanup(vm->vcpu[i], destroy); } vmmops_cleanup(vm->cookie); - /* - * System memory is removed from the guest address space only when - * the VM is destroyed. This is because the mapping remains the same - * across VM reset. - * - * Device memory can be relocated by the guest (e.g. using PCI BARs) - * so those mappings are removed on a VM reset. - */ - if (!destroy) { - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (destroy || !sysmem_mapping(vm, mm)) - vm_free_memmap(vm, i); - } - } - + vm_mem_cleanup(vm); if (destroy) { - for (i = 0; i < VM_MAX_MEMSEGS; i++) - vm_free_memseg(vm, i); + vm_mem_destroy(vm); vmmops_vmspace_free(vm->vmspace); vm->vmspace = NULL; for (i = 0; i < vm->maxcpus; i++) free(vm->vcpu[i], M_VMM); free(vm->vcpu, M_VMM); sx_destroy(&vm->vcpus_init_lock); - sx_destroy(&vm->mem_segs_lock); } } void vm_destroy(struct vm *vm) { vm_cleanup(vm, true); free(vm, M_VMM); } int vm_reinit(struct vm *vm) { int error; /* * A virtual machine can be reset only if all vcpus are suspended. */ if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { vm_cleanup(vm, false); vm_init(vm, false); error = 0; } else { error = EBUSY; } return (error); } const char * vm_name(struct vm *vm) { return (vm->name); } -void -vm_slock_memsegs(struct vm *vm) -{ - sx_slock(&vm->mem_segs_lock); -} - -void -vm_xlock_memsegs(struct vm *vm) -{ - sx_xlock(&vm->mem_segs_lock); -} - -void -vm_unlock_memsegs(struct vm *vm) -{ - sx_unlock(&vm->mem_segs_lock); -} - -/* - * Return 'true' if 'gpa' is allocated in the guest address space. - * - * This function is called in the context of a running vcpu which acts as - * an implicit lock on 'vm->mem_maps[]'. 
- */ -bool -vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) -{ - struct vm *vm = vcpu->vm; - struct mem_map *mm; - int i; - -#ifdef INVARIANTS - int hostcpu, state; - state = vcpu_get_state(vcpu, &hostcpu); - KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, - ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); -#endif - - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) - return (true); /* 'gpa' is sysmem or devmem */ - } - - return (false); -} - -int -vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) -{ - struct mem_seg *seg; - vm_object_t obj; - - sx_assert(&vm->mem_segs_lock, SX_XLOCKED); - - if (ident < 0 || ident >= VM_MAX_MEMSEGS) - return (EINVAL); - - if (len == 0 || (len & PAGE_MASK)) - return (EINVAL); - - seg = &vm->mem_segs[ident]; - if (seg->object != NULL) { - if (seg->len == len && seg->sysmem == sysmem) - return (EEXIST); - else - return (EINVAL); - } - - obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT); - if (obj == NULL) - return (ENOMEM); - - seg->len = len; - seg->object = obj; - seg->sysmem = sysmem; - return (0); -} - -int -vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, - vm_object_t *objptr) -{ - struct mem_seg *seg; - - sx_assert(&vm->mem_segs_lock, SX_LOCKED); - - if (ident < 0 || ident >= VM_MAX_MEMSEGS) - return (EINVAL); - - seg = &vm->mem_segs[ident]; - if (len) - *len = seg->len; - if (sysmem) - *sysmem = seg->sysmem; - if (objptr) - *objptr = seg->object; - return (0); -} - -void -vm_free_memseg(struct vm *vm, int ident) -{ - struct mem_seg *seg; - - KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, - ("%s: invalid memseg ident %d", __func__, ident)); - - seg = &vm->mem_segs[ident]; - if (seg->object != NULL) { - vm_object_deallocate(seg->object); - bzero(seg, sizeof(struct mem_seg)); - } -} - -int -vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, - size_t len, int prot, int flags) -{ - struct mem_seg *seg; - struct mem_map *m, *map; - vm_ooffset_t last; - int i, error; - - if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) - return (EINVAL); - - if (flags & ~VM_MEMMAP_F_WIRED) - return (EINVAL); - - if (segid < 0 || segid >= VM_MAX_MEMSEGS) - return (EINVAL); - - seg = &vm->mem_segs[segid]; - if (seg->object == NULL) - return (EINVAL); - - last = first + len; - if (first < 0 || first >= last || last > seg->len) - return (EINVAL); - - if ((gpa | first | last) & PAGE_MASK) - return (EINVAL); - - map = NULL; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - m = &vm->mem_maps[i]; - if (m->len == 0) { - map = m; - break; - } - } - - if (map == NULL) - return (ENOSPC); - - error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, - len, 0, VMFS_NO_SPACE, prot, prot, 0); - if (error != KERN_SUCCESS) - return (EFAULT); - - vm_object_reference(seg->object); - - if (flags & VM_MEMMAP_F_WIRED) { - error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, - VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); - if (error != KERN_SUCCESS) { - vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); - return (error == KERN_RESOURCE_SHORTAGE ? 
ENOMEM : - EFAULT); - } - } - - map->gpa = gpa; - map->len = len; - map->segoff = first; - map->segid = segid; - map->prot = prot; - map->flags = flags; - return (0); -} - -int -vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) -{ - struct mem_map *m; - int i; - - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - m = &vm->mem_maps[i]; - if (m->gpa == gpa && m->len == len) { - vm_free_memmap(vm, i); - return (0); - } - } - - return (EINVAL); -} - -int -vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, - vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) -{ - struct mem_map *mm, *mmnext; - int i; - - mmnext = NULL; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (mm->len == 0 || mm->gpa < *gpa) - continue; - if (mmnext == NULL || mm->gpa < mmnext->gpa) - mmnext = mm; - } - - if (mmnext != NULL) { - *gpa = mmnext->gpa; - if (segid) - *segid = mmnext->segid; - if (segoff) - *segoff = mmnext->segoff; - if (len) - *len = mmnext->len; - if (prot) - *prot = mmnext->prot; - if (flags) - *flags = mmnext->flags; - return (0); - } else { - return (ENOENT); - } -} - -static void -vm_free_memmap(struct vm *vm, int ident) -{ - struct mem_map *mm; - int error __diagused; - - mm = &vm->mem_maps[ident]; - if (mm->len) { - error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, - mm->gpa + mm->len); - KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d", - __func__, error)); - bzero(mm, sizeof(struct mem_map)); - } -} - -static __inline bool -sysmem_mapping(struct vm *vm, struct mem_map *mm) -{ - - if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) - return (true); - else - return (false); -} - -vm_paddr_t -vmm_sysmem_maxaddr(struct vm *vm) -{ - struct mem_map *mm; - vm_paddr_t maxaddr; - int i; - - maxaddr = 0; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (sysmem_mapping(vm, mm)) { - if (maxaddr < mm->gpa + mm->len) - maxaddr = mm->gpa + mm->len; - } - } - return (maxaddr); -} - int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *is_fault) { - - vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault); - return (0); + return (vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault)); } static int vmm_reg_raz(struct vcpu *vcpu, uint64_t *rval, void *arg) { *rval = 0; return (0); } static int vmm_reg_read_arg(struct vcpu *vcpu, uint64_t *rval, void *arg) { *rval = *(uint64_t *)arg; return (0); } static int vmm_reg_wi(struct vcpu *vcpu, uint64_t wval, void *arg) { return (0); } static const struct vmm_special_reg vmm_special_regs[] = { #define SPECIAL_REG(_reg, _read, _write) \ { \ .esr_iss = ((_reg ## _op0) << ISS_MSR_OP0_SHIFT) | \ ((_reg ## _op1) << ISS_MSR_OP1_SHIFT) | \ ((_reg ## _CRn) << ISS_MSR_CRn_SHIFT) | \ ((_reg ## _CRm) << ISS_MSR_CRm_SHIFT) | \ ((_reg ## _op2) << ISS_MSR_OP2_SHIFT), \ .esr_mask = ISS_MSR_REG_MASK, \ .reg_read = (_read), \ .reg_write = (_write), \ .arg = NULL, \ } #define ID_SPECIAL_REG(_reg, _name) \ { \ .esr_iss = ((_reg ## _op0) << ISS_MSR_OP0_SHIFT) | \ ((_reg ## _op1) << ISS_MSR_OP1_SHIFT) | \ ((_reg ## _CRn) << ISS_MSR_CRn_SHIFT) | \ ((_reg ## _CRm) << ISS_MSR_CRm_SHIFT) | \ ((_reg ## _op2) << ISS_MSR_OP2_SHIFT), \ .esr_mask = ISS_MSR_REG_MASK, \ .reg_read = vmm_reg_read_arg, \ .reg_write = vmm_reg_wi, \ .arg = &(vmm_arch_regs._name), \ } /* ID registers */ ID_SPECIAL_REG(ID_AA64PFR0_EL1, id_aa64pfr0), ID_SPECIAL_REG(ID_AA64DFR0_EL1, id_aa64dfr0), ID_SPECIAL_REG(ID_AA64ISAR0_EL1, id_aa64isar0), ID_SPECIAL_REG(ID_AA64MMFR0_EL1, 
id_aa64mmfr0), ID_SPECIAL_REG(ID_AA64MMFR1_EL1, id_aa64mmfr1), /* * All other ID registers are read as zero. * They are all in the op0=3, op1=0, CRn=0, CRm={0..7} space. */ { .esr_iss = (3 << ISS_MSR_OP0_SHIFT) | (0 << ISS_MSR_OP1_SHIFT) | (0 << ISS_MSR_CRn_SHIFT) | (0 << ISS_MSR_CRm_SHIFT), .esr_mask = ISS_MSR_OP0_MASK | ISS_MSR_OP1_MASK | ISS_MSR_CRn_MASK | (0x8 << ISS_MSR_CRm_SHIFT), .reg_read = vmm_reg_raz, .reg_write = vmm_reg_wi, .arg = NULL, }, /* Counter physical registers */ SPECIAL_REG(CNTP_CTL_EL0, vtimer_phys_ctl_read, vtimer_phys_ctl_write), SPECIAL_REG(CNTP_CVAL_EL0, vtimer_phys_cval_read, vtimer_phys_cval_write), SPECIAL_REG(CNTP_TVAL_EL0, vtimer_phys_tval_read, vtimer_phys_tval_write), SPECIAL_REG(CNTPCT_EL0, vtimer_phys_cnt_read, vtimer_phys_cnt_write), #undef SPECIAL_REG }; void vm_register_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask, reg_read_t reg_read, reg_write_t reg_write, void *arg) { int i; for (i = 0; i < nitems(vm->special_reg); i++) { if (vm->special_reg[i].esr_iss == 0 && vm->special_reg[i].esr_mask == 0) { vm->special_reg[i].esr_iss = iss; vm->special_reg[i].esr_mask = mask; vm->special_reg[i].reg_read = reg_read; vm->special_reg[i].reg_write = reg_write; vm->special_reg[i].arg = arg; return; } } panic("%s: No free special register slot", __func__); } void vm_deregister_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask) { int i; for (i = 0; i < nitems(vm->special_reg); i++) { if (vm->special_reg[i].esr_iss == iss && vm->special_reg[i].esr_mask == mask) { memset(&vm->special_reg[i], 0, sizeof(vm->special_reg[i])); return; } } panic("%s: Invalid special register: iss %lx mask %lx", __func__, iss, mask); } static int vm_handle_reg_emul(struct vcpu *vcpu, bool *retu) { struct vm *vm; struct vm_exit *vme; struct vre *vre; int i, rv; vm = vcpu->vm; vme = &vcpu->exitinfo; vre = &vme->u.reg_emul.vre; for (i = 0; i < nitems(vm->special_reg); i++) { if (vm->special_reg[i].esr_iss == 0 && vm->special_reg[i].esr_mask == 0) continue; if ((vre->inst_syndrome & vm->special_reg[i].esr_mask) == vm->special_reg[i].esr_iss) { rv = vmm_emulate_register(vcpu, vre, vm->special_reg[i].reg_read, vm->special_reg[i].reg_write, vm->special_reg[i].arg); if (rv == 0) { *retu = false; } return (rv); } } for (i = 0; i < nitems(vmm_special_regs); i++) { if ((vre->inst_syndrome & vmm_special_regs[i].esr_mask) == vmm_special_regs[i].esr_iss) { rv = vmm_emulate_register(vcpu, vre, vmm_special_regs[i].reg_read, vmm_special_regs[i].reg_write, vmm_special_regs[i].arg); if (rv == 0) { *retu = false; } return (rv); } } *retu = true; return (0); } void vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size, mem_region_read_t mmio_read, mem_region_write_t mmio_write) { int i; for (i = 0; i < nitems(vm->mmio_region); i++) { if (vm->mmio_region[i].start == 0 && vm->mmio_region[i].end == 0) { vm->mmio_region[i].start = start; vm->mmio_region[i].end = start + size; vm->mmio_region[i].read = mmio_read; vm->mmio_region[i].write = mmio_write; return; } } panic("%s: No free MMIO region", __func__); } void vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size) { int i; for (i = 0; i < nitems(vm->mmio_region); i++) { if (vm->mmio_region[i].start == start && vm->mmio_region[i].end == start + size) { memset(&vm->mmio_region[i], 0, sizeof(vm->mmio_region[i])); return; } } panic("%s: Invalid MMIO region: %lx - %lx", __func__, start, start + size); } static int vm_handle_inst_emul(struct vcpu *vcpu, bool *retu) { struct vm *vm; struct vm_exit *vme; struct vie *vie; 
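/*
 * A fault on a guest physical address that is not backed by guest
 * memory is emulated here when it falls inside a region registered
 * with vm_register_inst_handler() (for example the vGIC register
 * frames); otherwise *retu is set and the exit is bounced to userspace
 * for device emulation.  In outline:
 *
 *	if vgic is not attached or no region covers fault_ipa:
 *		*retu = true (punt to userspace)
 *	else:
 *		vmm_emulate_instruction(vcpu, fault_ipa, vie, paging,
 *		    vmr->read, vmr->write, retu)
 */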
struct hyp *hyp; uint64_t fault_ipa; struct vm_guest_paging *paging; struct vmm_mmio_region *vmr; int error, i; vm = vcpu->vm; hyp = vm->cookie; if (!hyp->vgic_attached) goto out_user; vme = &vcpu->exitinfo; vie = &vme->u.inst_emul.vie; paging = &vme->u.inst_emul.paging; fault_ipa = vme->u.inst_emul.gpa; vmr = NULL; for (i = 0; i < nitems(vm->mmio_region); i++) { if (vm->mmio_region[i].start <= fault_ipa && vm->mmio_region[i].end > fault_ipa) { vmr = &vm->mmio_region[i]; break; } } if (vmr == NULL) goto out_user; error = vmm_emulate_instruction(vcpu, fault_ipa, vie, paging, vmr->read, vmr->write, retu); return (error); out_user: *retu = true; return (0); } int vm_suspend(struct vm *vm, enum vm_suspend_how how) { int i; if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) return (EINVAL); if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { VM_CTR2(vm, "virtual machine already suspended %d/%d", vm->suspend, how); return (EALREADY); } VM_CTR1(vm, "virtual machine successfully suspended %d", how); /* * Notify all active vcpus that they are now suspended. */ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->active_cpus)) vcpu_notify_event(vm_vcpu(vm, i)); } return (0); } void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc) { struct vm *vm = vcpu->vm; struct vm_exit *vmexit; KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); vmexit = vm_exitinfo(vcpu); vmexit->pc = pc; vmexit->inst_length = 4; vmexit->exitcode = VM_EXITCODE_SUSPENDED; vmexit->u.suspended.how = vm->suspend; } void vm_exit_debug(struct vcpu *vcpu, uint64_t pc) { struct vm_exit *vmexit; vmexit = vm_exitinfo(vcpu); vmexit->pc = pc; vmexit->inst_length = 4; vmexit->exitcode = VM_EXITCODE_DEBUG; } int vm_activate_cpu(struct vcpu *vcpu) { struct vm *vm = vcpu->vm; if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) return (EBUSY); CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); return (0); } int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) { if (vcpu == NULL) { vm->debug_cpus = vm->active_cpus; for (int i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->active_cpus)) vcpu_notify_event(vm_vcpu(vm, i)); } } else { if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) return (EINVAL); CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); vcpu_notify_event(vcpu); } return (0); } int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) { if (vcpu == NULL) { CPU_ZERO(&vm->debug_cpus); } else { if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) return (EINVAL); CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); } return (0); } int vcpu_debugged(struct vcpu *vcpu) { return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); } cpuset_t vm_active_cpus(struct vm *vm) { return (vm->active_cpus); } cpuset_t vm_debug_cpus(struct vm *vm) { return (vm->debug_cpus); } cpuset_t vm_suspended_cpus(struct vm *vm) { return (vm->suspended_cpus); } void * vcpu_stats(struct vcpu *vcpu) { return (vcpu->stats); } /* * This function is called to ensure that a vcpu "sees" a pending event * as soon as possible: * - If the vcpu thread is sleeping then it is woken up. * - If the vcpu is running on a different host_cpu then an IPI will be directed * to the host_cpu to cause the vcpu to trap into the hypervisor. 
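 *
 * A condensed sketch of the function below, with the assertions and the
 * self-IPI case omitted (the vcpu lock must be held so the state and
 * hostcpu fields are stable):
 *
 *	if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
 *		ipi_cpu(vcpu->hostcpu, vmm_ipinum);
 *	else if (vcpu->state == VCPU_SLEEPING)
 *		wakeup_one(vcpu);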
*/ static void vcpu_notify_event_locked(struct vcpu *vcpu) { int hostcpu; hostcpu = vcpu->hostcpu; if (vcpu->state == VCPU_RUNNING) { KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); if (hostcpu != curcpu) { ipi_cpu(hostcpu, vmm_ipinum); } else { /* * If the 'vcpu' is running on 'curcpu' then it must * be sending a notification to itself (e.g. SELF_IPI). * The pending event will be picked up when the vcpu * transitions back to guest context. */ } } else { KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " "with hostcpu %d", vcpu->state, hostcpu)); if (vcpu->state == VCPU_SLEEPING) wakeup_one(vcpu); } } void vcpu_notify_event(struct vcpu *vcpu) { vcpu_lock(vcpu); vcpu_notify_event_locked(vcpu); vcpu_unlock(vcpu); } +struct vmspace * +vm_vmspace(struct vm *vm) +{ + return (vm->vmspace); +} + +struct vm_mem * +vm_mem(struct vm *vm) +{ + return (&vm->mem); +} + static void restore_guest_fpustate(struct vcpu *vcpu) { /* flush host state to the pcb */ vfp_save_state(curthread, curthread->td_pcb); /* Ensure the VFP state will be re-loaded when exiting the guest */ PCPU_SET(fpcurthread, NULL); /* restore guest FPU state */ vfp_enable(); vfp_restore(vcpu->guestfpu); /* * The FPU is now "dirty" with the guest's state so turn on emulation * to trap any access to the FPU by the host. */ vfp_disable(); } static void save_guest_fpustate(struct vcpu *vcpu) { if ((READ_SPECIALREG(cpacr_el1) & CPACR_FPEN_MASK) != CPACR_FPEN_TRAP_ALL1) panic("VFP not enabled in host!"); /* save guest FPU state */ vfp_enable(); vfp_store(vcpu->guestfpu); vfp_disable(); KASSERT(PCPU_GET(fpcurthread) == NULL, ("%s: fpcurthread set with guest registers", __func__)); } static int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) { int error; vcpu_assert_locked(vcpu); /* * State transitions from the vmmdev_ioctl() must always begin from * the VCPU_IDLE state. This guarantees that there is only a single * ioctl() operating on a vcpu at any point. 
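 *
 * Together with the checks further down this yields a small state
 * machine (current state -> permitted new states):
 *
 *	IDLE     -> FROZEN
 *	FROZEN   -> IDLE, RUNNING, SLEEPING
 *	RUNNING  -> FROZEN
 *	SLEEPING -> FROZEN
 *
 * so RUNNING and SLEEPING can only be entered from FROZEN, and an
 * ioctl must freeze a vcpu before doing anything else with it.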
*/ if (from_idle) { while (vcpu->state != VCPU_IDLE) { vcpu_notify_event_locked(vcpu); msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); } } else { KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " "vcpu idle state")); } if (vcpu->state == VCPU_RUNNING) { KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " "mismatch for running vcpu", curcpu, vcpu->hostcpu)); } else { KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " "vcpu that is not running", vcpu->hostcpu)); } /* * The following state transitions are allowed: * IDLE -> FROZEN -> IDLE * FROZEN -> RUNNING -> FROZEN * FROZEN -> SLEEPING -> FROZEN */ switch (vcpu->state) { case VCPU_IDLE: case VCPU_RUNNING: case VCPU_SLEEPING: error = (newstate != VCPU_FROZEN); break; case VCPU_FROZEN: error = (newstate == VCPU_FROZEN); break; default: error = 1; break; } if (error) return (EBUSY); vcpu->state = newstate; if (newstate == VCPU_RUNNING) vcpu->hostcpu = curcpu; else vcpu->hostcpu = NOCPU; if (newstate == VCPU_IDLE) wakeup(&vcpu->state); return (0); } static void vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) panic("Error %d setting state to %d\n", error, newstate); } static void vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) panic("Error %d setting state to %d", error, newstate); } int vm_get_capability(struct vcpu *vcpu, int type, int *retval) { if (type < 0 || type >= VM_CAP_MAX) return (EINVAL); return (vmmops_getcap(vcpu->cookie, type, retval)); } int vm_set_capability(struct vcpu *vcpu, int type, int val) { if (type < 0 || type >= VM_CAP_MAX) return (EINVAL); return (vmmops_setcap(vcpu->cookie, type, val)); } struct vm * vcpu_vm(struct vcpu *vcpu) { return (vcpu->vm); } int vcpu_vcpuid(struct vcpu *vcpu) { return (vcpu->vcpuid); } void * vcpu_get_cookie(struct vcpu *vcpu) { return (vcpu->cookie); } struct vcpu * vm_vcpu(struct vm *vm, int vcpuid) { return (vm->vcpu[vcpuid]); } int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) { int error; vcpu_lock(vcpu); error = vcpu_set_state_locked(vcpu, newstate, from_idle); vcpu_unlock(vcpu); return (error); } enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu) { enum vcpu_state state; vcpu_lock(vcpu); state = vcpu->state; if (hostcpu != NULL) *hostcpu = vcpu->hostcpu; vcpu_unlock(vcpu); return (state); } -static void * -_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ - int i, count, pageoff; - struct mem_map *mm; - vm_page_t m; - - pageoff = gpa & PAGE_MASK; - if (len > PAGE_SIZE - pageoff) - panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); - - count = 0; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (sysmem_mapping(vm, mm) && gpa >= mm->gpa && - gpa < mm->gpa + mm->len) { - count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, - trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1); - break; - } - } - - if (count == 1) { - *cookie = m; - return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff)); - } else { - *cookie = NULL; - return (NULL); - } -} - -void * -vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ -#ifdef INVARIANTS - /* - * The current vcpu should be frozen to ensure 'vm_memmap[]' - * stability. 
- */ - int state = vcpu_get_state(vcpu, NULL); - KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", - __func__, state)); -#endif - return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); -} - -void * -vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ - sx_assert(&vm->mem_segs_lock, SX_LOCKED); - return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); -} - -void -vm_gpa_release(void *cookie) -{ - vm_page_t m = cookie; - - vm_page_unwire(m, PQ_ACTIVE); -} - int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) { if (reg >= VM_REG_LAST) return (EINVAL); return (vmmops_getreg(vcpu->cookie, reg, retval)); } int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) { int error; if (reg >= VM_REG_LAST) return (EINVAL); error = vmmops_setreg(vcpu->cookie, reg, val); if (error || reg != VM_REG_GUEST_PC) return (error); vcpu->nextpc = val; return (0); } void * vm_get_cookie(struct vm *vm) { return (vm->cookie); } int vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far) { return (vmmops_exception(vcpu->cookie, esr, far)); } int vm_attach_vgic(struct vm *vm, struct vm_vgic_descr *descr) { return (vgic_attach_to_vm(vm->cookie, descr)); } int vm_assert_irq(struct vm *vm, uint32_t irq) { return (vgic_inject_irq(vm->cookie, -1, irq, true)); } int vm_deassert_irq(struct vm *vm, uint32_t irq) { return (vgic_inject_irq(vm->cookie, -1, irq, false)); } int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, int func) { /* TODO: Should we raise an SError? */ return (vgic_inject_msi(vm->cookie, msg, addr)); } static int vm_handle_smccc_call(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) { struct hypctx *hypctx; int i; hypctx = vcpu_get_cookie(vcpu); if ((hypctx->tf.tf_esr & ESR_ELx_ISS_MASK) != 0) return (1); vme->exitcode = VM_EXITCODE_SMCCC; vme->u.smccc_call.func_id = hypctx->tf.tf_x[0]; for (i = 0; i < nitems(vme->u.smccc_call.args); i++) vme->u.smccc_call.args[i] = hypctx->tf.tf_x[i + 1]; *retu = true; return (0); } static int vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) { vcpu_lock(vcpu); while (1) { if (vgic_has_pending_irq(vcpu->cookie)) break; if (vcpu_should_yield(vcpu)) break; vcpu_require_state_locked(vcpu, VCPU_SLEEPING); /* * XXX msleep_spin() cannot be interrupted by signals so * wake up periodically to check pending signals. */ msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz); vcpu_require_state_locked(vcpu, VCPU_FROZEN); } vcpu_unlock(vcpu); *retu = false; return (0); } static int vm_handle_paging(struct vcpu *vcpu, bool *retu) { struct vm *vm = vcpu->vm; struct vm_exit *vme; struct vm_map *map; uint64_t addr, esr; pmap_t pmap; int ftype, rv; vme = &vcpu->exitinfo; pmap = vmspace_pmap(vcpu->vm->vmspace); addr = vme->u.paging.gpa; esr = vme->u.paging.esr; /* The page exists, but the page table needs to be updated. 
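 *
 * pmap_fault() covers that fast path, e.g. an access flag or permission
 * update for a page that is already resident in the stage 2 tables.
 * When it cannot help, the code below falls back to vm_fault() on the
 * guest vmspace to bring the page in:
 *
 *	vm_fault(&vm->vmspace->vm_map, gpa, ftype, VM_FAULT_NORMAL, NULL);
 *
 * and vm_run() then retries the guest from the same PC, since
 * inst_length is 0 for VM_EXITCODE_PAGING exits.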
*/ if (pmap_fault(pmap, esr, addr) == KERN_SUCCESS) return (0); switch (ESR_ELx_EXCEPTION(esr)) { case EXCP_INSN_ABORT_L: case EXCP_DATA_ABORT_L: ftype = VM_PROT_EXECUTE | VM_PROT_READ | VM_PROT_WRITE; break; default: panic("%s: Invalid exception (esr = %lx)", __func__, esr); } map = &vm->vmspace->vm_map; rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); if (rv != KERN_SUCCESS) return (EFAULT); return (0); } static int vm_handle_suspend(struct vcpu *vcpu, bool *retu) { struct vm *vm = vcpu->vm; int error, i; struct thread *td; error = 0; td = curthread; CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); /* * Wait until all 'active_cpus' have suspended themselves. * * Since a VM may be suspended at any time including when one or * more vcpus are doing a rendezvous we need to call the rendezvous * handler while we are waiting to prevent a deadlock. */ vcpu_lock(vcpu); while (error == 0) { if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) break; vcpu_require_state_locked(vcpu, VCPU_SLEEPING); msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); vcpu_require_state_locked(vcpu, VCPU_FROZEN); if (td_ast_pending(td, TDA_SUSPEND)) { vcpu_unlock(vcpu); error = thread_check_susp(td, false); vcpu_lock(vcpu); } } vcpu_unlock(vcpu); /* * Wakeup the other sleeping vcpus and return to userspace. */ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->suspended_cpus)) { vcpu_notify_event(vm_vcpu(vm, i)); } } *retu = true; return (error); } int vm_run(struct vcpu *vcpu) { struct vm *vm = vcpu->vm; struct vm_eventinfo evinfo; int error, vcpuid; struct vm_exit *vme; bool retu; pmap_t pmap; vcpuid = vcpu->vcpuid; if (!CPU_ISSET(vcpuid, &vm->active_cpus)) return (EINVAL); if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) return (EINVAL); pmap = vmspace_pmap(vm->vmspace); vme = &vcpu->exitinfo; evinfo.rptr = NULL; evinfo.sptr = &vm->suspend; evinfo.iptr = NULL; restart: critical_enter(); restore_guest_fpustate(vcpu); vcpu_require_state(vcpu, VCPU_RUNNING); error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo); vcpu_require_state(vcpu, VCPU_FROZEN); save_guest_fpustate(vcpu); critical_exit(); if (error == 0) { retu = false; switch (vme->exitcode) { case VM_EXITCODE_INST_EMUL: vcpu->nextpc = vme->pc + vme->inst_length; error = vm_handle_inst_emul(vcpu, &retu); break; case VM_EXITCODE_REG_EMUL: vcpu->nextpc = vme->pc + vme->inst_length; error = vm_handle_reg_emul(vcpu, &retu); break; case VM_EXITCODE_HVC: /* * The HVC instruction saves the address for the * next instruction as the return address. */ vcpu->nextpc = vme->pc; /* * The PSCI call can change the exit information in the * case of suspend/reset/poweroff/cpu off/cpu on. */ error = vm_handle_smccc_call(vcpu, vme, &retu); break; case VM_EXITCODE_WFI: vcpu->nextpc = vme->pc + vme->inst_length; error = vm_handle_wfi(vcpu, vme, &retu); break; case VM_EXITCODE_PAGING: vcpu->nextpc = vme->pc; error = vm_handle_paging(vcpu, &retu); break; case VM_EXITCODE_SUSPENDED: vcpu->nextpc = vme->pc; error = vm_handle_suspend(vcpu, &retu); break; default: /* Handle in userland */ vcpu->nextpc = vme->pc; retu = true; break; } } if (error == 0 && retu == false) goto restart; return (error); } diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c index 43b2ba7802d7..de2425aae0a1 100644 --- a/sys/arm64/vmm/vmm_arm64.c +++ b/sys/arm64/vmm/vmm_arm64.c @@ -1,1408 +1,1410 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2015 Mihai Carabas * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include + #include "mmu.h" #include "arm64.h" #include "hyp.h" #include "reset.h" #include "io/vgic.h" #include "io/vgic_v3.h" #include "io/vtimer.h" #include "vmm_handlers.h" #include "vmm_stat.h" #define HANDLED 1 #define UNHANDLED 0 /* Number of bits in an EL2 virtual address */ #define EL2_VIRT_BITS 48 CTASSERT((1ul << EL2_VIRT_BITS) >= HYP_VM_MAX_ADDRESS); /* TODO: Move the host hypctx off the stack */ #define VMM_STACK_PAGES 4 #define VMM_STACK_SIZE (VMM_STACK_PAGES * PAGE_SIZE) static int vmm_pmap_levels, vmm_virt_bits, vmm_max_ipa_bits; /* Register values passed to arm_setup_vectors to set in the hypervisor */ struct vmm_init_regs { uint64_t tcr_el2; uint64_t vtcr_el2; }; MALLOC_DEFINE(M_HYP, "ARM VMM HYP", "ARM VMM HYP"); extern char hyp_init_vectors[]; extern char hyp_vectors[]; extern char hyp_stub_vectors[]; static vm_paddr_t hyp_code_base; static size_t hyp_code_len; static char *stack[MAXCPU]; static vm_offset_t stack_hyp_va[MAXCPU]; static vmem_t *el2_mem_alloc; static void arm_setup_vectors(void *arg); DPCPU_DEFINE_STATIC(struct hypctx *, vcpu); static inline void arm64_set_active_vcpu(struct hypctx *hypctx) { DPCPU_SET(vcpu, hypctx); } struct hypctx * arm64_get_active_vcpu(void) { return (DPCPU_GET(vcpu)); } static void arm_setup_vectors(void *arg) { struct vmm_init_regs *el2_regs; uintptr_t stack_top; uint32_t sctlr_el2; register_t daif; el2_regs = arg; arm64_set_active_vcpu(NULL); /* * Configure the system control register for EL2: * * SCTLR_EL2_M: MMU on * SCTLR_EL2_C: Data cacheability not affected * SCTLR_EL2_I: Instruction cacheability not affected * SCTLR_EL2_A: Instruction alignment check * SCTLR_EL2_SA: Stack pointer alignment check * SCTLR_EL2_WXN: Treat writable memory as execute never * ~SCTLR_EL2_EE: Data accesses are little-endian */ sctlr_el2 = SCTLR_EL2_RES1; sctlr_el2 |= SCTLR_EL2_M | SCTLR_EL2_C | SCTLR_EL2_I; sctlr_el2 |= SCTLR_EL2_A | SCTLR_EL2_SA; sctlr_el2 |= SCTLR_EL2_WXN; sctlr_el2 &= ~SCTLR_EL2_EE; daif = intr_disable(); if (in_vhe()) { WRITE_SPECIALREG(vtcr_el2, 
el2_regs->vtcr_el2); } else { /* * Install the temporary vectors which will be responsible for * initializing the VMM when we next trap into EL2. * * x0: the exception vector table responsible for hypervisor * initialization on the next call. */ vmm_call_hyp(vtophys(&vmm_hyp_code)); /* Create and map the hypervisor stack */ stack_top = stack_hyp_va[PCPU_GET(cpuid)] + VMM_STACK_SIZE; /* Special call to initialize EL2 */ vmm_call_hyp(vmmpmap_to_ttbr0(), stack_top, el2_regs->tcr_el2, sctlr_el2, el2_regs->vtcr_el2); } intr_restore(daif); } static void arm_teardown_vectors(void *arg) { register_t daif; /* * vmm_cleanup() will disable the MMU. For the next few instructions, * before the hardware disables the MMU, one of the following is * possible: * * a. The instruction addresses are fetched with the MMU disabled, * and they must represent the actual physical addresses. This will work * because we call the vmm_cleanup() function by its physical address. * * b. The instruction addresses are fetched using the old translation * tables. This will work because we have an identity mapping in place * in the translation tables and vmm_cleanup() is called by its physical * address. */ daif = intr_disable(); /* TODO: Invalidate the cache */ vmm_call_hyp(HYP_CLEANUP, vtophys(hyp_stub_vectors)); intr_restore(daif); arm64_set_active_vcpu(NULL); } static uint64_t vmm_vtcr_el2_sl(u_int levels) { #if PAGE_SIZE == PAGE_SIZE_4K switch (levels) { case 2: return (VTCR_EL2_SL0_4K_LVL2); case 3: return (VTCR_EL2_SL0_4K_LVL1); case 4: return (VTCR_EL2_SL0_4K_LVL0); default: panic("%s: Invalid number of page table levels %u", __func__, levels); } #elif PAGE_SIZE == PAGE_SIZE_16K switch (levels) { case 2: return (VTCR_EL2_SL0_16K_LVL2); case 3: return (VTCR_EL2_SL0_16K_LVL1); case 4: return (VTCR_EL2_SL0_16K_LVL0); default: panic("%s: Invalid number of page table levels %u", __func__, levels); } #else #error Unsupported page size #endif } int vmmops_modinit(int ipinum) { struct vmm_init_regs el2_regs; vm_offset_t next_hyp_va; vm_paddr_t vmm_base; uint64_t id_aa64mmfr0_el1, pa_range_bits, pa_range_field; uint64_t cnthctl_el2; int cpu, i; bool rv __diagused; if (!has_hyp()) { printf( "vmm: Processor doesn't have support for virtualization\n"); return (ENXIO); } if (!vgic_present()) { printf("vmm: No vgic found\n"); return (ENODEV); } if (!get_kernel_reg(ID_AA64MMFR0_EL1, &id_aa64mmfr0_el1)) { printf("vmm: Unable to read ID_AA64MMFR0_EL1\n"); return (ENXIO); } pa_range_field = ID_AA64MMFR0_PARange_VAL(id_aa64mmfr0_el1); /* * Use 3 levels to give us up to 39 bits with 4k pages, or * 47 bits with 16k pages. */ /* TODO: Check the number of levels for 64k pages */ vmm_pmap_levels = 3; switch (pa_range_field) { case ID_AA64MMFR0_PARange_4G: printf("vmm: Not enough physical address bits\n"); return (ENXIO); case ID_AA64MMFR0_PARange_64G: vmm_virt_bits = 36; #if PAGE_SIZE == PAGE_SIZE_16K vmm_pmap_levels = 2; #endif break; default: vmm_virt_bits = 39; break; } pa_range_bits = pa_range_field >> ID_AA64MMFR0_PARange_SHIFT; if (!in_vhe()) { /* Initialise the EL2 MMU */ if (!vmmpmap_init()) { printf("vmm: Failed to init the EL2 MMU\n"); return (ENOMEM); } } /* Set up the stage 2 pmap callbacks */ MPASS(pmap_clean_stage2_tlbi == NULL); pmap_clean_stage2_tlbi = vmm_clean_s2_tlbi; pmap_stage2_invalidate_range = vmm_s2_tlbi_range; pmap_stage2_invalidate_all = vmm_s2_tlbi_all; if (!in_vhe()) { /* * Create an allocator for the virtual address space used by * EL2. 
EL2 code is identity-mapped; the allocator is used to * find space for VM structures. */ el2_mem_alloc = vmem_create("VMM EL2", 0, 0, PAGE_SIZE, 0, M_WAITOK); /* Create the mappings for the hypervisor translation table. */ hyp_code_len = round_page(&vmm_hyp_code_end - &vmm_hyp_code); /* We need an physical identity mapping for when we activate the MMU */ hyp_code_base = vmm_base = vtophys(&vmm_hyp_code); rv = vmmpmap_enter(vmm_base, hyp_code_len, vmm_base, VM_PROT_READ | VM_PROT_EXECUTE); MPASS(rv); next_hyp_va = roundup2(vmm_base + hyp_code_len, L2_SIZE); /* Create a per-CPU hypervisor stack */ CPU_FOREACH(cpu) { stack[cpu] = malloc(VMM_STACK_SIZE, M_HYP, M_WAITOK | M_ZERO); stack_hyp_va[cpu] = next_hyp_va; for (i = 0; i < VMM_STACK_PAGES; i++) { rv = vmmpmap_enter(stack_hyp_va[cpu] + ptoa(i), PAGE_SIZE, vtophys(stack[cpu] + ptoa(i)), VM_PROT_READ | VM_PROT_WRITE); MPASS(rv); } next_hyp_va += L2_SIZE; } el2_regs.tcr_el2 = TCR_EL2_RES1; el2_regs.tcr_el2 |= min(pa_range_bits << TCR_EL2_PS_SHIFT, TCR_EL2_PS_52BITS); el2_regs.tcr_el2 |= TCR_EL2_T0SZ(64 - EL2_VIRT_BITS); el2_regs.tcr_el2 |= TCR_EL2_IRGN0_WBWA | TCR_EL2_ORGN0_WBWA; #if PAGE_SIZE == PAGE_SIZE_4K el2_regs.tcr_el2 |= TCR_EL2_TG0_4K; #elif PAGE_SIZE == PAGE_SIZE_16K el2_regs.tcr_el2 |= TCR_EL2_TG0_16K; #else #error Unsupported page size #endif #ifdef SMP el2_regs.tcr_el2 |= TCR_EL2_SH0_IS; #endif } switch (pa_range_bits << TCR_EL2_PS_SHIFT) { case TCR_EL2_PS_32BITS: vmm_max_ipa_bits = 32; break; case TCR_EL2_PS_36BITS: vmm_max_ipa_bits = 36; break; case TCR_EL2_PS_40BITS: vmm_max_ipa_bits = 40; break; case TCR_EL2_PS_42BITS: vmm_max_ipa_bits = 42; break; case TCR_EL2_PS_44BITS: vmm_max_ipa_bits = 44; break; case TCR_EL2_PS_48BITS: vmm_max_ipa_bits = 48; break; case TCR_EL2_PS_52BITS: default: vmm_max_ipa_bits = 52; break; } /* * Configure the Stage 2 translation control register: * * VTCR_IRGN0_WBWA: Translation table walks access inner cacheable * normal memory * VTCR_ORGN0_WBWA: Translation table walks access outer cacheable * normal memory * VTCR_EL2_TG0_4K/16K: Stage 2 uses the same page size as the kernel * VTCR_EL2_SL0_4K_LVL1: Stage 2 uses concatenated level 1 tables * VTCR_EL2_SH0_IS: Memory associated with Stage 2 walks is inner * shareable */ el2_regs.vtcr_el2 = VTCR_EL2_RES1; el2_regs.vtcr_el2 |= VTCR_EL2_IRGN0_WBWA | VTCR_EL2_ORGN0_WBWA; el2_regs.vtcr_el2 |= VTCR_EL2_T0SZ(64 - vmm_virt_bits); el2_regs.vtcr_el2 |= vmm_vtcr_el2_sl(vmm_pmap_levels); #if PAGE_SIZE == PAGE_SIZE_4K el2_regs.vtcr_el2 |= VTCR_EL2_TG0_4K; #elif PAGE_SIZE == PAGE_SIZE_16K el2_regs.vtcr_el2 |= VTCR_EL2_TG0_16K; #else #error Unsupported page size #endif #ifdef SMP el2_regs.vtcr_el2 |= VTCR_EL2_SH0_IS; #endif /* * If FEAT_LPA2 is enabled in the host then we need to enable it here * so the page tables created by pmap.c are correct. The meaning of * the shareability field changes to become address bits when this * is set. */ if ((READ_SPECIALREG(tcr_el1) & TCR_DS) != 0) { el2_regs.vtcr_el2 |= VTCR_EL2_DS; el2_regs.vtcr_el2 |= min(pa_range_bits << VTCR_EL2_PS_SHIFT, VTCR_EL2_PS_52BIT); } else { el2_regs.vtcr_el2 |= min(pa_range_bits << VTCR_EL2_PS_SHIFT, VTCR_EL2_PS_48BIT); } smp_rendezvous(NULL, arm_setup_vectors, NULL, &el2_regs); if (!in_vhe()) { /* Add memory to the vmem allocator (checking there is space) */ if (vmm_base > (L2_SIZE + PAGE_SIZE)) { /* * Ensure there is an L2 block before the vmm code to check * for buffer overflows on earlier data. Include the PAGE_SIZE * of the minimum we can allocate. 
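 *
 * After the adjustment below, the bottom of the EL2 address space is
 * laid out roughly as follows (exact addresses depend on where the
 * hypervisor code was loaded):
 *
 *	[0, L2_SIZE)		never added, so NULL dereferences fault
 *	[L2_SIZE, vmm_base)	handed to the el2_mem_alloc vmem arena
 *	[vmm_base, hyp code)	unused guard block in front of the code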
*/ vmm_base -= L2_SIZE + PAGE_SIZE; vmm_base = rounddown2(vmm_base, L2_SIZE); /* * Check there is memory before the vmm code to add. * * Reserve the L2 block at address 0 so NULL dereference will * raise an exception. */ if (vmm_base > L2_SIZE) vmem_add(el2_mem_alloc, L2_SIZE, vmm_base - L2_SIZE, M_WAITOK); } /* * Add the memory after the stacks. There is most of an L2 block * between the last stack and the first allocation so this should * be safe without adding more padding. */ if (next_hyp_va < HYP_VM_MAX_ADDRESS - PAGE_SIZE) vmem_add(el2_mem_alloc, next_hyp_va, HYP_VM_MAX_ADDRESS - next_hyp_va, M_WAITOK); } cnthctl_el2 = vmm_read_reg(HYP_REG_CNTHCTL); vgic_init(); vtimer_init(cnthctl_el2); return (0); } int vmmops_modcleanup(void) { int cpu; if (!in_vhe()) { smp_rendezvous(NULL, arm_teardown_vectors, NULL, NULL); CPU_FOREACH(cpu) { vmmpmap_remove(stack_hyp_va[cpu], VMM_STACK_PAGES * PAGE_SIZE, false); } vmmpmap_remove(hyp_code_base, hyp_code_len, false); } vtimer_cleanup(); if (!in_vhe()) { vmmpmap_fini(); CPU_FOREACH(cpu) free(stack[cpu], M_HYP); } pmap_clean_stage2_tlbi = NULL; pmap_stage2_invalidate_range = NULL; pmap_stage2_invalidate_all = NULL; return (0); } static vm_size_t el2_hyp_size(struct vm *vm) { return (round_page(sizeof(struct hyp) + sizeof(struct hypctx *) * vm_get_maxcpus(vm))); } static vm_size_t el2_hypctx_size(void) { return (round_page(sizeof(struct hypctx))); } static vm_offset_t el2_map_enter(vm_offset_t data, vm_size_t size, vm_prot_t prot) { vmem_addr_t addr; int err __diagused; bool rv __diagused; err = vmem_alloc(el2_mem_alloc, size, M_NEXTFIT | M_WAITOK, &addr); MPASS(err == 0); rv = vmmpmap_enter(addr, size, vtophys(data), prot); MPASS(rv); return (addr); } void * vmmops_init(struct vm *vm, pmap_t pmap) { struct hyp *hyp; vm_size_t size; size = el2_hyp_size(vm); hyp = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO); hyp->vm = vm; hyp->vgic_attached = false; vtimer_vminit(hyp); vgic_vminit(hyp); if (!in_vhe()) hyp->el2_addr = el2_map_enter((vm_offset_t)hyp, size, VM_PROT_READ | VM_PROT_WRITE); return (hyp); } void * vmmops_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) { struct hyp *hyp = vmi; struct hypctx *hypctx; vm_size_t size; size = el2_hypctx_size(); hypctx = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO); KASSERT(vcpuid >= 0 && vcpuid < vm_get_maxcpus(hyp->vm), ("%s: Invalid vcpuid %d", __func__, vcpuid)); hyp->ctx[vcpuid] = hypctx; hypctx->hyp = hyp; hypctx->vcpu = vcpu1; reset_vm_el01_regs(hypctx); reset_vm_el2_regs(hypctx); vtimer_cpuinit(hypctx); vgic_cpuinit(hypctx); if (!in_vhe()) hypctx->el2_addr = el2_map_enter((vm_offset_t)hypctx, size, VM_PROT_READ | VM_PROT_WRITE); return (hypctx); } static int arm_vmm_pinit(pmap_t pmap) { pmap_pinit_stage(pmap, PM_STAGE2, vmm_pmap_levels); return (1); } struct vmspace * vmmops_vmspace_alloc(vm_offset_t min, vm_offset_t max) { return (vmspace_alloc(min, max, arm_vmm_pinit)); } void vmmops_vmspace_free(struct vmspace *vmspace) { pmap_remove_pages(vmspace_pmap(vmspace)); vmspace_free(vmspace); } static inline void arm64_print_hyp_regs(struct vm_exit *vme) { printf("esr_el2: 0x%016lx\n", vme->u.hyp.esr_el2); printf("far_el2: 0x%016lx\n", vme->u.hyp.far_el2); printf("hpfar_el2: 0x%016lx\n", vme->u.hyp.hpfar_el2); printf("elr_el2: 0x%016lx\n", vme->pc); } static void arm64_gen_inst_emul_data(struct hypctx *hypctx, uint32_t esr_iss, struct vm_exit *vme_ret) { struct vm_guest_paging *paging; struct vie *vie; uint32_t esr_sas, reg_num; /* * Get the page address from HPFAR_EL2. 
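 *
 * HPFAR_EL2 only reports the faulting IPA at page granularity, so the
 * offset within the page is recovered from FAR_EL2, which holds the
 * faulting virtual address and shares its low bits with the IPA:
 *
 *	gpa = HPFAR_EL2_FIPA_ADDR(hpfar_el2) +
 *	    (far_el2 & FAR_EL2_HPFAR_PAGE_MASK);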
*/ vme_ret->u.inst_emul.gpa = HPFAR_EL2_FIPA_ADDR(hypctx->exit_info.hpfar_el2); /* Bits [11:0] are the same as bits [11:0] from the virtual address. */ vme_ret->u.inst_emul.gpa += hypctx->exit_info.far_el2 & FAR_EL2_HPFAR_PAGE_MASK; esr_sas = (esr_iss & ISS_DATA_SAS_MASK) >> ISS_DATA_SAS_SHIFT; reg_num = (esr_iss & ISS_DATA_SRT_MASK) >> ISS_DATA_SRT_SHIFT; vie = &vme_ret->u.inst_emul.vie; vie->access_size = 1 << esr_sas; vie->sign_extend = (esr_iss & ISS_DATA_SSE) ? 1 : 0; vie->dir = (esr_iss & ISS_DATA_WnR) ? VM_DIR_WRITE : VM_DIR_READ; vie->reg = reg_num; paging = &vme_ret->u.inst_emul.paging; paging->ttbr0_addr = hypctx->ttbr0_el1 & ~(TTBR_ASID_MASK | TTBR_CnP); paging->ttbr1_addr = hypctx->ttbr1_el1 & ~(TTBR_ASID_MASK | TTBR_CnP); paging->tcr_el1 = hypctx->tcr_el1; paging->tcr2_el1 = hypctx->tcr2_el1; paging->flags = hypctx->tf.tf_spsr & (PSR_M_MASK | PSR_M_32); if ((hypctx->sctlr_el1 & SCTLR_M) != 0) paging->flags |= VM_GP_MMU_ENABLED; } static void arm64_gen_reg_emul_data(uint32_t esr_iss, struct vm_exit *vme_ret) { uint32_t reg_num; struct vre *vre; /* u.hyp member will be replaced by u.reg_emul */ vre = &vme_ret->u.reg_emul.vre; vre->inst_syndrome = esr_iss; /* ARMv8 Architecture Manual, p. D7-2273: 1 means read */ vre->dir = (esr_iss & ISS_MSR_DIR) ? VM_DIR_READ : VM_DIR_WRITE; reg_num = ISS_MSR_Rt(esr_iss); vre->reg = reg_num; } void raise_data_insn_abort(struct hypctx *hypctx, uint64_t far, bool dabort, int fsc) { uint64_t esr; if ((hypctx->tf.tf_spsr & PSR_M_MASK) == PSR_M_EL0t) esr = EXCP_INSN_ABORT_L << ESR_ELx_EC_SHIFT; else esr = EXCP_INSN_ABORT << ESR_ELx_EC_SHIFT; /* Set the bit that changes from insn -> data abort */ if (dabort) esr |= EXCP_DATA_ABORT_L << ESR_ELx_EC_SHIFT; /* Set the IL bit if set by hardware */ esr |= hypctx->tf.tf_esr & ESR_ELx_IL; vmmops_exception(hypctx, esr | fsc, far); } static int handle_el1_sync_excp(struct hypctx *hypctx, struct vm_exit *vme_ret, pmap_t pmap) { uint64_t gpa; uint32_t esr_ec, esr_iss; esr_ec = ESR_ELx_EXCEPTION(hypctx->tf.tf_esr); esr_iss = hypctx->tf.tf_esr & ESR_ELx_ISS_MASK; switch (esr_ec) { case EXCP_UNKNOWN: vmm_stat_incr(hypctx->vcpu, VMEXIT_UNKNOWN, 1); arm64_print_hyp_regs(vme_ret); vme_ret->exitcode = VM_EXITCODE_HYP; break; case EXCP_TRAP_WFI_WFE: if ((hypctx->tf.tf_esr & 0x3) == 0) { /* WFI */ vmm_stat_incr(hypctx->vcpu, VMEXIT_WFI, 1); vme_ret->exitcode = VM_EXITCODE_WFI; } else { vmm_stat_incr(hypctx->vcpu, VMEXIT_WFE, 1); vme_ret->exitcode = VM_EXITCODE_HYP; } break; case EXCP_HVC: vmm_stat_incr(hypctx->vcpu, VMEXIT_HVC, 1); vme_ret->exitcode = VM_EXITCODE_HVC; break; case EXCP_MSR: vmm_stat_incr(hypctx->vcpu, VMEXIT_MSR, 1); arm64_gen_reg_emul_data(esr_iss, vme_ret); vme_ret->exitcode = VM_EXITCODE_REG_EMUL; break; case EXCP_BRK: vmm_stat_incr(hypctx->vcpu, VMEXIT_BRK, 1); vme_ret->exitcode = VM_EXITCODE_BRK; break; case EXCP_SOFTSTP_EL0: vmm_stat_incr(hypctx->vcpu, VMEXIT_SS, 1); vme_ret->exitcode = VM_EXITCODE_SS; break; case EXCP_INSN_ABORT_L: case EXCP_DATA_ABORT_L: vmm_stat_incr(hypctx->vcpu, esr_ec == EXCP_DATA_ABORT_L ? 
VMEXIT_DATA_ABORT : VMEXIT_INSN_ABORT, 1); switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) { case ISS_DATA_DFSC_TF_L0: case ISS_DATA_DFSC_TF_L1: case ISS_DATA_DFSC_TF_L2: case ISS_DATA_DFSC_TF_L3: case ISS_DATA_DFSC_AFF_L1: case ISS_DATA_DFSC_AFF_L2: case ISS_DATA_DFSC_AFF_L3: case ISS_DATA_DFSC_PF_L1: case ISS_DATA_DFSC_PF_L2: case ISS_DATA_DFSC_PF_L3: gpa = HPFAR_EL2_FIPA_ADDR(hypctx->exit_info.hpfar_el2); /* Check the IPA is valid */ if (gpa >= (1ul << vmm_max_ipa_bits)) { raise_data_insn_abort(hypctx, hypctx->exit_info.far_el2, esr_ec == EXCP_DATA_ABORT_L, ISS_DATA_DFSC_ASF_L0); vme_ret->inst_length = 0; return (HANDLED); } if (vm_mem_allocated(hypctx->vcpu, gpa)) { vme_ret->exitcode = VM_EXITCODE_PAGING; vme_ret->inst_length = 0; vme_ret->u.paging.esr = hypctx->tf.tf_esr; vme_ret->u.paging.gpa = gpa; } else if (esr_ec == EXCP_INSN_ABORT_L) { /* * Raise an external abort. Device memory is * not executable */ raise_data_insn_abort(hypctx, hypctx->exit_info.far_el2, false, ISS_DATA_DFSC_EXT); vme_ret->inst_length = 0; return (HANDLED); } else { arm64_gen_inst_emul_data(hypctx, esr_iss, vme_ret); vme_ret->exitcode = VM_EXITCODE_INST_EMUL; } break; default: arm64_print_hyp_regs(vme_ret); vme_ret->exitcode = VM_EXITCODE_HYP; break; } break; default: vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED_SYNC, 1); arm64_print_hyp_regs(vme_ret); vme_ret->exitcode = VM_EXITCODE_HYP; break; } /* We don't do any instruction emulation here */ return (UNHANDLED); } static int arm64_handle_world_switch(struct hypctx *hypctx, int excp_type, struct vm_exit *vme, pmap_t pmap) { int handled; switch (excp_type) { case EXCP_TYPE_EL1_SYNC: /* The exit code will be set by handle_el1_sync_excp(). */ handled = handle_el1_sync_excp(hypctx, vme, pmap); break; case EXCP_TYPE_EL1_IRQ: case EXCP_TYPE_EL1_FIQ: /* The host kernel will handle IRQs and FIQs. */ vmm_stat_incr(hypctx->vcpu, excp_type == EXCP_TYPE_EL1_IRQ ? 
VMEXIT_IRQ : VMEXIT_FIQ,1); vme->exitcode = VM_EXITCODE_BOGUS; handled = UNHANDLED; break; case EXCP_TYPE_EL1_ERROR: case EXCP_TYPE_EL2_SYNC: case EXCP_TYPE_EL2_IRQ: case EXCP_TYPE_EL2_FIQ: case EXCP_TYPE_EL2_ERROR: vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED_EL2, 1); vme->exitcode = VM_EXITCODE_BOGUS; handled = UNHANDLED; break; default: vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED, 1); vme->exitcode = VM_EXITCODE_BOGUS; handled = UNHANDLED; break; } return (handled); } static void ptp_release(void **cookie) { if (*cookie != NULL) { vm_gpa_release(*cookie); *cookie = NULL; } } static void * ptp_hold(struct vcpu *vcpu, vm_paddr_t ptpphys, size_t len, void **cookie) { void *ptr; ptp_release(cookie); ptr = vm_gpa_hold(vcpu, ptpphys, len, VM_PROT_RW, cookie); return (ptr); } /* log2 of the number of bytes in a page table entry */ #define PTE_SHIFT 3 int vmmops_gla2gpa(void *vcpui, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *is_fault) { struct hypctx *hypctx; void *cookie; uint64_t mask, *ptep, pte, pte_addr; int address_bits, granule_shift, ia_bits, levels, pte_shift, tsz; bool is_el0; /* Check if the MMU is off */ if ((paging->flags & VM_GP_MMU_ENABLED) == 0) { *is_fault = 0; *gpa = gla; return (0); } is_el0 = (paging->flags & PSR_M_MASK) == PSR_M_EL0t; if (ADDR_IS_KERNEL(gla)) { /* If address translation is disabled raise an exception */ if ((paging->tcr_el1 & TCR_EPD1) != 0) { *is_fault = 1; return (0); } if (is_el0 && (paging->tcr_el1 & TCR_E0PD1) != 0) { *is_fault = 1; return (0); } pte_addr = paging->ttbr1_addr; tsz = (paging->tcr_el1 & TCR_T1SZ_MASK) >> TCR_T1SZ_SHIFT; /* Clear the top byte if TBI is on */ if ((paging->tcr_el1 & TCR_TBI1) != 0) gla |= (0xfful << 56); switch (paging->tcr_el1 & TCR_TG1_MASK) { case TCR_TG1_4K: granule_shift = PAGE_SHIFT_4K; break; case TCR_TG1_16K: granule_shift = PAGE_SHIFT_16K; break; case TCR_TG1_64K: granule_shift = PAGE_SHIFT_64K; break; default: *is_fault = 1; return (EINVAL); } } else { /* If address translation is disabled raise an exception */ if ((paging->tcr_el1 & TCR_EPD0) != 0) { *is_fault = 1; return (0); } if (is_el0 && (paging->tcr_el1 & TCR_E0PD0) != 0) { *is_fault = 1; return (0); } pte_addr = paging->ttbr0_addr; tsz = (paging->tcr_el1 & TCR_T0SZ_MASK) >> TCR_T0SZ_SHIFT; /* Clear the top byte if TBI is on */ if ((paging->tcr_el1 & TCR_TBI0) != 0) gla &= ~(0xfful << 56); switch (paging->tcr_el1 & TCR_TG0_MASK) { case TCR_TG0_4K: granule_shift = PAGE_SHIFT_4K; break; case TCR_TG0_16K: granule_shift = PAGE_SHIFT_16K; break; case TCR_TG0_64K: granule_shift = PAGE_SHIFT_64K; break; default: *is_fault = 1; return (EINVAL); } } /* * TODO: Support FEAT_TTST for smaller tsz values and FEAT_LPA2 * for larger values. */ switch (granule_shift) { case PAGE_SHIFT_4K: case PAGE_SHIFT_16K: /* * See "Table D8-11 4KB granule, determining stage 1 initial * lookup level" and "Table D8-21 16KB granule, determining * stage 1 initial lookup level" from the "Arm Architecture * Reference Manual for A-Profile architecture" revision I.a * for the minimum and maximum values. * * TODO: Support less than 16 when FEAT_LPA2 is implemented * and TCR_EL1.DS == 1 * TODO: Support more than 39 when FEAT_TTST is implemented */ if (tsz < 16 || tsz > 39) { *is_fault = 1; return (EINVAL); } break; case PAGE_SHIFT_64K: /* TODO: Support 64k granule. It will probably work, but is untested */ default: *is_fault = 1; return (EINVAL); } /* * Calculate the input address bits. These are 64 bit in an address * with the top tsz bits being all 0 or all 1. 
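 *
 * As a worked example, with 4KB granules (granule_shift == 12,
 * PTE_SHIFT == 3) and tsz == 16, i.e. a 48-bit address space:
 *
 *	ia_bits      = 64 - 16             = 48
 *	address_bits = 48 - 12             = 36
 *	levels       = howmany(36, 12 - 3) = 4
 *
 * giving a four level walk that resolves 9 bits of the input address
 * at each level, as computed below.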
*/ ia_bits = 64 - tsz; /* * Calculate the number of address bits used in the page table * calculation. This is ia_bits minus the bottom granule_shift * bits that are passed to the output address. */ address_bits = ia_bits - granule_shift; /* * Calculate the number of levels. Each level uses * granule_shift - PTE_SHIFT bits of the input address. * This is because the table is 1 << granule_shift and each * entry is 1 << PTE_SHIFT bytes. */ levels = howmany(address_bits, granule_shift - PTE_SHIFT); /* Mask of the upper unused bits in the virtual address */ gla &= (1ul << ia_bits) - 1; hypctx = (struct hypctx *)vcpui; cookie = NULL; /* TODO: Check if the level supports block descriptors */ for (;levels > 0; levels--) { int idx; pte_shift = (levels - 1) * (granule_shift - PTE_SHIFT) + granule_shift; idx = (gla >> pte_shift) & ((1ul << (granule_shift - PTE_SHIFT)) - 1); while (idx > PAGE_SIZE / sizeof(pte)) { idx -= PAGE_SIZE / sizeof(pte); pte_addr += PAGE_SIZE; } ptep = ptp_hold(hypctx->vcpu, pte_addr, PAGE_SIZE, &cookie); if (ptep == NULL) goto error; pte = ptep[idx]; /* Calculate the level we are looking at */ switch (levels) { default: goto fault; /* TODO: Level -1 when FEAT_LPA2 is implemented */ case 4: /* Level 0 */ if ((pte & ATTR_DESCR_MASK) != L0_TABLE) goto fault; /* FALLTHROUGH */ case 3: /* Level 1 */ case 2: /* Level 2 */ switch (pte & ATTR_DESCR_MASK) { /* Use L1 macro as all levels are the same */ case L1_TABLE: /* Check if EL0 can access this address space */ if (is_el0 && (pte & TATTR_AP_TABLE_NO_EL0) != 0) goto fault; /* Check if the address space is writable */ if ((prot & PROT_WRITE) != 0 && (pte & TATTR_AP_TABLE_RO) != 0) goto fault; if ((prot & PROT_EXEC) != 0) { /* Check the table exec attribute */ if ((is_el0 && (pte & TATTR_UXN_TABLE) != 0) || (!is_el0 && (pte & TATTR_PXN_TABLE) != 0)) goto fault; } pte_addr = pte & ~ATTR_MASK; break; case L1_BLOCK: goto done; default: goto fault; } break; case 1: /* Level 3 */ if ((pte & ATTR_DESCR_MASK) == L3_PAGE) goto done; goto fault; } } done: /* Check if EL0 has access to the block/page */ if (is_el0 && (pte & ATTR_S1_AP(ATTR_S1_AP_USER)) == 0) goto fault; if ((prot & PROT_WRITE) != 0 && (pte & ATTR_S1_AP_RW_BIT) != 0) goto fault; if ((prot & PROT_EXEC) != 0) { if ((is_el0 && (pte & ATTR_S1_UXN) != 0) || (!is_el0 && (pte & ATTR_S1_PXN) != 0)) goto fault; } mask = (1ul << pte_shift) - 1; *gpa = (pte & ~ATTR_MASK) | (gla & mask); *is_fault = 0; ptp_release(&cookie); return (0); error: ptp_release(&cookie); return (EFAULT); fault: *is_fault = 1; ptp_release(&cookie); return (0); } int vmmops_run(void *vcpui, register_t pc, pmap_t pmap, struct vm_eventinfo *evinfo) { uint64_t excp_type; int handled; register_t daif; struct hyp *hyp; struct hypctx *hypctx; struct vcpu *vcpu; struct vm_exit *vme; int mode; hypctx = (struct hypctx *)vcpui; hyp = hypctx->hyp; vcpu = hypctx->vcpu; vme = vm_exitinfo(vcpu); hypctx->tf.tf_elr = (uint64_t)pc; for (;;) { if (hypctx->has_exception) { hypctx->has_exception = false; hypctx->elr_el1 = hypctx->tf.tf_elr; mode = hypctx->tf.tf_spsr & (PSR_M_MASK | PSR_M_32); if (mode == PSR_M_EL1t) { hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x0; } else if (mode == PSR_M_EL1h) { hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x200; } else if ((mode & PSR_M_32) == PSR_M_64) { /* 64-bit EL0 */ hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x400; } else { /* 32-bit EL0 */ hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x600; } /* Set the new spsr */ hypctx->spsr_el1 = hypctx->tf.tf_spsr; /* Set the new cpsr */ hypctx->tf.tf_spsr = 
hypctx->spsr_el1 & PSR_FLAGS; hypctx->tf.tf_spsr |= PSR_DAIF | PSR_M_EL1h; /* * Update fields that may change on exeption entry * based on how sctlr_el1 is configured. */ if ((hypctx->sctlr_el1 & SCTLR_SPAN) == 0) hypctx->tf.tf_spsr |= PSR_PAN; if ((hypctx->sctlr_el1 & SCTLR_DSSBS) == 0) hypctx->tf.tf_spsr &= ~PSR_SSBS; else hypctx->tf.tf_spsr |= PSR_SSBS; } daif = intr_disable(); /* Check if the vcpu is suspended */ if (vcpu_suspended(evinfo)) { intr_restore(daif); vm_exit_suspended(vcpu, pc); break; } if (vcpu_debugged(vcpu)) { intr_restore(daif); vm_exit_debug(vcpu, pc); break; } /* Activate the stage2 pmap so the vmid is valid */ pmap_activate_vm(pmap); hyp->vttbr_el2 = pmap_to_ttbr0(pmap); /* * TODO: What happens if a timer interrupt is asserted exactly * here, but for the previous VM? */ arm64_set_active_vcpu(hypctx); vgic_flush_hwstate(hypctx); /* Call into EL2 to switch to the guest */ excp_type = vmm_enter_guest(hyp, hypctx); vgic_sync_hwstate(hypctx); vtimer_sync_hwstate(hypctx); /* * Deactivate the stage2 pmap. */ PCPU_SET(curvmpmap, NULL); intr_restore(daif); vmm_stat_incr(vcpu, VMEXIT_COUNT, 1); if (excp_type == EXCP_TYPE_MAINT_IRQ) continue; vme->pc = hypctx->tf.tf_elr; vme->inst_length = INSN_SIZE; vme->u.hyp.exception_nr = excp_type; vme->u.hyp.esr_el2 = hypctx->tf.tf_esr; vme->u.hyp.far_el2 = hypctx->exit_info.far_el2; vme->u.hyp.hpfar_el2 = hypctx->exit_info.hpfar_el2; handled = arm64_handle_world_switch(hypctx, excp_type, vme, pmap); if (handled == UNHANDLED) /* Exit loop to emulate instruction. */ break; else /* Resume guest execution from the next instruction. */ hypctx->tf.tf_elr += vme->inst_length; } return (0); } static void arm_pcpu_vmcleanup(void *arg) { struct hyp *hyp; int i, maxcpus; hyp = arg; maxcpus = vm_get_maxcpus(hyp->vm); for (i = 0; i < maxcpus; i++) { if (arm64_get_active_vcpu() == hyp->ctx[i]) { arm64_set_active_vcpu(NULL); break; } } } void vmmops_vcpu_cleanup(void *vcpui) { struct hypctx *hypctx = vcpui; vtimer_cpucleanup(hypctx); vgic_cpucleanup(hypctx); if (!in_vhe()) vmmpmap_remove(hypctx->el2_addr, el2_hypctx_size(), true); free(hypctx, M_HYP); } void vmmops_cleanup(void *vmi) { struct hyp *hyp = vmi; vtimer_vmcleanup(hyp); vgic_vmcleanup(hyp); smp_rendezvous(NULL, arm_pcpu_vmcleanup, NULL, hyp); if (!in_vhe()) vmmpmap_remove(hyp->el2_addr, el2_hyp_size(hyp->vm), true); free(hyp, M_HYP); } /* * Return register value. Registers have different sizes and an explicit cast * must be made to ensure proper conversion. */ static uint64_t * hypctx_regptr(struct hypctx *hypctx, int reg) { switch (reg) { case VM_REG_GUEST_X0 ... 
VM_REG_GUEST_X29: return (&hypctx->tf.tf_x[reg]); case VM_REG_GUEST_LR: return (&hypctx->tf.tf_lr); case VM_REG_GUEST_SP: return (&hypctx->tf.tf_sp); case VM_REG_GUEST_CPSR: return (&hypctx->tf.tf_spsr); case VM_REG_GUEST_PC: return (&hypctx->tf.tf_elr); case VM_REG_GUEST_SCTLR_EL1: return (&hypctx->sctlr_el1); case VM_REG_GUEST_TTBR0_EL1: return (&hypctx->ttbr0_el1); case VM_REG_GUEST_TTBR1_EL1: return (&hypctx->ttbr1_el1); case VM_REG_GUEST_TCR_EL1: return (&hypctx->tcr_el1); case VM_REG_GUEST_TCR2_EL1: return (&hypctx->tcr2_el1); default: break; } return (NULL); } int vmmops_getreg(void *vcpui, int reg, uint64_t *retval) { uint64_t *regp; int running, hostcpu; struct hypctx *hypctx = vcpui; running = vcpu_is_running(hypctx->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("arm_getreg: %s%d is running", vm_name(hypctx->hyp->vm), vcpu_vcpuid(hypctx->vcpu)); regp = hypctx_regptr(hypctx, reg); if (regp == NULL) return (EINVAL); *retval = *regp; return (0); } int vmmops_setreg(void *vcpui, int reg, uint64_t val) { uint64_t *regp; struct hypctx *hypctx = vcpui; int running, hostcpu; running = vcpu_is_running(hypctx->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("arm_setreg: %s%d is running", vm_name(hypctx->hyp->vm), vcpu_vcpuid(hypctx->vcpu)); regp = hypctx_regptr(hypctx, reg); if (regp == NULL) return (EINVAL); *regp = val; return (0); } int vmmops_exception(void *vcpui, uint64_t esr, uint64_t far) { struct hypctx *hypctx = vcpui; int running, hostcpu; running = vcpu_is_running(hypctx->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("%s: %s%d is running", __func__, vm_name(hypctx->hyp->vm), vcpu_vcpuid(hypctx->vcpu)); hypctx->far_el1 = far; hypctx->esr_el1 = esr; hypctx->has_exception = true; return (0); } int vmmops_getcap(void *vcpui, int num, int *retval) { struct hypctx *hypctx = vcpui; int ret; ret = ENOENT; switch (num) { case VM_CAP_UNRESTRICTED_GUEST: *retval = 1; ret = 0; break; case VM_CAP_BRK_EXIT: case VM_CAP_SS_EXIT: case VM_CAP_MASK_HWINTR: *retval = (hypctx->setcaps & (1ul << num)) != 0; break; default: break; } return (ret); } int vmmops_setcap(void *vcpui, int num, int val) { struct hypctx *hypctx = vcpui; int ret; ret = 0; switch (num) { case VM_CAP_BRK_EXIT: if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0)) break; if (val != 0) hypctx->mdcr_el2 |= MDCR_EL2_TDE; else hypctx->mdcr_el2 &= ~MDCR_EL2_TDE; break; case VM_CAP_SS_EXIT: if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0)) break; if (val != 0) { hypctx->debug_spsr |= (hypctx->tf.tf_spsr & PSR_SS); hypctx->debug_mdscr |= hypctx->mdscr_el1 & (MDSCR_SS | MDSCR_KDE); hypctx->tf.tf_spsr |= PSR_SS; hypctx->mdscr_el1 |= MDSCR_SS | MDSCR_KDE; hypctx->mdcr_el2 |= MDCR_EL2_TDE; } else { hypctx->tf.tf_spsr &= ~PSR_SS; hypctx->tf.tf_spsr |= hypctx->debug_spsr; hypctx->debug_spsr &= ~PSR_SS; hypctx->mdscr_el1 &= ~(MDSCR_SS | MDSCR_KDE); hypctx->mdscr_el1 |= hypctx->debug_mdscr; hypctx->debug_mdscr &= ~(MDSCR_SS | MDSCR_KDE); hypctx->mdcr_el2 &= ~MDCR_EL2_TDE; } break; case VM_CAP_MASK_HWINTR: if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0)) break; if (val != 0) { hypctx->debug_spsr |= (hypctx->tf.tf_spsr & (PSR_I | PSR_F)); hypctx->tf.tf_spsr |= PSR_I | PSR_F; } else { hypctx->tf.tf_spsr &= ~(PSR_I | PSR_F); hypctx->tf.tf_spsr |= (hypctx->debug_spsr & (PSR_I | PSR_F)); hypctx->debug_spsr &= ~(PSR_I | PSR_F); } break; default: ret = ENOENT; break; } if (ret == 0) { if (val == 0) hypctx->setcaps &= ~(1ul << num); else hypctx->setcaps |= (1ul << num); } return (ret); } diff 
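The stage 1 walk set-up in vmmops_gla2gpa() above is driven entirely by TCR_EL1's TxSZ field and the granule size: ia_bits = 64 - tsz, address_bits = ia_bits - granule_shift, and levels = howmany(address_bits, granule_shift - PTE_SHIFT), with each lookup consuming granule_shift - PTE_SHIFT bits of the address. The standalone sketch below (plain C, illustrative only, 4K granule assumed, not part of this change) reproduces that arithmetic so the level count and per-level table indices can be checked outside the kernel.

#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT	3	/* log2 of the bytes in a page table entry */

/*
 * Mirror the arithmetic vmmops_gla2gpa() uses to size the stage 1 walk.
 * Illustrative only: plain C, 4K granule assumed (granule_shift = 12).
 */
static void
walk_parameters(uint64_t gla, int tsz, int granule_shift)
{
	int ia_bits, address_bits, bits_per_level, levels, level, pte_shift;
	uint64_t idx;

	ia_bits = 64 - tsz;			/* input address size */
	address_bits = ia_bits - granule_shift;	/* bits resolved by tables */
	bits_per_level = granule_shift - PTE_SHIFT;
	/* howmany(): round up so a partially used top table still counts */
	levels = (address_bits + bits_per_level - 1) / bits_per_level;

	/* Drop the bits above the input address size, as the driver does */
	gla &= (UINT64_C(1) << ia_bits) - 1;

	printf("tsz=%d: ia_bits=%d, %d lookup levels\n", tsz, ia_bits, levels);
	for (level = levels; level > 0; level--) {
		pte_shift = (level - 1) * bits_per_level + granule_shift;
		idx = (gla >> pte_shift) &
		    ((UINT64_C(1) << bits_per_level) - 1);
		printf("  index %ju selected by address bits [%d:%d]\n",
		    (uintmax_t)idx, pte_shift + bits_per_level - 1, pte_shift);
	}
}

int
main(void)
{
	/* T0SZ/T1SZ of 25 gives a 39-bit space: a 3 level walk with 4K pages */
	walk_parameters(UINT64_C(0x0000001234567000), 25, 12);
	return (0);
}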
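When has_exception is set, the run loop above vectors the guest through VBAR_EL1 at one of four offsets chosen from the saved SPSR mode: 0x0 for EL1 with SP_EL0, 0x200 for EL1 with SP_ELx, 0x400 for 64-bit EL0 and 0x600 for 32-bit EL0. A minimal sketch of that selection follows; the PSR_M_* values are restated locally (they follow the architectural M[4:0] encodings) so the example stands alone rather than depending on the arm64 headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed SPSR mode encodings, restated so the example is self-contained. */
#define PSR_M_EL0t	0x00000000u
#define PSR_M_EL1t	0x00000004u
#define PSR_M_EL1h	0x00000005u
#define PSR_M_64	0x00000000u
#define PSR_M_32	0x00000010u	/* M[4]: AArch32 execution state */
#define PSR_M_MASK	0x0000000fu

/*
 * Return the offset from VBAR_EL1 that an injected exception will use,
 * mirroring the selection made in vmmops_run().
 */
static uint64_t
vbar_offset(uint32_t spsr)
{
	uint32_t mode;

	mode = spsr & (PSR_M_MASK | PSR_M_32);
	if (mode == PSR_M_EL1t)
		return (0x0);	/* current EL, SP_EL0 */
	if (mode == PSR_M_EL1h)
		return (0x200);	/* current EL, SP_ELx */
	if ((mode & PSR_M_32) == PSR_M_64)
		return (0x400);	/* lower EL, AArch64 */
	return (0x600);		/* lower EL, AArch32 */
}

int
main(void)
{
	printf("EL1h exception enters VBAR_EL1 + 0x%lx\n",
	    (unsigned long)vbar_offset(PSR_M_EL1h));
	printf("64-bit EL0 exception enters VBAR_EL1 + 0x%lx\n",
	    (unsigned long)vbar_offset(PSR_M_EL0t));
	return (0);
}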
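hypctx_regptr() above maps a VM_REG_GUEST_* identifier to the trap-frame or shadow-register field that backs it, so vmmops_getreg() and vmmops_setreg() reduce to a pointer lookup plus a 64-bit load or store, returning EINVAL for unknown registers. The cut-down illustration below uses a handful of hypothetical register IDs and the same case-range extension the driver relies on; it is a sketch, not the driver's own code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register identifiers, standing in for VM_REG_GUEST_*. */
enum guest_reg { REG_X0, REG_X1, REG_LR, REG_SP, REG_PC, REG_LAST };

/* Minimal stand-in for the saved guest state kept in struct hypctx. */
struct guest_state {
	uint64_t x[2];
	uint64_t lr;
	uint64_t sp;
	uint64_t elr;	/* guest program counter */
};

static uint64_t *
guest_regptr(struct guest_state *gs, int reg)
{
	switch (reg) {
	case REG_X0 ... REG_X1:	/* GCC/Clang case-range extension */
		return (&gs->x[reg - REG_X0]);
	case REG_LR:
		return (&gs->lr);
	case REG_SP:
		return (&gs->sp);
	case REG_PC:
		return (&gs->elr);
	default:
		return (NULL);
	}
}

static int
guest_getreg(struct guest_state *gs, int reg, uint64_t *retval)
{
	uint64_t *regp;

	regp = guest_regptr(gs, reg);
	if (regp == NULL)
		return (EINVAL);	/* unknown register, as in the driver */
	*retval = *regp;
	return (0);
}

int
main(void)
{
	struct guest_state gs = { .elr = UINT64_C(0xffffff8000200000) };
	uint64_t pc;

	if (guest_getreg(&gs, REG_PC, &pc) == 0)
		printf("guest pc: 0x%lx\n", (unsigned long)pc);
	return (0);
}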
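vmmops_getcap()/vmmops_setcap() above keep a per-vCPU bit mask, hypctx->setcaps, indexed by the capability number: the get path reports bit membership, and the set path touches hardware state only on a genuine transition before updating the mask. A self-contained sketch of that bookkeeping pattern, using hypothetical capability numbers rather than the real VM_CAP_* values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability numbers, standing in for VM_CAP_*. */
enum { CAP_BRK_EXIT = 0, CAP_SS_EXIT = 1, CAP_MASK_HWINTR = 2 };

struct vcpu_caps {
	uint64_t setcaps;	/* one bit per capability, as in hypctx */
};

static bool
cap_enabled(const struct vcpu_caps *c, int num)
{
	return ((c->setcaps & (UINT64_C(1) << num)) != 0);
}

static void
cap_set(struct vcpu_caps *c, int num, int val)
{
	/* Skip the work when the requested state is already in effect. */
	if ((val != 0) == cap_enabled(c, num))
		return;
	/* ... the driver would arm or disarm the hardware trap here ... */
	if (val != 0)
		c->setcaps |= UINT64_C(1) << num;
	else
		c->setcaps &= ~(UINT64_C(1) << num);
}

int
main(void)
{
	struct vcpu_caps c = { 0 };

	cap_set(&c, CAP_SS_EXIT, 1);
	printf("single-step exits: %s\n",
	    cap_enabled(&c, CAP_SS_EXIT) ? "on" : "off");
	cap_set(&c, CAP_SS_EXIT, 0);
	printf("single-step exits: %s\n",
	    cap_enabled(&c, CAP_SS_EXIT) ? "on" : "off");
	return (0);
}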
--git a/sys/arm64/vmm/vmm_dev_machdep.c b/sys/arm64/vmm/vmm_dev_machdep.c index a8e0ee50fd81..926a74fa528b 100644 --- a/sys/arm64/vmm/vmm_dev_machdep.c +++ b/sys/arm64/vmm/vmm_dev_machdep.c @@ -1,137 +1,138 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 NetApp, Inc. * Copyright (C) 2015 Mihai Carabas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include "io/vgic.h" const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = { VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_GLA2GPA_NOFAULT, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_ATTACH_VGIC, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), VMMDEV_IOCTL(VM_GET_VGIC_VERSION, 0), VMMDEV_IOCTL(VM_RAISE_MSI, 0), VMMDEV_IOCTL(VM_ASSERT_IRQ, 0), VMMDEV_IOCTL(VM_DEASSERT_IRQ, 0), }; const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls); int vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct vm_run *vmrun; struct vm_vgic_version *vgv; struct vm_vgic_descr *vgic; struct vm_irq *vi; struct vm_exception *vmexc; struct vm_gla2gpa *gg; struct vm_msi *vmsi; int error; error = 0; switch (cmd) { case VM_RUN: { struct vm_exit *vme; vmrun = (struct vm_run *)data; vme = vm_exitinfo(vcpu); error = vm_run(vcpu); if (error != 0) break; error = copyout(vme, vmrun->vm_exit, sizeof(*vme)); if (error != 0) break; break; } case VM_INJECT_EXCEPTION: vmexc = (struct vm_exception *)data; error = vm_inject_exception(vcpu, vmexc->esr, vmexc->far); break; case VM_GLA2GPA_NOFAULT: gg = (struct vm_gla2gpa *)data; error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla, gg->prot, &gg->gpa, &gg->fault); KASSERT(error == 0 || error == EFAULT, ("%s: vm_gla2gpa unknown error %d", __func__, error)); break; case VM_GET_VGIC_VERSION: vgv = (struct vm_vgic_version *)data; /* TODO: Query the vgic driver for this */ vgv->version = 3; vgv->flags = 0; error = 0; break; case VM_ATTACH_VGIC: vgic = (struct vm_vgic_descr *)data; error = vm_attach_vgic(vm, vgic); break; case VM_RAISE_MSI: vmsi = (struct vm_msi *)data; error = vm_raise_msi(vm, 
vmsi->msg, vmsi->addr, vmsi->bus, vmsi->slot, vmsi->func); break; case VM_ASSERT_IRQ: vi = (struct vm_irq *)data; error = vm_assert_irq(vm, vi->irq); break; case VM_DEASSERT_IRQ: vi = (struct vm_irq *)data; error = vm_deassert_irq(vm, vi->irq); break; default: error = ENOTTY; break; } return (error); } diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 index 43da6e757b1c..13c7d9b1875d 100644 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -1,773 +1,774 @@ ## ## Kernel ## kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_devmap.c standard kern/subr_intr.c optional intrng kern/subr_physmem.c standard libkern/strlen.c standard libkern/arm64/crc32c_armv8.S standard arm/arm/generic_timer.c standard arm/arm/gic.c standard arm/arm/gic_acpi.c optional acpi arm/arm/gic_fdt.c optional fdt arm/arm/gic_if.m standard arm/arm/pmu.c standard arm/arm/pmu_acpi.c optional acpi arm/arm/pmu_fdt.c optional fdt arm64/acpica/acpi_iort.c optional acpi arm64/acpica/acpi_machdep.c optional acpi arm64/acpica/OsdEnvironment.c optional acpi arm64/acpica/acpi_wakeup.c optional acpi arm64/acpica/pci_cfgreg.c optional acpi pci arm64/arm64/autoconf.c standard arm64/arm64/bus_machdep.c standard arm64/arm64/bus_space_asm.S standard arm64/arm64/busdma_bounce.c standard arm64/arm64/busdma_machdep.c standard arm64/arm64/clock.c standard arm64/arm64/copyinout.S standard arm64/arm64/cpu_errata.c standard arm64/arm64/cpu_feat.c standard arm64/arm64/cpufunc_asm.S standard arm64/arm64/db_disasm.c optional ddb arm64/arm64/db_interface.c optional ddb arm64/arm64/db_trace.c optional ddb arm64/arm64/debug_monitor.c standard arm64/arm64/disassem.c optional ddb arm64/arm64/dump_machdep.c standard arm64/arm64/efirt_machdep.c optional efirt arm64/arm64/efirt_support.S optional efirt arm64/arm64/elf32_machdep.c optional compat_freebsd32 arm64/arm64/elf_machdep.c standard arm64/arm64/exception.S standard arm64/arm64/exec_machdep.c standard arm64/arm64/freebsd32_machdep.c optional compat_freebsd32 arm64/arm64/gdb_machdep.c optional gdb arm64/arm64/gicv3_its.c optional intrng fdt arm64/arm64/gic_v3.c standard arm64/arm64/gic_v3_acpi.c optional acpi arm64/arm64/gic_v3_fdt.c optional fdt arm64/arm64/hyp_stub.S standard arm64/arm64/identcpu.c standard arm64/arm64/locore.S standard no-obj arm64/arm64/machdep.c standard arm64/arm64/machdep_boot.c standard arm64/arm64/mem.c standard arm64/arm64/memcmp.S standard arm64/arm64/memcpy.S standard arm64/arm64/memset.S standard arm64/arm64/minidump_machdep.c standard arm64/arm64/mp_machdep.c optional smp arm64/arm64/nexus.c standard arm64/arm64/ofw_machdep.c optional fdt arm64/arm64/pl031_rtc.c optional fdt pl031 arm64/arm64/ptrauth.c standard \ compile-with "${NORMAL_C:N-mbranch-protection*} -mbranch-protection=bti" arm64/arm64/pmap.c standard arm64/arm64/ptrace_machdep.c standard arm64/arm64/sdt_machdep.c optional kdtrace_hooks arm64/arm64/sigtramp.S standard arm64/arm64/stack_machdep.c optional ddb | stack arm64/arm64/strcmp.S standard arm64/arm64/strncmp.S standard arm64/arm64/support_ifunc.c standard arm64/arm64/support.S standard arm64/arm64/swtch.S standard arm64/arm64/sys_machdep.c standard arm64/arm64/trap.c standard arm64/arm64/uio_machdep.c standard arm64/arm64/undefined.c standard arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack \ compile-with "${NOSAN_C}" arm64/arm64/vfp.c standard arm64/arm64/vm_machdep.c standard arm64/coresight/coresight.c standard arm64/coresight/coresight_acpi.c optional acpi arm64/coresight/coresight_fdt.c optional fdt 
arm64/coresight/coresight_if.m standard arm64/coresight/coresight_cmd.c standard arm64/coresight/coresight_cpu_debug.c optional fdt arm64/coresight/coresight_etm4x.c standard arm64/coresight/coresight_etm4x_acpi.c optional acpi arm64/coresight/coresight_etm4x_fdt.c optional fdt arm64/coresight/coresight_funnel.c standard arm64/coresight/coresight_funnel_acpi.c optional acpi arm64/coresight/coresight_funnel_fdt.c optional fdt arm64/coresight/coresight_replicator.c standard arm64/coresight/coresight_replicator_acpi.c optional acpi arm64/coresight/coresight_replicator_fdt.c optional fdt arm64/coresight/coresight_tmc.c standard arm64/coresight/coresight_tmc_acpi.c optional acpi arm64/coresight/coresight_tmc_fdt.c optional fdt dev/smbios/smbios_subr.c standard arm64/iommu/iommu.c optional iommu arm64/iommu/iommu_if.m optional iommu arm64/iommu/iommu_pmap.c optional iommu arm64/iommu/smmu.c optional iommu arm64/iommu/smmu_acpi.c optional iommu acpi arm64/iommu/smmu_fdt.c optional iommu fdt arm64/iommu/smmu_quirks.c optional iommu dev/iommu/busdma_iommu.c optional iommu dev/iommu/iommu_gas.c optional iommu arm64/vmm/vmm.c optional vmm arm64/vmm/vmm_dev_machdep.c optional vmm arm64/vmm/vmm_instruction_emul.c optional vmm arm64/vmm/vmm_arm64.c optional vmm arm64/vmm/vmm_reset.c optional vmm arm64/vmm/vmm_handlers.c optional vmm arm64/vmm/vmm_call.S optional vmm arm64/vmm/vmm_nvhe_exception.S optional vmm \ compile-with "${NOSAN_C} -fpie" \ no-obj arm64/vmm/vmm_nvhe.c optional vmm \ compile-with "${NOSAN_C} -fpie" \ no-obj vmm_hyp_blob.elf.full optional vmm \ dependency "vmm_nvhe.o vmm_nvhe_exception.o" \ compile-with "${SYSTEM_LD_BASECMD} -o ${.TARGET} ${.ALLSRC} --defsym=_start='0x0' --defsym=text_start='0x0'" \ no-obj no-implicit-rule vmm_hyp_blob.elf optional vmm \ dependency "vmm_hyp_blob.elf.full" \ compile-with "${OBJCOPY} --strip-debug ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule vmm_hyp_blob.bin optional vmm \ dependency vmm_hyp_blob.elf \ compile-with "${OBJCOPY} --output-target=binary ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule arm64/vmm/vmm_hyp_el2.S optional vmm \ dependency vmm_hyp_blob.bin arm64/vmm/vmm_mmu.c optional vmm arm64/vmm/vmm_vhe.c optional vmm arm64/vmm/vmm_vhe_exception.S optional vmm arm64/vmm/io/vgic.c optional vmm arm64/vmm/io/vgic_v3.c optional vmm arm64/vmm/io/vgic_if.m optional vmm arm64/vmm/io/vtimer.c optional vmm dev/vmm/vmm_dev.c optional vmm +dev/vmm/vmm_mem.c optional vmm dev/vmm/vmm_stat.c optional vmm crypto/armv8/armv8_crypto.c optional armv8crypto armv8_crypto_wrap.o optional armv8crypto \ dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8 ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "armv8_crypto_wrap.o" aesv8-armx.o optional armv8crypto | ossl \ dependency "$S/crypto/openssl/aarch64/aesv8-armx.S" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8 -I$S/crypto/openssl ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "aesv8-armx.o" ghashv8-armx.o optional armv8crypto \ dependency "$S/crypto/openssl/aarch64/ghashv8-armx.S" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8 -I$S/crypto/openssl ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "ghashv8-armx.o" 
crypto/des/des_enc.c optional netsmb crypto/openssl/ossl_aarch64.c optional ossl crypto/openssl/aarch64/chacha-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/poly1305-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha1-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha256-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha512-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/vpaes-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" dev/acpica/acpi_bus_if.m optional acpi dev/acpica/acpi_if.m optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pxm.c optional acpi dev/ahci/ahci_generic.c optional ahci cddl/dev/dtrace/aarch64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/aarch64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/aarch64/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" # zfs blake3 hash support contrib/openzfs/module/icp/asm-aarch64/blake3/b3_aarch64_sse2.S optional zfs compile-with "${ZFS_S:N-mgeneral-regs-only}" contrib/openzfs/module/icp/asm-aarch64/blake3/b3_aarch64_sse41.S optional zfs compile-with "${ZFS_S:N-mgeneral-regs-only}" # zfs sha2 hash support zfs-sha256-armv8.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha256-armv8.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS:N-mgeneral-regs-only} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha256-armv8.S" \ no-implicit-rule \ clean "zfs-sha256-armv8.o" zfs-sha512-armv8.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha512-armv8.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS:N-mgeneral-regs-only} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha512-armv8.S" \ no-implicit-rule \ clean "zfs-sha512-armv8.o" ## ## ASoC support ## dev/sound/fdt/audio_dai_if.m optional sound fdt dev/sound/fdt/audio_soc.c optional sound fdt dev/sound/fdt/dummy_codec.c optional sound fdt dev/sound/fdt/simple_amplifier.c optional sound fdt ## ## Device drivers ## dev/axgbe/if_axgbe.c optional axa fdt dev/axgbe/xgbe-desc.c optional axa fdt dev/axgbe/xgbe-dev.c optional axa fdt dev/axgbe/xgbe-drv.c optional axa fdt dev/axgbe/xgbe-mdio.c optional axa fdt dev/axgbe/xgbe-sysctl.c optional axa fdt dev/axgbe/xgbe-txrx.c optional axa fdt dev/axgbe/xgbe_osdep.c optional axa fdt dev/axgbe/xgbe-phy-v1.c optional axa fdt dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/dpaa2/dpaa2_bp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_buf.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_channel.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_cmd_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_con.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_console.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/dpaa2_io.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mac.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mc.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mc_acpi.c optional soc_nxp_ls dpaa2 acpi 
dev/dpaa2/dpaa2_mc_fdt.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/dpaa2_mc_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mcp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_ni.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_rc.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_swp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_swp_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_types.c optional soc_nxp_ls dpaa2 dev/dpaa2/memac_mdio_acpi.c optional soc_nxp_ls dpaa2 acpi dev/dpaa2/memac_mdio_common.c optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt dev/dpaa2/memac_mdio_fdt.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/memac_mdio_if.m optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt # Synopsys DesignWare Ethernet Controller dev/dwc/if_dwc_rk.c optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 dev/dwc/if_dwc_socfpga.c optional fdt dwc_socfpga dev/enetc/enetc_mdio.c optional enetc soc_nxp_ls dev/enetc/if_enetc.c optional enetc iflib pci fdt soc_nxp_ls dev/eqos/if_eqos.c optional eqos dev/eqos/if_eqos_if.m optional eqos dev/eqos/if_eqos_fdt.c optional eqos fdt dev/etherswitch/felix/felix.c optional enetc etherswitch fdt felix pci soc_nxp_ls dev/firmware/arm/scmi.c optional fdt scmi dev/firmware/arm/scmi_clk.c optional fdt scmi dev/firmware/arm/scmi_if.m optional fdt scmi dev/firmware/arm/scmi_mailbox.c optional fdt scmi dev/firmware/arm/scmi_smc.c optional fdt scmi dev/firmware/arm/scmi_virtio.c optional fdt scmi virtio dev/firmware/arm/scmi_shmem.c optional fdt scmi dev/gpio/pl061.c optional pl061 gpio dev/gpio/pl061_acpi.c optional pl061 gpio acpi dev/gpio/pl061_fdt.c optional pl061 gpio fdt dev/gpio/qoriq_gpio.c optional soc_nxp_ls gpio fdt dev/hwpmc/hwpmc_arm64.c optional hwpmc dev/hwpmc/hwpmc_arm64_md.c optional hwpmc dev/hwpmc/hwpmc_cmn600.c optional hwpmc acpi arm64/arm64/cmn600.c optional hwpmc acpi dev/hwpmc/hwpmc_dmc620.c optional hwpmc acpi dev/hwpmc/pmu_dmc620.c optional hwpmc acpi # Microsoft Hyper-V dev/hyperv/vmbus/hyperv.c optional hyperv acpi dev/hyperv/vmbus/aarch64/hyperv_aarch64.c optional hyperv acpi dev/hyperv/vmbus/vmbus.c optional hyperv acpi pci dev/hyperv/vmbus/aarch64/vmbus_aarch64.c optional hyperv acpi dev/hyperv/vmbus/vmbus_if.m optional hyperv acpi dev/hyperv/vmbus/vmbus_res.c optional hyperv acpi dev/hyperv/vmbus/vmbus_xact.c optional hyperv acpi dev/hyperv/vmbus/aarch64/hyperv_machdep.c optional hyperv acpi dev/hyperv/vmbus/vmbus_chan.c optional hyperv acpi dev/hyperv/vmbus/hyperv_busdma.c optional hyperv acpi dev/hyperv/vmbus/vmbus_br.c optional hyperv acpi dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv acpi dev/hyperv/utilities/vmbus_timesync.c optional hyperv acpi dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv acpi dev/hyperv/utilities/vmbus_ic.c optional hyperv acpi dev/hyperv/utilities/vmbus_shutdown.c optional hyperv acpi dev/hyperv/utilities/hv_kvp.c optional hyperv acpi dev/hyperv/input/hv_kbd.c optional hyperv acpi dev/hyperv/input/hv_kbdc.c optional hyperv acpi dev/hyperv/netvsc/hn_nvs.c optional hyperv acpi dev/hyperv/netvsc/hn_rndis.c optional hyperv acpi dev/hyperv/netvsc/if_hn.c optional hyperv acpi dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci acpi dev/ice/if_ice_iflib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_lib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_osdep.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_resmgr.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_strings.c optional ice pci \ compile-with 
"${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_recovery_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_controlq.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_dcb.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flex_pipe.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flow.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_nvm.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sched.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_switch.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_vlan_mode.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fw_logging.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fwlog.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_rdma.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/irdma_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/irdma_di_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/ice_ddp_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01032900 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ice_ddp.c" ice_ddp.fwo optional ice_ddp \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ dependency "$S/contrib/dev/ice/ice-1.3.41.0.pkg" \ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.41.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/iicbus/controller/twsi/mv_twsi.c optional twsi fdt dev/iicbus/controller/twsi/a10_twsi.c optional twsi fdt dev/iicbus/controller/twsi/twsi.c optional twsi fdt dev/iicbus/controller/rockchip/rk_i2c.c optional rk_i2c fdt dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_bt.c optional ipmi dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/mailbox/arm/arm_doorbell.c optional fdt arm_doorbell dev/mbox/mbox_if.m optional soc_brcm_bcm2837 dev/mmc/host/dwmmc_altera.c optional dwmmc dwmmc_altera fdt dev/mmc/host/dwmmc_hisi.c optional dwmmc dwmmc_hisi fdt dev/mmc/host/dwmmc_rockchip.c optional dwmmc rk_dwmmc fdt dev/neta/if_mvneta_fdt.c optional neta fdt dev/neta/if_mvneta.c optional neta mdio mii fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_pci.c optional fdt pci dev/ofw/ofw_pcib.c optional fdt pci dev/pci/controller/pci_n1sdp.c optional pci_n1sdp acpi dev/pci/pci_host_generic.c optional pci dev/pci/pci_host_generic_acpi.c optional pci acpi dev/pci/pci_host_generic_den0115.c optional pci acpi dev/pci/pci_host_generic_fdt.c optional pci fdt dev/pci/pci_dw_mv.c optional pci fdt dev/pci/pci_dw.c optional pci fdt dev/pci/pci_dw_if.m optional pci fdt dev/psci/psci.c standard dev/psci/smccc_arm64.S standard dev/psci/smccc_errata.c standard dev/psci/smccc_trng.c standard dev/psci/smccc.c standard dev/pwm/controller/allwinner/aw_pwm.c optional fdt aw_pwm dev/pwm/controller//rockchip/rk_pwm.c 
optional fdt rk_pwm dev/random/armv8rng.c optional armv8_rng !random_loadable dev/safexcel/safexcel.c optional safexcel fdt dev/sdhci/sdhci_xenon.c optional sdhci_xenon sdhci dev/sdhci/sdhci_xenon_acpi.c optional sdhci_xenon sdhci acpi dev/sdhci/sdhci_xenon_fdt.c optional sdhci_xenon sdhci fdt dev/sdhci/sdhci_fdt_xilinx.c optional sdhci_xilinx sdhci fdt dev/sdhci/sdhci_fdt_rockchip.c optional sdhci fdt soc_rockchip dev/sram/mmio_sram.c optional fdt mmio_sram dev/sram/mmio_sram_if.m optional fdt mmio_sram dev/spibus/controller/allwinner/aw_spi.c optional fdt aw_spi dev/spibus/controller/rockchip/rk_spi.c optional fdt rk_spi dev/uart/uart_cpu_arm64.c optional uart dev/uart/uart_dev_mu.c optional uart uart_mu fdt dev/uart/uart_dev_pl011.c optional uart pl011 dev/usb/controller/dwc_otg_hisi.c optional dwcotg fdt soc_hisi_hi6220 dev/usb/controller/ehci_mv.c optional ehci_mv fdt dev/usb/controller/generic_ehci.c optional ehci dev/usb/controller/generic_ehci_acpi.c optional ehci acpi dev/usb/controller/generic_ehci_fdt.c optional ehci fdt dev/usb/controller/generic_ohci.c optional ohci fdt dev/usb/controller/generic_usb_if.m optional ohci fdt dev/usb/controller/musb_otg_allwinner.c optional musb fdt soc_allwinner_a64 dev/usb/controller/usb_nop_xceiv.c optional fdt dev/usb/controller/generic_xhci.c optional xhci dev/usb/controller/generic_xhci_acpi.c optional xhci acpi dev/usb/controller/generic_xhci_fdt.c optional xhci fdt dev/usb/controller/dwc3/dwc3.c optional xhci acpi dwc3 | xhci fdt dwc3 dev/usb/controller/dwc3/aw_dwc3.c optional xhci fdt dwc3 aw_dwc3 dev/usb/controller/dwc3/rk_dwc3.c optional xhci fdt dwc3 rk_dwc3 dev/vnic/mrml_bridge.c optional vnic fdt dev/vnic/nic_main.c optional vnic pci dev/vnic/nicvf_main.c optional vnic pci pci_iov dev/vnic/nicvf_queues.c optional vnic pci pci_iov dev/vnic/thunder_bgx_fdt.c optional soc_cavm_thunderx pci vnic fdt dev/vnic/thunder_bgx.c optional soc_cavm_thunderx pci vnic pci dev/vnic/thunder_mdio_fdt.c optional soc_cavm_thunderx pci vnic fdt dev/vnic/thunder_mdio.c optional soc_cavm_thunderx pci vnic dev/vnic/lmac_if.m optional inet | inet6 | vnic ## ## SoC Support ## # Allwinner common files arm/allwinner/a10_codec.c optional sound a10_codec fdt arm/allwinner/a31_dmac.c optional a31_dmac fdt arm/allwinner/a33_codec.c optional fdt sound a33_codec arm/allwinner/a64/sun50i_a64_acodec.c optional fdt sound a64_codec arm/allwinner/sunxi_dma_if.m optional a31_dmac arm/allwinner/aw_cir.c optional evdev aw_cir fdt arm/allwinner/aw_gpio.c optional gpio aw_gpio fdt arm/allwinner/aw_i2s.c optional fdt sound aw_i2s arm/allwinner/aw_mmc.c optional mmc aw_mmc fdt | mmccam aw_mmc fdt arm/allwinner/aw_nmi.c optional aw_nmi fdt \ compile-with "${NORMAL_C} -I$S/contrib/device-tree/include" arm/allwinner/aw_r_intc.c optional aw_r_intc fdt arm/allwinner/aw_rsb.c optional aw_rsb fdt arm/allwinner/aw_rtc.c optional aw_rtc fdt arm/allwinner/aw_sid.c optional aw_sid nvmem fdt arm/allwinner/aw_syscon.c optional aw_syscon syscon fdt arm/allwinner/aw_timer.c optional aw_timer fdt arm/allwinner/aw_thermal.c optional aw_thermal nvmem fdt arm/allwinner/aw_usbphy.c optional ehci aw_usbphy fdt arm/allwinner/aw_usb3phy.c optional xhci aw_usbphy fdt arm/allwinner/aw_wdog.c optional aw_wdog fdt arm/allwinner/axp81x.c optional axp81x fdt arm/allwinner/if_awg.c optional awg syscon aw_sid nvmem fdt # Allwinner clock driver dev/clk/allwinner/aw_ccung.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_frac.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_m.c optional aw_ccu fdt 
dev/clk/allwinner/aw_clk_mipi.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_nkmp.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_nm.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_nmm.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_np.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_prediv_mux.c optional aw_ccu fdt dev/clk/allwinner/ccu_a64.c optional soc_allwinner_a64 aw_ccu fdt dev/clk/allwinner/ccu_h3.c optional soc_allwinner_h5 aw_ccu fdt dev/clk/allwinner/ccu_h6.c optional soc_allwinner_h6 aw_ccu fdt dev/clk/allwinner/ccu_h6_r.c optional soc_allwinner_h6 aw_ccu fdt dev/clk/allwinner/ccu_sun8i_r.c optional aw_ccu fdt dev/clk/allwinner/ccu_de2.c optional aw_ccu fdt # Allwinner padconf files arm/allwinner/a64/a64_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/a64/a64_r_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/h3/h3_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h3/h3_r_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h6/h6_padconf.c optional soc_allwinner_h6 fdt arm/allwinner/h6/h6_r_padconf.c optional soc_allwinner_h6 fdt # Altera/Intel arm64/intel/stratix10-soc-fpga-mgr.c optional soc_intel_stratix10 fdt arm64/intel/stratix10-svc.c optional soc_intel_stratix10 fdt # Annapurna arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt arm/annapurna/alpine/alpine_pci.c optional al_pci fdt arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" # Apple arm64/apple/apple_aic.c optional soc_apple_t8103 fdt arm64/apple/apple_wdog.c optional soc_apple_t8103 fdt # Broadcom arm64/broadcom/brcmmdio/mdio_mux_iproc.c optional soc_brcm_ns2 fdt arm64/broadcom/brcmmdio/mdio_nexus_iproc.c optional soc_brcm_ns2 fdt arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c optional soc_brcm_ns2 fdt pci arm64/broadcom/genet/if_genet.c optional soc_brcm_bcm2838 fdt genet arm/broadcom/bcm2835/bcm2835_audio.c optional sound vchiq fdt \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" arm/broadcom/bcm2835/bcm2835_bsc.c optional bcm2835_bsc fdt arm/broadcom/bcm2835/bcm2835_clkman.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_cpufreq.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_dma.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_fbd.c optional vt soc_brcm_bcm2837 fdt | vt soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_firmware.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_ft5406.c optional evdev bcm2835_ft5406 fdt arm/broadcom/bcm2835/bcm2835_gpio.c optional gpio soc_brcm_bcm2837 fdt | gpio soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_intr.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_mbox.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_rng.c optional !random_loadable soc_brcm_bcm2837 fdt | !random_loadable soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhci.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhost.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_spi.c optional bcm2835_spi fdt arm/broadcom/bcm2835/bcm2835_vcbus.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt 
arm/broadcom/bcm2835/bcm2835_vcio.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_wdog.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2836.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm283x_dwc_fdt.c optional dwcotg fdt soc_brcm_bcm2837 | dwcotg fdt soc_brcm_bcm2838 arm/broadcom/bcm2835/bcm2838_pci.c optional soc_brcm_bcm2838 fdt pci arm/broadcom/bcm2835/bcm2838_xhci.c optional soc_brcm_bcm2838 fdt pci xhci arm/broadcom/bcm2835/raspberrypi_gpio.c optional soc_brcm_bcm2837 gpio fdt | soc_brcm_bcm2838 gpio fdt contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_shim.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_util.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" # Cavium arm64/cavium/thunder_pcie_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_pem.c optional soc_cavm_thunderx pci arm64/cavium/thunder_pcie_pem_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_common.c optional soc_cavm_thunderx pci # i.MX8 Clock support arm64/freescale/imx/imx_ccm.c optional fdt soc_freescale_imx8 arm64/freescale/imx/imx8mp_ccm.c optional fdt soc_freescale_imx8 arm64/freescale/imx/imx8mq_ccm.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_gate.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_mux.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_composite.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_sscg_pll.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_frac_pll.c optional fdt soc_freescale_imx8 # iMX drivers arm/freescale/imx/imx_gpio.c optional gpio soc_freescale_imx8 fdt arm/freescale/imx/imx_i2c.c optional fdt fsliic arm/freescale/imx/imx_machdep.c optional fdt soc_freescale_imx8 arm64/freescale/imx/imx7gpc.c optional fdt soc_freescale_imx8 dev/ffec/if_ffec.c optional fdt ffec # Marvell arm/mv/a37x0_gpio.c optional a37x0_gpio gpio fdt arm/mv/a37x0_iic.c optional a37x0_iic iicbus fdt arm/mv/a37x0_spi.c optional a37x0_spi spibus fdt arm/mv/clk/a37x0_tbg.c optional a37x0_tbg clk fdt syscon 
arm/mv/clk/a37x0_xtal.c optional a37x0_xtal clk fdt syscon arm/mv/armada38x/armada38x_rtc.c optional mv_rtc fdt arm/mv/gpio.c optional mv_gpio fdt arm/mv/mvebu_gpio.c optional mv_gpio fdt arm/mv/mvebu_pinctrl.c optional mvebu_pinctrl fdt arm/mv/mv_ap806_clock.c optional soc_marvell_8k fdt arm/mv/mv_ap806_gicp.c optional mv_ap806_gicp fdt arm/mv/mv_ap806_sei.c optional mv_ap806_sei fdt arm/mv/mv_cp110_clock.c optional soc_marvell_8k fdt arm/mv/mv_cp110_icu.c optional mv_cp110_icu fdt arm/mv/mv_cp110_icu_bus.c optional mv_cp110_icu fdt arm/mv/mv_thermal.c optional soc_marvell_8k mv_thermal fdt arm/mv/clk/a37x0_tbg_pll.c optional a37x0_tbg clk fdt syscon arm/mv/clk/a37x0_periph_clk_driver.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/a37x0_nb_periph_clk_driver.c optional a37x0_nb_periph clk fdt syscon arm/mv/clk/a37x0_sb_periph_clk_driver.c optional a37x0_sb_periph clk fdt syscon arm/mv/clk/periph.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_d.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_fixed.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_gate.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_mux_gate.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon # NVidia arm/nvidia/tegra_abpmisc.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_ahci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_efuse.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_ehci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_gpio.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_i2c.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_lic.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_mc.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_pcie.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_sdhci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_soctherm_if.m optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_soctherm.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_uart.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_usbphy.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_xhci.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_gpio.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_regulators.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_rtc.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_car.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_per.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_pll.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_super.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_coretemp.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_cpufreq.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_pinmux.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_pmc.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_xusbpadctl.c optional fdt soc_nvidia_tegra210 # Nvidia firmware for Tegra tegra210_xusb_fw.c optional tegra210_xusb_fw \ dependency "$S/conf/files.arm64" \ compile-with "${AWK} -f $S/tools/fw_stub.awk tegra210_xusb.fw:tegra210_xusb_fw -mtegra210_xusb_fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "tegra210_xusb_fw.c" tegra210_xusb.fwo optional tegra210_xusb_fw \ dependency "tegra210_xusb.fw" \ compile-with 
"${NORMAL_FWO}" \ no-implicit-rule \ clean "tegra210_xusb.fwo" tegra210_xusb.fw optional tegra210_xusb_fw \ dependency "$S/contrib/dev/nvidia/tegra210_xusb.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "tegra210_xusb.fw" # NXP dev/iicbus/controller/vybrid/vf_i2c.c optional vf_i2c iicbus soc_nxp_ls dev/iicbus/controller/vybrid/vf_i2c_acpi.c optional vf_i2c iicbus acpi soc_nxp_ls dev/iicbus/controller/vybrid/vf_i2c_fdt.c optional vf_i2c iicbus fdt soc_nxp_ls arm64/qoriq/qoriq_dw_pci.c optional pci fdt soc_nxp_ls arm64/qoriq/qoriq_gpio_pic.c optional gpio fdt soc_nxp_ls arm64/qoriq/qoriq_therm.c optional pci fdt soc_nxp_ls arm64/qoriq/qoriq_therm_if.m optional pci fdt soc_nxp_ls arm64/qoriq/clk/ls1028a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1028a_flexspi_clk.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1046a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1088a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/lx2160a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/qoriq_clk_pll.c optional clk soc_nxp_ls arm64/qoriq/clk/qoriq_clkgen.c optional clk soc_nxp_ls fdt dev/ahci/ahci_fsl_fdt.c optional soc_nxp_ls ahci fdt dev/flash/flexspi/flex_spi.c optional clk flex_spi soc_nxp_ls fdt # Qualcomm arm64/qualcomm/qcom_gcc.c optional qcom_gcc fdt dev/qcom_mdio/qcom_mdio_ipq4018.c optional qcom_mdio fdt mdio mii # RockChip Drivers arm64/rockchip/rk3328_codec.c optional fdt rk3328codec soc_rockchip_rk3328 arm64/rockchip/rk3399_emmcphy.c optional fdt rk_emmcphy soc_rockchip_rk3399 arm64/rockchip/rk3568_combphy.c optional fdt rk_combphy soc_rockchip_rk3568 arm64/rockchip/rk3568_pcie.c optional fdt pci soc_rockchip_rk3568 arm64/rockchip/rk3568_pciephy.c optional fdt pci soc_rockchip_rk3568 arm64/rockchip/rk_i2s.c optional fdt sound soc_rockchip_rk3328 | fdt sound soc_rockchip_rk3399 arm64/rockchip/rk_otp.c optional fdt soc_rockchip_rk3568 arm64/rockchip/rk_otp_if.m optional fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx.c optional fdt rk8xx soc_rockchip dev/iicbus/pmic/rockchip/rk8xx_clocks.c optional fdt rk8xx soc_rockchip dev/iicbus/pmic/rockchip/rk8xx_regulators.c optional fdt rk8xx soc_rockchip dev/iicbus/pmic/rockchip/rk8xx_rtc.c optional fdt rk8xx soc_rockchip dev/iicbus/pmic/rockchip/rk805.c optional fdt rk805 soc_rockchip_rk3328 dev/iicbus/pmic/rockchip/rk808.c optional fdt rk808 soc_rockchip_rk3399 dev/iicbus/pmic/rockchip/rk817.c optional fdt rk817 soc_rockchip_rk3568 arm64/rockchip/rk_grf.c optional fdt soc_rockchip arm64/rockchip/rk_pinctrl.c optional fdt rk_pinctrl soc_rockchip arm64/rockchip/rk_gpio.c optional fdt rk_gpio soc_rockchip arm64/rockchip/rk_iodomain.c optional fdt rk_iodomain arm64/rockchip/rk_usb2phy.c optional fdt rk_usb2phy soc_rockchip arm64/rockchip/rk_typec_phy.c optional fdt rk_typec_phy soc_rockchip_rk3399 arm64/rockchip/rk_tsadc_if.m optional fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_tsadc.c optional fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_pcie.c optional fdt pci soc_rockchip_rk3399 arm64/rockchip/rk_pcie_phy.c optional fdt pci soc_rockchip_rk3399 # RockChip Clock support dev/clk/rockchip/rk_cru.c optional fdt soc_rockchip dev/clk/rockchip/rk_clk_armclk.c optional fdt soc_rockchip dev/clk/rockchip/rk_clk_composite.c optional fdt soc_rockchip dev/clk/rockchip/rk_clk_fract.c optional fdt soc_rockchip dev/clk/rockchip/rk_clk_gate.c optional fdt soc_rockchip dev/clk/rockchip/rk_clk_mux.c optional fdt soc_rockchip dev/clk/rockchip/rk_clk_pll.c optional fdt 
soc_rockchip dev/clk/rockchip/rk3328_cru.c optional fdt soc_rockchip_rk3328 dev/clk/rockchip/rk3399_cru.c optional fdt soc_rockchip_rk3399 dev/clk/rockchip/rk3399_pmucru.c optional fdt soc_rockchip_rk3399 dev/clk/rockchip/rk3568_cru.c optional fdt soc_rockchip_rk3568 dev/clk/rockchip/rk3568_pmucru.c optional fdt soc_rockchip_rk3568 # Xilinx arm/xilinx/uart_dev_cdnc.c optional uart soc_xilinx_zynq fdt arm/xilinx/zy7_gpio.c optional gpio soc_xilinx_zynq fdt dev/iicbus/controller/cadence/cdnc_i2c.c optional cdnc_i2c iicbus soc_xilinx_zynq fdt dev/usb/controller/xlnx_dwc3.c optional xhci soc_xilinx_zynq fdt dev/firmware/xilinx/zynqmp_firmware.c optional fdt soc_xilinx_zynq dev/firmware/xilinx/zynqmp_firmware_if.m optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clock.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_div.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_fixed.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_gate.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_mux.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_pll.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_reset.c optional fdt soc_xilinx_zynq diff --git a/sys/conf/files.riscv b/sys/conf/files.riscv index e84f1367680f..cc18ecb6eb36 100644 --- a/sys/conf/files.riscv +++ b/sys/conf/files.riscv @@ -1,99 +1,100 @@ cddl/dev/dtrace/riscv/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/riscv/dtrace_isa.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/dtrace/riscv/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/dtrace/riscv/instr_size.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/riscv/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" crypto/des/des_enc.c optional netsmb dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_pcib.c optional pci fdt dev/pci/pci_dw.c optional pci fdt dev/pci/pci_dw_if.m optional pci fdt dev/pci/pci_host_generic.c optional pci dev/pci/pci_host_generic_fdt.c optional pci fdt dev/uart/uart_cpu_fdt.c optional uart fdt dev/uart/uart_dev_lowrisc.c optional uart_lowrisc dev/usb/controller/generic_ehci.c optional ehci dev/usb/controller/generic_ehci_fdt.c optional ehci fdt dev/usb/controller/generic_ohci.c optional ohci fdt dev/usb/controller/generic_usb_if.m optional ohci fdt dev/usb/controller/generic_xhci.c optional xhci dev/usb/controller/generic_xhci_fdt.c optional xhci fdt dev/vmm/vmm_dev.c optional vmm +dev/vmm/vmm_mem.c optional vmm dev/vmm/vmm_stat.c optional vmm dev/xilinx/axi_quad_spi.c optional xilinx_spi dev/xilinx/axidma.c optional axidma xdma dev/xilinx/if_xae.c optional xae dev/xilinx/xlnx_pcib.c optional pci fdt xlnx_pcib kern/msi_if.m standard kern/pic_if.m standard kern/subr_devmap.c standard kern/subr_dummy_vdso_tc.c standard kern/subr_intr.c standard kern/subr_physmem.c standard libkern/bcopy.c standard libkern/memcmp.c standard libkern/memset.c standard libkern/strcmp.c standard libkern/strlen.c standard libkern/strncmp.c standard riscv/riscv/aplic.c standard riscv/riscv/autoconf.c standard riscv/riscv/bus_machdep.c standard riscv/riscv/bus_space_asm.S standard riscv/riscv/busdma_bounce.c standard riscv/riscv/busdma_machdep.c standard riscv/riscv/cache.c standard riscv/riscv/clock.c standard riscv/riscv/copyinout.S standard riscv/riscv/cpufunc_asm.S standard riscv/riscv/db_disasm.c optional ddb riscv/riscv/db_interface.c optional ddb riscv/riscv/db_trace.c optional ddb riscv/riscv/dump_machdep.c standard riscv/riscv/elf_machdep.c 
standard riscv/riscv/exception.S standard riscv/riscv/exec_machdep.c standard riscv/riscv/fpe.c standard riscv/riscv/gdb_machdep.c optional gdb riscv/riscv/intc.c standard riscv/riscv/identcpu.c standard riscv/riscv/locore.S standard no-obj riscv/riscv/machdep.c standard riscv/riscv/minidump_machdep.c standard riscv/riscv/mp_machdep.c optional smp riscv/riscv/mem.c standard riscv/riscv/nexus.c standard riscv/riscv/ofw_machdep.c optional fdt riscv/riscv/plic.c standard riscv/riscv/pmap.c standard riscv/riscv/ptrace_machdep.c standard riscv/riscv/riscv_console.c optional rcons riscv/riscv/riscv_syscon.c optional syscon riscv_syscon fdt riscv/riscv/sigtramp.S standard riscv/riscv/sbi.c standard riscv/riscv/sbi_ipi.c optional smp riscv/riscv/sdt_machdep.c optional kdtrace_hooks riscv/riscv/stack_machdep.c optional ddb | stack riscv/riscv/support.S standard riscv/riscv/swtch.S standard riscv/riscv/sys_machdep.c standard riscv/riscv/trap.c standard riscv/riscv/timer.c standard riscv/riscv/uio_machdep.c standard riscv/riscv/unwind.c optional ddb | kdtrace_hooks | stack riscv/riscv/vm_machdep.c standard riscv/vmm/vmm.c optional vmm riscv/vmm/vmm_aplic.c optional vmm riscv/vmm/vmm_dev_machdep.c optional vmm riscv/vmm/vmm_fence.c optional vmm riscv/vmm/vmm_instruction_emul.c optional vmm riscv/vmm/vmm_riscv.c optional vmm riscv/vmm/vmm_sbi.c optional vmm riscv/vmm/vmm_switch.S optional vmm riscv/vmm/vmm_vtimer.c optional vmm riscv/thead/thead.c standard # Zstd contrib/zstd/lib/freebsd/zstd_kfreebsd.c optional zstdio compile-with ${ZSTD_C} diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c index 27c960c8ef2e..1ffa15dd157b 100644 --- a/sys/dev/vmm/vmm_dev.c +++ b/sys/dev/vmm/vmm_dev.c @@ -1,1109 +1,1110 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 NetApp, Inc. * Copyright (C) 2015 Mihai Carabas * All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #if defined(__amd64__) && defined(COMPAT_FREEBSD12) struct vm_memseg_12 { int segid; size_t len; char name[64]; }; _Static_assert(sizeof(struct vm_memseg_12) == 80, "COMPAT_FREEBSD12 ABI"); #define VM_ALLOC_MEMSEG_12 \ _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_12) #define VM_GET_MEMSEG_12 \ _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_12) #endif struct devmem_softc { int segid; char *name; struct cdev *cdev; struct vmmdev_softc *sc; SLIST_ENTRY(devmem_softc) link; }; struct vmmdev_softc { struct vm *vm; /* vm instance cookie */ struct cdev *cdev; struct ucred *ucred; SLIST_ENTRY(vmmdev_softc) link; SLIST_HEAD(, devmem_softc) devmem; int flags; }; static SLIST_HEAD(, vmmdev_softc) head; static unsigned pr_allow_flag; static struct sx vmmdev_mtx; SX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex"); static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev"); SYSCTL_DECL(_hw_vmm); static void devmem_destroy(void *arg); static int devmem_create_cdev(struct vmmdev_softc *sc, int id, char *devmem); static int vmm_priv_check(struct ucred *ucred) { if (jailed(ucred) && !(ucred->cr_prison->pr_allow & pr_allow_flag)) return (EPERM); return (0); } static int vcpu_lock_one(struct vcpu *vcpu) { return (vcpu_set_state(vcpu, VCPU_FROZEN, true)); } static void vcpu_unlock_one(struct vcpu *vcpu) { enum vcpu_state state; state = vcpu_get_state(vcpu, NULL); if (state != VCPU_FROZEN) { panic("vcpu %s(%d) has invalid state %d", vm_name(vcpu_vm(vcpu)), vcpu_vcpuid(vcpu), state); } vcpu_set_state(vcpu, VCPU_IDLE, false); } static int vcpu_lock_all(struct vmmdev_softc *sc) { struct vcpu *vcpu; int error; uint16_t i, j, maxcpus; error = 0; vm_slock_vcpus(sc->vm); maxcpus = vm_get_maxcpus(sc->vm); for (i = 0; i < maxcpus; i++) { vcpu = vm_vcpu(sc->vm, i); if (vcpu == NULL) continue; error = vcpu_lock_one(vcpu); if (error) break; } if (error) { for (j = 0; j < i; j++) { vcpu = vm_vcpu(sc->vm, j); if (vcpu == NULL) continue; vcpu_unlock_one(vcpu); } vm_unlock_vcpus(sc->vm); } return (error); } static void vcpu_unlock_all(struct vmmdev_softc *sc) { struct vcpu *vcpu; uint16_t i, maxcpus; maxcpus = vm_get_maxcpus(sc->vm); for (i = 0; i < maxcpus; i++) { vcpu = vm_vcpu(sc->vm, i); if (vcpu == NULL) continue; vcpu_unlock_one(vcpu); } vm_unlock_vcpus(sc->vm); } static struct vmmdev_softc * vmmdev_lookup(const char *name, struct ucred *cred) { struct vmmdev_softc *sc; sx_assert(&vmmdev_mtx, SA_XLOCKED); SLIST_FOREACH(sc, &head, link) { if (strcmp(name, vm_name(sc->vm)) == 0) break; } if (sc == NULL) return (NULL); if (cr_cansee(cred, sc->ucred)) return (NULL); return (sc); } static struct vmmdev_softc * vmmdev_lookup2(struct cdev *cdev) { return (cdev->si_drv1); } static int vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags) { int error, off, c, prot; vm_paddr_t gpa, maxaddr; void *hpa, *cookie; struct vmmdev_softc *sc; sc = vmmdev_lookup2(cdev); if (sc == NULL) return (ENXIO); /* * Get a read lock on the guest memory map. */ vm_slock_memsegs(sc->vm); error = 0; prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ); maxaddr = vmm_sysmem_maxaddr(sc->vm); while (uio->uio_resid > 0 && error == 0) { gpa = uio->uio_offset; off = gpa & PAGE_MASK; c = min(uio->uio_resid, PAGE_SIZE - off); /* * The VM has a hole in its physical memory map. 
If we want to * use 'dd' to inspect memory beyond the hole we need to * provide bogus data for memory that lies in the hole. * * Since this device does not support lseek(2), dd(1) will * read(2) blocks of data to simulate the lseek(2). */ hpa = vm_gpa_hold_global(sc->vm, gpa, c, prot, &cookie); if (hpa == NULL) { if (uio->uio_rw == UIO_READ && gpa < maxaddr) error = uiomove(__DECONST(void *, zero_region), c, uio); else error = EFAULT; } else { error = uiomove(hpa, c, uio); vm_gpa_release(cookie); } } vm_unlock_memsegs(sc->vm); return (error); } CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= VM_MAX_SUFFIXLEN + 1); static int get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len) { struct devmem_softc *dsc; int error; bool sysmem; error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL); if (error || mseg->len == 0) return (error); if (!sysmem) { SLIST_FOREACH(dsc, &sc->devmem, link) { if (dsc->segid == mseg->segid) break; } KASSERT(dsc != NULL, ("%s: devmem segment %d not found", __func__, mseg->segid)); error = copystr(dsc->name, mseg->name, len, NULL); } else { bzero(mseg->name, len); } return (error); } static int alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len) { char *name; int error; bool sysmem; error = 0; name = NULL; sysmem = true; /* * The allocation is lengthened by 1 to hold a terminating NUL. It'll * be stripped off when devfs processes the full string. */ if (VM_MEMSEG_NAME(mseg)) { sysmem = false; name = malloc(len, M_VMMDEV, M_WAITOK); error = copystr(mseg->name, name, len, NULL); if (error) goto done; } error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem); if (error) goto done; if (VM_MEMSEG_NAME(mseg)) { error = devmem_create_cdev(sc, mseg->segid, name); if (error) vm_free_memseg(sc->vm, mseg->segid); else name = NULL; /* freed when 'cdev' is destroyed */ } done: free(name, M_VMMDEV); return (error); } static int vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum, uint64_t *regval) { int error, i; error = 0; for (i = 0; i < count; i++) { error = vm_get_register(vcpu, regnum[i], &regval[i]); if (error) break; } return (error); } static int vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum, uint64_t *regval) { int error, i; error = 0; for (i = 0; i < count; i++) { error = vm_set_register(vcpu, regnum[i], regval[i]); if (error) break; } return (error); } static int vmmdev_open(struct cdev *dev, int flags, int fmt, struct thread *td) { int error; /* * A jail without vmm access shouldn't be able to access vmm device * files at all, but check here just to be thorough.
*/ error = vmm_priv_check(td->td_ucred); if (error != 0) return (error); return (0); } static const struct vmmdev_ioctl vmmdev_ioctls[] = { VMMDEV_IOCTL(VM_GET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_SET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_GET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_SET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_GET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_SET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_ACTIVATE_CPU, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_STATS, VMMDEV_IOCTL_LOCK_ONE_VCPU), #if defined(__amd64__) && defined(COMPAT_FREEBSD12) VMMDEV_IOCTL(VM_ALLOC_MEMSEG_12, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), #endif VMMDEV_IOCTL(VM_ALLOC_MEMSEG, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), VMMDEV_IOCTL(VM_MMAP_MEMSEG, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), VMMDEV_IOCTL(VM_MUNMAP_MEMSEG, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), VMMDEV_IOCTL(VM_REINIT, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), #if defined(__amd64__) && defined(COMPAT_FREEBSD12) VMMDEV_IOCTL(VM_GET_MEMSEG_12, VMMDEV_IOCTL_SLOCK_MEMSEGS), #endif VMMDEV_IOCTL(VM_GET_MEMSEG, VMMDEV_IOCTL_SLOCK_MEMSEGS), VMMDEV_IOCTL(VM_MMAP_GETNEXT, VMMDEV_IOCTL_SLOCK_MEMSEGS), VMMDEV_IOCTL(VM_SUSPEND_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU), VMMDEV_IOCTL(VM_RESUME_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU), VMMDEV_IOCTL(VM_SUSPEND, 0), VMMDEV_IOCTL(VM_GET_CPUS, 0), VMMDEV_IOCTL(VM_GET_TOPOLOGY, 0), VMMDEV_IOCTL(VM_SET_TOPOLOGY, 0), }; static int vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct vmmdev_softc *sc; struct vcpu *vcpu; const struct vmmdev_ioctl *ioctl; int error, vcpuid; sc = vmmdev_lookup2(cdev); if (sc == NULL) return (ENXIO); ioctl = NULL; for (size_t i = 0; i < nitems(vmmdev_ioctls); i++) { if (vmmdev_ioctls[i].cmd == cmd) { ioctl = &vmmdev_ioctls[i]; break; } } if (ioctl == NULL) { for (size_t i = 0; i < vmmdev_machdep_ioctl_count; i++) { if (vmmdev_machdep_ioctls[i].cmd == cmd) { ioctl = &vmmdev_machdep_ioctls[i]; break; } } } if (ioctl == NULL) return (ENOTTY); if ((ioctl->flags & VMMDEV_IOCTL_XLOCK_MEMSEGS) != 0) vm_xlock_memsegs(sc->vm); else if ((ioctl->flags & VMMDEV_IOCTL_SLOCK_MEMSEGS) != 0) vm_slock_memsegs(sc->vm); vcpu = NULL; vcpuid = -1; if ((ioctl->flags & (VMMDEV_IOCTL_LOCK_ONE_VCPU | VMMDEV_IOCTL_ALLOC_VCPU | VMMDEV_IOCTL_MAYBE_ALLOC_VCPU)) != 0) { vcpuid = *(int *)data; if (vcpuid == -1) { if ((ioctl->flags & VMMDEV_IOCTL_MAYBE_ALLOC_VCPU) == 0) { error = EINVAL; goto lockfail; } } else { vcpu = vm_alloc_vcpu(sc->vm, vcpuid); if (vcpu == NULL) { error = EINVAL; goto lockfail; } if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0) { error = vcpu_lock_one(vcpu); if (error) goto lockfail; } } } if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0) { error = vcpu_lock_all(sc); if (error) goto lockfail; } switch (cmd) { case VM_SUSPEND: { struct vm_suspend *vmsuspend; vmsuspend = (struct vm_suspend *)data; error = vm_suspend(sc->vm, vmsuspend->how); break; } case VM_REINIT: error = vm_reinit(sc->vm); break; case VM_STAT_DESC: { struct vm_stat_desc *statdesc; statdesc = (struct vm_stat_desc *)data; error = vmm_stat_desc_copy(statdesc->index, statdesc->desc, sizeof(statdesc->desc)); break; } case VM_STATS: { struct vm_stats *vmstats; vmstats = (struct vm_stats *)data; getmicrotime(&vmstats->tv); error = 
vmm_stat_copy(vcpu, vmstats->index, nitems(vmstats->statbuf), &vmstats->num_entries, vmstats->statbuf); break; } case VM_MMAP_GETNEXT: { struct vm_memmap *mm; mm = (struct vm_memmap *)data; error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid, &mm->segoff, &mm->len, &mm->prot, &mm->flags); break; } case VM_MMAP_MEMSEG: { struct vm_memmap *mm; mm = (struct vm_memmap *)data; error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff, mm->len, mm->prot, mm->flags); break; } case VM_MUNMAP_MEMSEG: { struct vm_munmap *mu; mu = (struct vm_munmap *)data; error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len); break; } #if defined(__amd64__) && defined(COMPAT_FREEBSD12) case VM_ALLOC_MEMSEG_12: error = alloc_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg_12 *)0)->name)); break; case VM_GET_MEMSEG_12: error = get_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg_12 *)0)->name)); break; #endif case VM_ALLOC_MEMSEG: error = alloc_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg *)0)->name)); break; case VM_GET_MEMSEG: error = get_memseg(sc, (struct vm_memseg *)data, sizeof(((struct vm_memseg *)0)->name)); break; case VM_GET_REGISTER: { struct vm_register *vmreg; vmreg = (struct vm_register *)data; error = vm_get_register(vcpu, vmreg->regnum, &vmreg->regval); break; } case VM_SET_REGISTER: { struct vm_register *vmreg; vmreg = (struct vm_register *)data; error = vm_set_register(vcpu, vmreg->regnum, vmreg->regval); break; } case VM_GET_REGISTER_SET: { struct vm_register_set *vmregset; uint64_t *regvals; int *regnums; vmregset = (struct vm_register_set *)data; if (vmregset->count > VM_REG_LAST) { error = EINVAL; break; } regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV, M_WAITOK); regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV, M_WAITOK); error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) * vmregset->count); if (error == 0) error = vm_get_register_set(vcpu, vmregset->count, regnums, regvals); if (error == 0) error = copyout(regvals, vmregset->regvals, sizeof(regvals[0]) * vmregset->count); free(regvals, M_VMMDEV); free(regnums, M_VMMDEV); break; } case VM_SET_REGISTER_SET: { struct vm_register_set *vmregset; uint64_t *regvals; int *regnums; vmregset = (struct vm_register_set *)data; if (vmregset->count > VM_REG_LAST) { error = EINVAL; break; } regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV, M_WAITOK); regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV, M_WAITOK); error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) * vmregset->count); if (error == 0) error = copyin(vmregset->regvals, regvals, sizeof(regvals[0]) * vmregset->count); if (error == 0) error = vm_set_register_set(vcpu, vmregset->count, regnums, regvals); free(regvals, M_VMMDEV); free(regnums, M_VMMDEV); break; } case VM_GET_CAPABILITY: { struct vm_capability *vmcap; vmcap = (struct vm_capability *)data; error = vm_get_capability(vcpu, vmcap->captype, &vmcap->capval); break; } case VM_SET_CAPABILITY: { struct vm_capability *vmcap; vmcap = (struct vm_capability *)data; error = vm_set_capability(vcpu, vmcap->captype, vmcap->capval); break; } case VM_ACTIVATE_CPU: error = vm_activate_cpu(vcpu); break; case VM_GET_CPUS: { struct vm_cpuset *vm_cpuset; cpuset_t *cpuset; int size; error = 0; vm_cpuset = (struct vm_cpuset *)data; size = vm_cpuset->cpusetsize; if (size < 1 || size > CPU_MAXSIZE / NBBY) { error = ERANGE; break; } cpuset = malloc(max(size, sizeof(cpuset_t)), M_TEMP, M_WAITOK | M_ZERO); if (vm_cpuset->which == 
VM_ACTIVE_CPUS) *cpuset = vm_active_cpus(sc->vm); else if (vm_cpuset->which == VM_SUSPENDED_CPUS) *cpuset = vm_suspended_cpus(sc->vm); else if (vm_cpuset->which == VM_DEBUG_CPUS) *cpuset = vm_debug_cpus(sc->vm); else error = EINVAL; if (error == 0 && size < howmany(CPU_FLS(cpuset), NBBY)) error = ERANGE; if (error == 0) error = copyout(cpuset, vm_cpuset->cpus, size); free(cpuset, M_TEMP); break; } case VM_SUSPEND_CPU: error = vm_suspend_cpu(sc->vm, vcpu); break; case VM_RESUME_CPU: error = vm_resume_cpu(sc->vm, vcpu); break; case VM_SET_TOPOLOGY: { struct vm_cpu_topology *topology; topology = (struct vm_cpu_topology *)data; error = vm_set_topology(sc->vm, topology->sockets, topology->cores, topology->threads, topology->maxcpus); break; } case VM_GET_TOPOLOGY: { struct vm_cpu_topology *topology; topology = (struct vm_cpu_topology *)data; vm_get_topology(sc->vm, &topology->sockets, &topology->cores, &topology->threads, &topology->maxcpus); error = 0; break; } default: error = vmmdev_machdep_ioctl(sc->vm, vcpu, cmd, data, fflag, td); break; } if ((ioctl->flags & (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0) vm_unlock_memsegs(sc->vm); if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0) vcpu_unlock_all(sc); else if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0) vcpu_unlock_one(vcpu); /* * Make sure that no handler returns a kernel-internal * error value to userspace. */ KASSERT(error == ERESTART || error >= 0, ("vmmdev_ioctl: invalid error return %d", error)); return (error); lockfail: if ((ioctl->flags & (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0) vm_unlock_memsegs(sc->vm); return (error); } static int vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize, struct vm_object **objp, int nprot) { struct vmmdev_softc *sc; vm_paddr_t gpa; size_t len; vm_ooffset_t segoff, first, last; int error, found, segid; bool sysmem; first = *offset; last = first + mapsize; if ((nprot & PROT_EXEC) || first < 0 || first >= last) return (EINVAL); sc = vmmdev_lookup2(cdev); if (sc == NULL) { /* virtual machine is in the process of being created */ return (EINVAL); } /* * Get a read lock on the guest memory map. */ vm_slock_memsegs(sc->vm); gpa = 0; found = 0; while (!found) { error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len, NULL, NULL); if (error) break; if (first >= gpa && last <= gpa + len) found = 1; else gpa += len; } if (found) { error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp); KASSERT(error == 0 && *objp != NULL, ("%s: invalid memory segment %d", __func__, segid)); if (sysmem) { vm_object_reference(*objp); *offset = segoff + (first - gpa); } else { error = EINVAL; } } vm_unlock_memsegs(sc->vm); return (error); } static void vmmdev_destroy(struct vmmdev_softc *sc) { struct devmem_softc *dsc; int error __diagused; KASSERT(sc->cdev == NULL, ("%s: cdev not free", __func__)); /* * Destroy all cdevs: * * - any new operations on the 'cdev' will return an error (ENXIO). 
* * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev' */ SLIST_FOREACH(dsc, &sc->devmem, link) { KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed")); devmem_destroy(dsc); } vm_disable_vcpu_creation(sc->vm); error = vcpu_lock_all(sc); KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error)); vm_unlock_vcpus(sc->vm); while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) { KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__)); SLIST_REMOVE_HEAD(&sc->devmem, link); free(dsc->name, M_VMMDEV); free(dsc, M_VMMDEV); } if (sc->vm != NULL) vm_destroy(sc->vm); if (sc->ucred != NULL) crfree(sc->ucred); sx_xlock(&vmmdev_mtx); SLIST_REMOVE(&head, sc, vmmdev_softc, link); sx_xunlock(&vmmdev_mtx); free(sc, M_VMMDEV); } static int vmmdev_lookup_and_destroy(const char *name, struct ucred *cred) { struct cdev *cdev; struct vmmdev_softc *sc; sx_xlock(&vmmdev_mtx); sc = vmmdev_lookup(name, cred); if (sc == NULL || sc->cdev == NULL) { sx_xunlock(&vmmdev_mtx); return (EINVAL); } /* * Setting 'sc->cdev' to NULL is used to indicate that the VM * is scheduled for destruction. */ cdev = sc->cdev; sc->cdev = NULL; sx_xunlock(&vmmdev_mtx); destroy_dev(cdev); vmmdev_destroy(sc); return (0); } static int sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS) { char *buf; int error, buflen; error = vmm_priv_check(req->td->td_ucred); if (error) return (error); buflen = VM_MAX_NAMELEN + 1; buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO); strlcpy(buf, "beavis", buflen); error = sysctl_handle_string(oidp, buf, buflen, req); if (error == 0 && req->newptr != NULL) error = vmmdev_lookup_and_destroy(buf, req->td->td_ucred); free(buf, M_VMMDEV); return (error); } SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE, NULL, 0, sysctl_vmm_destroy, "A", NULL); static struct cdevsw vmmdevsw = { .d_name = "vmmdev", .d_version = D_VERSION, .d_open = vmmdev_open, .d_ioctl = vmmdev_ioctl, .d_mmap_single = vmmdev_mmap_single, .d_read = vmmdev_rw, .d_write = vmmdev_rw, }; static struct vmmdev_softc * vmmdev_alloc(struct vm *vm, struct ucred *cred) { struct vmmdev_softc *sc; sc = malloc(sizeof(*sc), M_VMMDEV, M_WAITOK | M_ZERO); SLIST_INIT(&sc->devmem); sc->vm = vm; sc->ucred = crhold(cred); return (sc); } static int vmmdev_create(const char *name, struct ucred *cred) { struct make_dev_args mda; struct cdev *cdev; struct vmmdev_softc *sc; struct vm *vm; int error; sx_xlock(&vmmdev_mtx); sc = vmmdev_lookup(name, cred); if (sc != NULL) { sx_xunlock(&vmmdev_mtx); return (EEXIST); } error = vm_create(name, &vm); if (error != 0) { sx_xunlock(&vmmdev_mtx); return (error); } sc = vmmdev_alloc(vm, cred); SLIST_INSERT_HEAD(&head, sc, link); make_dev_args_init(&mda); mda.mda_devsw = &vmmdevsw; mda.mda_cr = sc->ucred; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = sc; mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK; error = make_dev_s(&mda, &cdev, "vmm/%s", name); if (error != 0) { sx_xunlock(&vmmdev_mtx); vmmdev_destroy(sc); return (error); } sc->cdev = cdev; sx_xunlock(&vmmdev_mtx); return (0); } static int sysctl_vmm_create(SYSCTL_HANDLER_ARGS) { char *buf; int error, buflen; error = vmm_priv_check(req->td->td_ucred); if (error != 0) return (error); buflen = VM_MAX_NAMELEN + 1; buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO); strlcpy(buf, "beavis", buflen); error = sysctl_handle_string(oidp, buf, buflen, req); if (error == 0 && req->newptr != NULL) error = vmmdev_create(buf, req->td->td_ucred); free(buf, M_VMMDEV); return 
(error); } SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE, NULL, 0, sysctl_vmm_create, "A", NULL); static int vmmctl_open(struct cdev *cdev, int flags, int fmt, struct thread *td) { int error; error = vmm_priv_check(td->td_ucred); if (error != 0) return (error); if ((flags & FWRITE) == 0) return (EPERM); return (0); } static int vmmctl_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { int error; switch (cmd) { case VMMCTL_VM_CREATE: { struct vmmctl_vm_create *vmc; vmc = (struct vmmctl_vm_create *)data; vmc->name[VM_MAX_NAMELEN] = '\0'; for (size_t i = 0; i < nitems(vmc->reserved); i++) { if (vmc->reserved[i] != 0) { error = EINVAL; return (error); } } error = vmmdev_create(vmc->name, td->td_ucred); break; } case VMMCTL_VM_DESTROY: { struct vmmctl_vm_destroy *vmd; vmd = (struct vmmctl_vm_destroy *)data; vmd->name[VM_MAX_NAMELEN] = '\0'; for (size_t i = 0; i < nitems(vmd->reserved); i++) { if (vmd->reserved[i] != 0) { error = EINVAL; return (error); } } error = vmmdev_lookup_and_destroy(vmd->name, td->td_ucred); break; } default: error = ENOTTY; break; } return (error); } static struct cdev *vmmctl_cdev; static struct cdevsw vmmctlsw = { .d_name = "vmmctl", .d_version = D_VERSION, .d_open = vmmctl_open, .d_ioctl = vmmctl_ioctl, }; int vmmdev_init(void) { int error; sx_xlock(&vmmdev_mtx); error = make_dev_p(MAKEDEV_CHECKNAME, &vmmctl_cdev, &vmmctlsw, NULL, UID_ROOT, GID_WHEEL, 0600, "vmmctl"); if (error == 0) pr_allow_flag = prison_add_allow(NULL, "vmm", NULL, "Allow use of vmm in a jail."); sx_xunlock(&vmmdev_mtx); return (error); } int vmmdev_cleanup(void) { sx_xlock(&vmmdev_mtx); if (!SLIST_EMPTY(&head)) { sx_xunlock(&vmmdev_mtx); return (EBUSY); } if (vmmctl_cdev != NULL) { destroy_dev(vmmctl_cdev); vmmctl_cdev = NULL; } sx_xunlock(&vmmdev_mtx); return (0); } static int devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len, struct vm_object **objp, int nprot) { struct devmem_softc *dsc; vm_ooffset_t first, last; size_t seglen; int error; bool sysmem; dsc = cdev->si_drv1; if (dsc == NULL) { /* 'cdev' has been created but is not ready for use */ return (ENXIO); } first = *offset; last = *offset + len; if ((nprot & PROT_EXEC) || first < 0 || first >= last) return (EINVAL); vm_slock_memsegs(dsc->sc->vm); error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp); KASSERT(error == 0 && !sysmem && *objp != NULL, ("%s: invalid devmem segment %d", __func__, dsc->segid)); if (seglen >= last) vm_object_reference(*objp); else error = EINVAL; vm_unlock_memsegs(dsc->sc->vm); return (error); } static struct cdevsw devmemsw = { .d_name = "devmem", .d_version = D_VERSION, .d_mmap_single = devmem_mmap_single, }; static int devmem_create_cdev(struct vmmdev_softc *sc, int segid, char *devname) { struct make_dev_args mda; struct devmem_softc *dsc; int error; sx_xlock(&vmmdev_mtx); dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO); dsc->segid = segid; dsc->name = devname; dsc->sc = sc; SLIST_INSERT_HEAD(&sc->devmem, dsc, link); make_dev_args_init(&mda); mda.mda_devsw = &devmemsw; mda.mda_cr = sc->ucred; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = dsc; mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK; error = make_dev_s(&mda, &dsc->cdev, "vmm.io/%s.%s", vm_name(sc->vm), devname); if (error != 0) { SLIST_REMOVE(&sc->devmem, dsc, devmem_softc, link); free(dsc->name, M_VMMDEV); free(dsc, M_VMMDEV); } sx_xunlock(&vmmdev_mtx); 
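/* * On success the new node is visible to userspace as /dev/vmm.io/<vmname>.<segname>, * per the "vmm.io/%s.%s" format passed to make_dev_s() above; devmem_destroy() * removes it again when the segment or the VM is torn down. */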
return (error); } static void devmem_destroy(void *arg) { struct devmem_softc *dsc = arg; destroy_dev(dsc->cdev); dsc->cdev = NULL; dsc->sc = NULL; } diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c new file mode 100644 index 000000000000..c61ae2d44b96 --- /dev/null +++ b/sys/dev/vmm/vmm_mem.c @@ -0,0 +1,459 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2011 NetApp, Inc. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static void vm_free_memmap(struct vm *vm, int ident); + +void +vm_mem_init(struct vm_mem *mem) +{ + sx_init(&mem->mem_segs_lock, "vm_mem_segs"); +} + +static bool +sysmem_mapping(struct vm_mem *mem, int idx) +{ + if (mem->mem_maps[idx].len != 0 && + mem->mem_segs[mem->mem_maps[idx].segid].sysmem) + return (true); + else + return (false); +} + +bool +vm_memseg_sysmem(struct vm *vm, int ident) +{ + struct vm_mem *mem; + + mem = vm_mem(vm); + vm_assert_memseg_locked(vm); + + if (ident < 0 || ident >= VM_MAX_MEMSEGS) + return (false); + + return (mem->mem_segs[ident].sysmem); +} + +void +vm_mem_cleanup(struct vm *vm) +{ + struct vm_mem *mem; + + mem = vm_mem(vm); + + /* + * System memory is removed from the guest address space only when + * the VM is destroyed. This is because the mapping remains the same + * across VM reset. + * + * Device memory can be relocated by the guest (e.g. using PCI BARs) + * so those mappings are removed on a VM reset. + */ + for (int i = 0; i < VM_MAX_MEMMAPS; i++) { + if (!sysmem_mapping(mem, i)) + vm_free_memmap(vm, i); + } +} + +void +vm_mem_destroy(struct vm *vm) +{ + struct vm_mem *mem; + + mem = vm_mem(vm); + vm_assert_memseg_xlocked(vm); + + for (int i = 0; i < VM_MAX_MEMMAPS; i++) { + if (sysmem_mapping(mem, i)) + vm_free_memmap(vm, i); + } + + for (int i = 0; i < VM_MAX_MEMSEGS; i++) + vm_free_memseg(vm, i); + + sx_xunlock(&mem->mem_segs_lock); + sx_destroy(&mem->mem_segs_lock); +} + +void +vm_slock_memsegs(struct vm *vm) +{ + sx_slock(&vm_mem(vm)->mem_segs_lock); +} + +void +vm_xlock_memsegs(struct vm *vm) +{ + sx_xlock(&vm_mem(vm)->mem_segs_lock); +} + +void +vm_unlock_memsegs(struct vm *vm) +{ + sx_unlock(&vm_mem(vm)->mem_segs_lock); +} + +void +vm_assert_memseg_locked(struct vm *vm) +{ + sx_assert(&vm_mem(vm)->mem_segs_lock, SX_LOCKED); +} + +void +vm_assert_memseg_xlocked(struct vm *vm) +{ + sx_assert(&vm_mem(vm)->mem_segs_lock, SX_XLOCKED); +} + +/* + * Return 'true' if 'gpa' is allocated in the guest address space. + * + * This function is called in the context of a running vcpu which acts as + * an implicit lock on 'vm->mem_maps[]'. 
+ */ +bool +vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) +{ + struct vm *vm = vcpu_vm(vcpu); + struct vm_mem_map *mm; + int i; + +#ifdef INVARIANTS + int hostcpu, state; + state = vcpu_get_state(vcpu, &hostcpu); + KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, + ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); +#endif + + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + mm = &vm_mem(vm)->mem_maps[i]; + if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) + return (true); /* 'gpa' is sysmem or devmem */ + } + + return (false); +} + +int +vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) +{ + struct vm_mem *mem; + struct vm_mem_seg *seg; + vm_object_t obj; + + mem = vm_mem(vm); + vm_assert_memseg_xlocked(vm); + + if (ident < 0 || ident >= VM_MAX_MEMSEGS) + return (EINVAL); + + if (len == 0 || (len & PAGE_MASK)) + return (EINVAL); + + seg = &mem->mem_segs[ident]; + if (seg->object != NULL) { + if (seg->len == len && seg->sysmem == sysmem) + return (EEXIST); + else + return (EINVAL); + } + + obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT); + if (obj == NULL) + return (ENOMEM); + + seg->len = len; + seg->object = obj; + seg->sysmem = sysmem; + return (0); +} + +int +vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, + vm_object_t *objptr) +{ + struct vm_mem *mem; + struct vm_mem_seg *seg; + + mem = vm_mem(vm); + + vm_assert_memseg_locked(vm); + + if (ident < 0 || ident >= VM_MAX_MEMSEGS) + return (EINVAL); + + seg = &mem->mem_segs[ident]; + if (len) + *len = seg->len; + if (sysmem) + *sysmem = seg->sysmem; + if (objptr) + *objptr = seg->object; + return (0); +} + +void +vm_free_memseg(struct vm *vm, int ident) +{ + struct vm_mem_seg *seg; + + KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, + ("%s: invalid memseg ident %d", __func__, ident)); + + seg = &vm_mem(vm)->mem_segs[ident]; + if (seg->object != NULL) { + vm_object_deallocate(seg->object); + bzero(seg, sizeof(struct vm_mem_seg)); + } +} + +int +vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, + size_t len, int prot, int flags) +{ + struct vm_mem *mem; + struct vm_mem_seg *seg; + struct vm_mem_map *m, *map; + struct vmspace *vmspace; + vm_ooffset_t last; + int i, error; + + if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) + return (EINVAL); + + if (flags & ~VM_MEMMAP_F_WIRED) + return (EINVAL); + + if (segid < 0 || segid >= VM_MAX_MEMSEGS) + return (EINVAL); + + mem = vm_mem(vm); + seg = &mem->mem_segs[segid]; + if (seg->object == NULL) + return (EINVAL); + + last = first + len; + if (first < 0 || first >= last || last > seg->len) + return (EINVAL); + + if ((gpa | first | last) & PAGE_MASK) + return (EINVAL); + + map = NULL; + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + m = &mem->mem_maps[i]; + if (m->len == 0) { + map = m; + break; + } + } + if (map == NULL) + return (ENOSPC); + + vmspace = vm_vmspace(vm); + error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa, + len, 0, VMFS_NO_SPACE, prot, prot, 0); + if (error != KERN_SUCCESS) + return (EFAULT); + + vm_object_reference(seg->object); + + if (flags & VM_MEMMAP_F_WIRED) { + error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len, + VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); + if (error != KERN_SUCCESS) { + vm_map_remove(&vmspace->vm_map, gpa, gpa + len); + return (error == KERN_RESOURCE_SHORTAGE ? 
ENOMEM : + EFAULT); + } + } + + map->gpa = gpa; + map->len = len; + map->segoff = first; + map->segid = segid; + map->prot = prot; + map->flags = flags; + return (0); +} + +int +vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) +{ + struct vm_mem *mem; + struct vm_mem_map *m; + int i; + + mem = vm_mem(vm); + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + m = &mem->mem_maps[i]; +#ifdef VM_MEMMAP_F_IOMMU + if ((m->flags & VM_MEMMAP_F_IOMMU) != 0) + continue; +#endif + if (m->gpa == gpa && m->len == len) { + vm_free_memmap(vm, i); + return (0); + } + } + + return (EINVAL); +} + +int +vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, + vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) +{ + struct vm_mem *mem; + struct vm_mem_map *mm, *mmnext; + int i; + + mem = vm_mem(vm); + + mmnext = NULL; + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + mm = &mem->mem_maps[i]; + if (mm->len == 0 || mm->gpa < *gpa) + continue; + if (mmnext == NULL || mm->gpa < mmnext->gpa) + mmnext = mm; + } + + if (mmnext != NULL) { + *gpa = mmnext->gpa; + if (segid) + *segid = mmnext->segid; + if (segoff) + *segoff = mmnext->segoff; + if (len) + *len = mmnext->len; + if (prot) + *prot = mmnext->prot; + if (flags) + *flags = mmnext->flags; + return (0); + } else { + return (ENOENT); + } +} + +static void +vm_free_memmap(struct vm *vm, int ident) +{ + struct vm_mem_map *mm; + int error __diagused; + + mm = &vm_mem(vm)->mem_maps[ident]; + if (mm->len) { + error = vm_map_remove(&vm_vmspace(vm)->vm_map, mm->gpa, + mm->gpa + mm->len); + KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d", + __func__, error)); + bzero(mm, sizeof(struct vm_mem_map)); + } +} + +vm_paddr_t +vmm_sysmem_maxaddr(struct vm *vm) +{ + struct vm_mem *mem; + struct vm_mem_map *mm; + vm_paddr_t maxaddr; + int i; + + mem = vm_mem(vm); + maxaddr = 0; + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + mm = &mem->mem_maps[i]; + if (sysmem_mapping(mem, i)) { + if (maxaddr < mm->gpa + mm->len) + maxaddr = mm->gpa + mm->len; + } + } + return (maxaddr); +} + +static void * +_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, + void **cookie) +{ + struct vm_mem_map *mm; + vm_page_t m; + int i, count, pageoff; + + pageoff = gpa & PAGE_MASK; + if (len > PAGE_SIZE - pageoff) + panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); + + count = 0; + for (i = 0; i < VM_MAX_MEMMAPS; i++) { + mm = &vm_mem(vm)->mem_maps[i]; + if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) { + count = vm_fault_quick_hold_pages( + &vm_vmspace(vm)->vm_map, trunc_page(gpa), + PAGE_SIZE, reqprot, &m, 1); + break; + } + } + + if (count == 1) { + *cookie = m; + return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff)); + } else { + *cookie = NULL; + return (NULL); + } +} + +void * +vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, + void **cookie) +{ +#ifdef INVARIANTS + /* + * The current vcpu should be frozen to ensure 'vm_memmap[]' + * stability. 
+ */ + int state = vcpu_get_state(vcpu, NULL); + KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", + __func__, state)); +#endif + return (_vm_gpa_hold(vcpu_vm(vcpu), gpa, len, reqprot, cookie)); +} + +void * +vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, + void **cookie) +{ + vm_assert_memseg_locked(vm); + return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); +} + +void +vm_gpa_release(void *cookie) +{ + vm_page_t m = cookie; + + vm_page_unwire(m, PQ_ACTIVE); +} diff --git a/sys/dev/vmm/vmm_mem.h b/sys/dev/vmm/vmm_mem.h new file mode 100644 index 000000000000..a4be4c1c57aa --- /dev/null +++ b/sys/dev/vmm/vmm_mem.h @@ -0,0 +1,84 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2011 NetApp, Inc. + * All rights reserved. + */ + +#ifndef _DEV_VMM_MEM_H_ +#define _DEV_VMM_MEM_H_ + +#ifdef _KERNEL + +#include +#include + +struct vm; +struct vm_object; + +struct vm_mem_seg { + size_t len; + bool sysmem; + struct vm_object *object; +}; + +struct vm_mem_map { + vm_paddr_t gpa; + size_t len; + vm_ooffset_t segoff; + int segid; + int prot; + int flags; +}; + +#define VM_MAX_MEMSEGS 4 +#define VM_MAX_MEMMAPS 8 + +struct vm_mem { + struct vm_mem_map mem_maps[VM_MAX_MEMMAPS]; + struct vm_mem_seg mem_segs[VM_MAX_MEMSEGS]; + struct sx mem_segs_lock; +}; + +void vm_mem_init(struct vm_mem *mem); +void vm_mem_cleanup(struct vm *vm); +void vm_mem_destroy(struct vm *vm); + +/* + * APIs that modify the guest memory map require all vcpus to be frozen. + */ +void vm_slock_memsegs(struct vm *vm); +void vm_xlock_memsegs(struct vm *vm); +void vm_unlock_memsegs(struct vm *vm); +void vm_assert_memseg_locked(struct vm *vm); +void vm_assert_memseg_xlocked(struct vm *vm); +int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, + size_t len, int prot, int flags); +int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); +int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); +void vm_free_memseg(struct vm *vm, int ident); + +/* + * APIs that inspect the guest memory map require only a *single* vcpu to + * be frozen. This acts like a read lock on the guest memory map since any + * modification requires *all* vcpus to be frozen. 
+ */ +int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, + vm_ooffset_t *segoff, size_t *len, int *prot, int *flags); +bool vm_memseg_sysmem(struct vm *vm, int ident); +int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, + struct vm_object **objptr); +vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm); +void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, + int prot, void **cookie); +void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, + int prot, void **cookie); +void vm_gpa_release(void *cookie); +bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa); + +int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, + uint64_t gla, int prot, uint64_t *gpa, int *is_fault); + +#endif /* _KERNEL */ + +#endif /* !_DEV_VMM_MEM_H_ */ diff --git a/sys/modules/vmm/Makefile b/sys/modules/vmm/Makefile index a6ea90567bb8..1e232779b4b7 100644 --- a/sys/modules/vmm/Makefile +++ b/sys/modules/vmm/Makefile @@ -1,163 +1,164 @@ .include KMOD= vmm .if ${MACHINE_CPUARCH} == "amd64" .endif SRCS+= acpi_if.h bus_if.h device_if.h pci_if.h pcib_if.h vnode_if.h CFLAGS+= -DVMM_KEEP_STATS CFLAGS+= -I${SRCTOP}/sys/${MACHINE}/vmm # generic vmm support .PATH: ${SRCTOP}/sys/dev/vmm ${SRCTOP}/sys/${MACHINE}/vmm SRCS+= vmm.c \ vmm_dev.c \ vmm_dev_machdep.c \ vmm_instruction_emul.c \ + vmm_mem.c \ vmm_stat.c .if ${MACHINE_CPUARCH} == "aarch64" CFLAGS+= -I${SRCTOP}/sys/${MACHINE}/vmm/io DPSRCS+= assym.inc # TODO: Add the new EL2 code SRCS+= vmm_arm64.c \ vmm_reset.c \ vmm_call.S \ vmm_handlers.c \ vmm_mmu.c \ vmm_vhe_exception.S \ vmm_vhe.c \ vmm_hyp_el2.S .PATH: ${SRCTOP}/sys/${MACHINE}/vmm/io SRCS+= vgic.c \ vgic_if.h \ vgic_if.c \ vgic_v3.c \ vtimer.c CLEANFILES+= vmm_nvhe_exception.o vmm_nvhe.o CLEANFILES+= vmm_hyp_blob.elf.full CLEANFILES+= vmm_hyp_blob.elf vmm_hyp_blob.bin vmm_nvhe_exception.o: vmm_nvhe_exception.S vmm_hyp_exception.S ${CC} -c -x assembler-with-cpp -DLOCORE \ ${NOSAN_CFLAGS:N-mbranch-protection*} ${.IMPSRC} -o ${.TARGET} -fpie vmm_nvhe.o: vmm_nvhe.c vmm_hyp.c ${CC} -c ${NOSAN_CFLAGS:N-mbranch-protection*} ${.IMPSRC} \ -o ${.TARGET} -fpie vmm_hyp_blob.elf.full: vmm_nvhe_exception.o vmm_nvhe.o ${LD} -m ${LD_EMULATION} -Bdynamic -L ${SYSDIR}/conf -T ${SYSDIR}/conf/ldscript.arm64 \ ${_LDFLAGS:N-zbti-report*} --no-warn-mismatch --warn-common --export-dynamic \ --dynamic-linker /red/herring -X -o ${.TARGET} ${.ALLSRC} \ --defsym=_start='0x0' --defsym=text_start='0x0' vmm_hyp_blob.elf: vmm_hyp_blob.elf.full ${OBJCOPY} --strip-debug ${.ALLSRC} ${.TARGET} vmm_hyp_blob.bin: vmm_hyp_blob.elf ${OBJCOPY} --output-target=binary ${.ALLSRC} ${.TARGET} vmm_hyp_el2.o: vmm_hyp_blob.bin .elif ${MACHINE_CPUARCH} == "amd64" CFLAGS+= -I${SRCTOP}/sys/${MACHINE}/vmm/io DPSRCS+= vmx_assym.h svm_assym.h DPSRCS+= vmx_genassym.c svm_genassym.c offset.inc CFLAGS+= -I${SRCTOP}/sys/amd64/vmm/intel CFLAGS+= -I${SRCTOP}/sys/amd64/vmm/amd SRCS+= opt_acpi.h \ opt_bhyve_snapshot.h \ opt_ddb.h SRCS+= vmm_host.c \ vmm_ioport.c \ vmm_lapic.c \ - vmm_mem.c \ + vmm_mem_machdep.c \ vmm_util.c \ x86.c .PATH: ${SRCTOP}/sys/${MACHINE}/vmm/io SRCS+= iommu.c \ ppt.c \ vatpic.c \ vatpit.c \ vhpet.c \ vioapic.c \ vlapic.c \ vpmtmr.c \ vrtc.c # intel-specific files .PATH: ${SRCTOP}/sys/amd64/vmm/intel SRCS+= ept.c \ vmcs.c \ vmx_msr.c \ vmx_support.S \ vmx.c \ vtd.c # amd-specific files .PATH: ${SRCTOP}/sys/amd64/vmm/amd SRCS+= vmcb.c \ amdviiommu.c \ ivhd_if.c \ ivhd_if.h \ svm.c \ svm_support.S \ npt.c \ ivrs_drv.c \ amdvi_hw.c \ svm_msr.c SRCS.BHYVE_SNAPSHOT= vmm_snapshot.c 
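The vmm_mem.h declarations above encode the lock discipline for the relocated code: callers that modify the guest memory map take the exclusive memseg lock (with all vcpus frozen by the ioctl layer), while callers that only inspect it, such as vmmdev_rw() and vmmdev_mmap_single(), take the shared lock. Below is a minimal read-side sketch of that pattern, built only from functions declared in vmm_mem.h; the wrapper name find_sysmem_mapping() is illustrative and not part of this change.

static int
find_sysmem_mapping(struct vm *vm, vm_paddr_t addr, int *segidp, vm_ooffset_t *offp)
{
	vm_paddr_t gpa;
	vm_ooffset_t segoff;
	size_t len;
	int error, segid;

	vm_slock_memsegs(vm);		/* read lock on the guest memory map */
	gpa = 0;
	for (;;) {
		/* Mappings are returned in ascending guest-physical order. */
		error = vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len, NULL, NULL);
		if (error != 0)
			break;		/* ENOENT: no mapping at or above gpa */
		if (addr >= gpa && addr < gpa + len) {
			if (vm_memseg_sysmem(vm, segid)) {
				*segidp = segid;
				*offp = segoff + (addr - gpa);
			} else
				error = EINVAL;	/* devmem: not handled here */
			break;
		}
		gpa += len;
	}
	vm_unlock_memsegs(vm);
	return (error);
}

This mirrors the lookup loop in vmmdev_mmap_single() and is what the "read lock" comment in vmm_mem.h refers to: inspecting the map needs only the shared lock (or a single frozen vcpu), because any modification must hold the exclusive lock with every vcpu frozen.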
CLEANFILES+= vmx_assym.h vmx_genassym.o svm_assym.h svm_genassym.o OBJS_DEPEND_GUESS.vmx_support.o+= vmx_assym.h OBJS_DEPEND_GUESS.svm_support.o+= svm_assym.h vmx_assym.h: vmx_genassym.o sh ${SYSDIR}/kern/genassym.sh vmx_genassym.o > ${.TARGET} svm_assym.h: svm_genassym.o sh ${SYSDIR}/kern/genassym.sh svm_genassym.o > ${.TARGET} vmx_support.o: ${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \ ${.IMPSRC} -o ${.TARGET} svm_support.o: ${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \ ${.IMPSRC} -o ${.TARGET} hyp_genassym.o: offset.inc ${CC} -c ${NOSAN_CFLAGS:N-flto:N-fno-common} -fcommon ${.IMPSRC} vmx_genassym.o: offset.inc ${CC} -c ${NOSAN_CFLAGS:N-flto*:N-fno-common} -fcommon ${.IMPSRC} svm_genassym.o: offset.inc ${CC} -c ${NOSAN_CFLAGS:N-flto*:N-fno-common} -fcommon ${.IMPSRC} .elif ${MACHINE_CPUARCH} == "riscv" SRCS+= vmm_aplic.c \ vmm_fence.c \ vmm_riscv.c \ vmm_sbi.c \ vmm_switch.S \ vmm_vtimer.c .endif .include diff --git a/sys/riscv/include/vmm.h b/sys/riscv/include/vmm.h index 6c027f50e97a..1221521be368 100644 --- a/sys/riscv/include/vmm.h +++ b/sys/riscv/include/vmm.h @@ -1,328 +1,298 @@ /* * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015 Mihai Carabas * Copyright (c) 2024 Ruslan Bukin * * This software was developed by the University of Cambridge Computer * Laboratory (Department of Computer Science and Technology) under Innovate * UK project 105694, "Digital Security by Design (DSbD) Technology Platform * Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _VMM_H_ #define _VMM_H_ #include #include #include #include #include "pte.h" #include "pmap.h" struct vcpu; enum vm_suspend_how { VM_SUSPEND_NONE, VM_SUSPEND_RESET, VM_SUSPEND_POWEROFF, VM_SUSPEND_HALT, VM_SUSPEND_LAST }; /* * Identifiers for architecturally defined registers. 
*/ enum vm_reg_name { VM_REG_GUEST_ZERO = 0, VM_REG_GUEST_RA, VM_REG_GUEST_SP, VM_REG_GUEST_GP, VM_REG_GUEST_TP, VM_REG_GUEST_T0, VM_REG_GUEST_T1, VM_REG_GUEST_T2, VM_REG_GUEST_S0, VM_REG_GUEST_S1, VM_REG_GUEST_A0, VM_REG_GUEST_A1, VM_REG_GUEST_A2, VM_REG_GUEST_A3, VM_REG_GUEST_A4, VM_REG_GUEST_A5, VM_REG_GUEST_A6, VM_REG_GUEST_A7, VM_REG_GUEST_S2, VM_REG_GUEST_S3, VM_REG_GUEST_S4, VM_REG_GUEST_S5, VM_REG_GUEST_S6, VM_REG_GUEST_S7, VM_REG_GUEST_S8, VM_REG_GUEST_S9, VM_REG_GUEST_S10, VM_REG_GUEST_S11, VM_REG_GUEST_T3, VM_REG_GUEST_T4, VM_REG_GUEST_T5, VM_REG_GUEST_T6, VM_REG_GUEST_SEPC, VM_REG_LAST }; #define VM_INTINFO_VECTOR(info) ((info) & 0xff) #define VM_INTINFO_DEL_ERRCODE 0x800 #define VM_INTINFO_RSVD 0x7ffff000 #define VM_INTINFO_VALID 0x80000000 #define VM_INTINFO_TYPE 0x700 #define VM_INTINFO_HWINTR (0 << 8) #define VM_INTINFO_NMI (2 << 8) #define VM_INTINFO_HWEXCEPTION (3 << 8) #define VM_INTINFO_SWINTR (4 << 8) #define VM_MAX_NAMELEN 32 #define VM_MAX_SUFFIXLEN 15 #ifdef _KERNEL struct vm; struct vm_exception; struct vm_exit; struct vm_run; struct vm_object; struct vm_guest_paging; struct vm_aplic_descr; struct pmap; struct vm_eventinfo { void *rptr; /* rendezvous cookie */ int *sptr; /* suspend cookie */ int *iptr; /* reqidle cookie */ }; int vm_create(const char *name, struct vm **retvm); struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid); void vm_disable_vcpu_creation(struct vm *vm); void vm_slock_vcpus(struct vm *vm); void vm_unlock_vcpus(struct vm *vm); void vm_destroy(struct vm *vm); int vm_reinit(struct vm *vm); const char *vm_name(struct vm *vm); -/* - * APIs that modify the guest memory map require all vcpus to be frozen. - */ -void vm_slock_memsegs(struct vm *vm); -void vm_xlock_memsegs(struct vm *vm); -void vm_unlock_memsegs(struct vm *vm); -int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off, - size_t len, int prot, int flags); -int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len); -int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem); -void vm_free_memseg(struct vm *vm, int ident); - -/* - * APIs that inspect the guest memory map require only a *single* vcpu to - * be frozen. This acts like a read lock on the guest memory map since any - * modification requires *all* vcpus to be frozen. 
- */ -int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, - vm_ooffset_t *segoff, size_t *len, int *prot, int *flags); -int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, - struct vm_object **objptr); -vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm); -void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, - int prot, void **cookie); -void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, - int prot, void **cookie); -void vm_gpa_release(void *cookie); -bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa); - -int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, - uint64_t gla, int prot, uint64_t *gpa, int *is_fault); - uint16_t vm_get_maxcpus(struct vm *vm); void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus); int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus); int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval); int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val); int vm_run(struct vcpu *vcpu); int vm_suspend(struct vm *vm, enum vm_suspend_how how); void* vm_get_cookie(struct vm *vm); int vcpu_vcpuid(struct vcpu *vcpu); void *vcpu_get_cookie(struct vcpu *vcpu); struct vm *vcpu_vm(struct vcpu *vcpu); struct vcpu *vm_vcpu(struct vm *vm, int cpu); int vm_get_capability(struct vcpu *vcpu, int type, int *val); int vm_set_capability(struct vcpu *vcpu, int type, int val); int vm_activate_cpu(struct vcpu *vcpu); int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu); int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu); int vm_inject_exception(struct vcpu *vcpu, uint64_t scause); int vm_attach_aplic(struct vm *vm, struct vm_aplic_descr *descr); int vm_assert_irq(struct vm *vm, uint32_t irq); int vm_deassert_irq(struct vm *vm, uint32_t irq); int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, int func); struct vm_exit *vm_exitinfo(struct vcpu *vcpu); void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc); void vm_exit_debug(struct vcpu *vcpu, uint64_t pc); void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc); void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc); cpuset_t vm_active_cpus(struct vm *vm); cpuset_t vm_debug_cpus(struct vm *vm); cpuset_t vm_suspended_cpus(struct vm *vm); static __inline int vcpu_rendezvous_pending(struct vm_eventinfo *info) { return (*((uintptr_t *)(info->rptr)) != 0); } static __inline int vcpu_suspended(struct vm_eventinfo *info) { return (*info->sptr); } int vcpu_debugged(struct vcpu *vcpu); enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING, }; int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle); enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu); static int __inline vcpu_is_running(struct vcpu *vcpu, int *hostcpu) { return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING); } #ifdef _SYS_PROC_H_ static int __inline vcpu_should_yield(struct vcpu *vcpu) { struct thread *td; td = curthread; return (td->td_ast != 0 || td->td_owepreempt != 0); } #endif void *vcpu_stats(struct vcpu *vcpu); void vcpu_notify_event(struct vcpu *vcpu); +struct vmspace *vm_vmspace(struct vm *vm); +struct vm_mem *vm_mem(struct vm *vm); enum vm_reg_name vm_segment_name(int seg_encoding); #endif /* _KERNEL */ #define VM_DIR_READ 0 #define VM_DIR_WRITE 1 #define VM_GP_M_MASK 0x1f #define VM_GP_MMU_ENABLED (1 << 5) struct vm_guest_paging { int flags; int padding; }; struct vie { uint8_t access_size:4, 
sign_extend:1, dir:1, unused:2; enum vm_reg_name reg; }; struct vre { uint32_t inst_syndrome; uint8_t dir:1, unused:7; enum vm_reg_name reg; }; /* * Identifiers for optional vmm capabilities */ enum vm_cap_type { VM_CAP_UNRESTRICTED_GUEST, VM_CAP_SSTC, VM_CAP_MAX }; enum vm_exitcode { VM_EXITCODE_BOGUS, VM_EXITCODE_ECALL, VM_EXITCODE_HYP, VM_EXITCODE_PAGING, VM_EXITCODE_SUSPENDED, VM_EXITCODE_DEBUG, VM_EXITCODE_INST_EMUL, VM_EXITCODE_WFI, VM_EXITCODE_MAX }; struct vm_exit { uint64_t scause; uint64_t sepc; uint64_t stval; uint64_t htval; uint64_t htinst; enum vm_exitcode exitcode; int inst_length; uint64_t pc; union { struct { uint64_t gpa; } paging; struct { uint64_t gpa; struct vm_guest_paging paging; struct vie vie; } inst_emul; struct { uint64_t args[8]; } ecall; struct { enum vm_suspend_how how; } suspended; struct { uint64_t scause; } hyp; } u; }; #endif /* _VMM_H_ */ diff --git a/sys/riscv/vmm/vmm.c b/sys/riscv/vmm/vmm.c index 96871fc88453..7528ef6e4698 100644 --- a/sys/riscv/vmm/vmm.c +++ b/sys/riscv/vmm/vmm.c @@ -1,1619 +1,1247 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015 Mihai Carabas * Copyright (c) 2024 Ruslan Bukin * * This software was developed by the University of Cambridge Computer * Laboratory (Department of Computer Science and Technology) under Innovate * UK project 105694, "Digital Security by Design (DSbD) Technology Platform * Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include "vmm_stat.h" #include "riscv.h" #include "vmm_aplic.h" struct vcpu { int flags; enum vcpu_state state; struct mtx mtx; int hostcpu; /* host cpuid this vcpu last ran on */ int vcpuid; void *stats; struct vm_exit exitinfo; uint64_t nextpc; /* (x) next instruction to execute */ struct vm *vm; /* (o) */ void *cookie; /* (i) cpu-specific data */ struct fpreg *guestfpu; /* (a,i) guest fpu state */ }; #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx)) #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx)) #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx)) #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx)) #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED) -struct mem_seg { - uint64_t gpa; - size_t len; - bool wired; - bool sysmem; - vm_object_t object; -}; -#define VM_MAX_MEMSEGS 3 - -struct mem_map { - vm_paddr_t gpa; - size_t len; - vm_ooffset_t segoff; - int segid; - int prot; - int flags; -}; -#define VM_MAX_MEMMAPS 4 - struct vmm_mmio_region { uint64_t start; uint64_t end; mem_region_read_t read; mem_region_write_t write; }; #define VM_MAX_MMIO_REGIONS 4 /* * Initialization: * (o) initialized the first time the VM is created * (i) initialized when VM is created and when it is reinitialized * (x) initialized before use */ struct vm { void *cookie; /* (i) cpu-specific data */ volatile cpuset_t active_cpus; /* (i) active vcpus */ volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug*/ int suspend; /* (i) stop VM execution */ bool dying; /* (o) is dying */ volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */ volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */ - struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */ - struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */ struct vmspace *vmspace; /* (o) guest's address space */ + struct vm_mem mem; /* (i) [m+v] guest memory */ char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */ struct vcpu **vcpu; /* (i) guest vcpus */ struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS]; /* (o) guest MMIO regions */ /* The following describe the vm cpu topology */ uint16_t sockets; /* (o) num of sockets */ uint16_t cores; /* (o) num of cores/socket */ uint16_t threads; /* (o) num of threads/core */ uint16_t maxcpus; /* (o) max pluggable cpus */ - struct sx mem_segs_lock; /* (o) */ struct sx vcpus_init_lock; /* (o) */ }; static bool vmm_initialized = false; static MALLOC_DEFINE(M_VMM, "vmm", "vmm"); /* statistics */ static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime"); SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL); static int vmm_ipinum; SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0, "IPI vector used for vcpu notifications"); u_int vm_maxcpu; SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &vm_maxcpu, 0, "Maximum number of vCPUs"); -static void vm_free_memmap(struct vm *vm, int ident); -static bool sysmem_mapping(struct vm *vm, struct mem_map *mm); static void vcpu_notify_event_locked(struct vcpu *vcpu); /* global statistics */ VMM_STAT(VMEXIT_COUNT, "total number of vm exits"); VMM_STAT(VMEXIT_IRQ, "number of 
vmexits for an irq"); VMM_STAT(VMEXIT_UNHANDLED, "number of vmexits for an unhandled exception"); /* * Upper limit on vm_maxcpu. We could increase this to 28 bits, but this * is a safe value for now. */ #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE) static void vcpu_cleanup(struct vcpu *vcpu, bool destroy) { vmmops_vcpu_cleanup(vcpu->cookie); vcpu->cookie = NULL; if (destroy) { vmm_stat_free(vcpu->stats); fpu_save_area_free(vcpu->guestfpu); vcpu_lock_destroy(vcpu); } } static struct vcpu * vcpu_alloc(struct vm *vm, int vcpu_id) { struct vcpu *vcpu; KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, ("vcpu_alloc: invalid vcpu %d", vcpu_id)); vcpu = malloc(sizeof(*vcpu), M_VMM, M_WAITOK | M_ZERO); vcpu_lock_init(vcpu); vcpu->state = VCPU_IDLE; vcpu->hostcpu = NOCPU; vcpu->vcpuid = vcpu_id; vcpu->vm = vm; vcpu->guestfpu = fpu_save_area_alloc(); vcpu->stats = vmm_stat_alloc(); return (vcpu); } static void vcpu_init(struct vcpu *vcpu) { vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); MPASS(vcpu->cookie != NULL); fpu_save_area_reset(vcpu->guestfpu); vmm_stat_init(vcpu->stats); } struct vm_exit * vm_exitinfo(struct vcpu *vcpu) { return (&vcpu->exitinfo); } static int vmm_init(void) { vm_maxcpu = mp_ncpus; TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu); if (vm_maxcpu > VM_MAXCPU) { printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU); vm_maxcpu = VM_MAXCPU; } if (vm_maxcpu == 0) vm_maxcpu = 1; return (vmmops_modinit()); } static int vmm_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: error = vmmdev_init(); if (error != 0) break; error = vmm_init(); if (error == 0) vmm_initialized = true; else (void)vmmdev_cleanup(); break; case MOD_UNLOAD: error = vmmdev_cleanup(); if (error == 0 && vmm_initialized) { error = vmmops_modcleanup(); if (error) { /* * Something bad happened - prevent new * VMs from being created */ vmm_initialized = false; } } break; default: error = 0; break; } return (error); } static moduledata_t vmm_kmod = { "vmm", vmm_handler, NULL }; /* * vmm initialization has the following dependencies: * * - vmm device initialization requires an initialized devfs. */ DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_DEVFS + 1, SI_ORDER_ANY); MODULE_VERSION(vmm, 1); static void vm_init(struct vm *vm, bool create) { int i; vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); MPASS(vm->cookie != NULL); CPU_ZERO(&vm->active_cpus); CPU_ZERO(&vm->debug_cpus); vm->suspend = 0; CPU_ZERO(&vm->suspended_cpus); memset(vm->mmio_region, 0, sizeof(vm->mmio_region)); if (!create) { for (i = 0; i < vm->maxcpus; i++) { if (vm->vcpu[i] != NULL) vcpu_init(vm->vcpu[i]); } } } void vm_disable_vcpu_creation(struct vm *vm) { sx_xlock(&vm->vcpus_init_lock); vm->dying = true; sx_xunlock(&vm->vcpus_init_lock); } struct vcpu * vm_alloc_vcpu(struct vm *vm, int vcpuid) { struct vcpu *vcpu; if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) return (NULL); /* Some interrupt controllers may have a CPU limit */ if (vcpuid >= aplic_max_cpu_count(vm->cookie)) return (NULL); vcpu = (struct vcpu *) atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); if (__predict_true(vcpu != NULL)) return (vcpu); sx_xlock(&vm->vcpus_init_lock); vcpu = vm->vcpu[vcpuid]; if (vcpu == NULL && !vm->dying) { vcpu = vcpu_alloc(vm, vcpuid); vcpu_init(vcpu); /* * Ensure vCPU is fully created before updating pointer * to permit unlocked reads above. 
*/ atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], (uintptr_t)vcpu); } sx_xunlock(&vm->vcpus_init_lock); return (vcpu); } void vm_slock_vcpus(struct vm *vm) { sx_slock(&vm->vcpus_init_lock); } void vm_unlock_vcpus(struct vm *vm) { sx_unlock(&vm->vcpus_init_lock); } int vm_create(const char *name, struct vm **retvm) { struct vm *vm; struct vmspace *vmspace; /* * If vmm.ko could not be successfully initialized then don't attempt * to create the virtual machine. */ if (!vmm_initialized) return (ENXIO); if (name == NULL || strlen(name) >= VM_MAX_NAMELEN) return (EINVAL); vmspace = vmmops_vmspace_alloc(0, 1ul << 39); if (vmspace == NULL) return (ENOMEM); vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO); strcpy(vm->name, name); vm->vmspace = vmspace; - sx_init(&vm->mem_segs_lock, "vm mem_segs"); + vm_mem_init(&vm->mem); sx_init(&vm->vcpus_init_lock, "vm vcpus"); vm->sockets = 1; vm->cores = 1; /* XXX backwards compatibility */ vm->threads = 1; /* XXX backwards compatibility */ vm->maxcpus = vm_maxcpu; vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, M_WAITOK | M_ZERO); vm_init(vm, true); *retvm = vm; return (0); } void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus) { *sockets = vm->sockets; *cores = vm->cores; *threads = vm->threads; *maxcpus = vm->maxcpus; } uint16_t vm_get_maxcpus(struct vm *vm) { return (vm->maxcpus); } int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus) { /* Ignore maxcpus. */ if ((sockets * cores * threads) > vm->maxcpus) return (EINVAL); vm->sockets = sockets; vm->cores = cores; vm->threads = threads; return(0); } static void vm_cleanup(struct vm *vm, bool destroy) { - struct mem_map *mm; int i; + if (destroy) + vm_xlock_memsegs(vm); + else + vm_assert_memseg_xlocked(vm); + aplic_detach_from_vm(vm->cookie); for (i = 0; i < vm->maxcpus; i++) { if (vm->vcpu[i] != NULL) vcpu_cleanup(vm->vcpu[i], destroy); } vmmops_cleanup(vm->cookie); - /* - * System memory is removed from the guest address space only when - * the VM is destroyed. This is because the mapping remains the same - * across VM reset. - * - * Device memory can be relocated by the guest (e.g. using PCI BARs) - * so those mappings are removed on a VM reset. - */ - if (!destroy) { - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (destroy || !sysmem_mapping(vm, mm)) - vm_free_memmap(vm, i); - } - } - + vm_mem_cleanup(vm); if (destroy) { - for (i = 0; i < VM_MAX_MEMSEGS; i++) - vm_free_memseg(vm, i); + vm_mem_destroy(vm); vmmops_vmspace_free(vm->vmspace); vm->vmspace = NULL; for (i = 0; i < vm->maxcpus; i++) free(vm->vcpu[i], M_VMM); free(vm->vcpu, M_VMM); sx_destroy(&vm->vcpus_init_lock); - sx_destroy(&vm->mem_segs_lock); } } void vm_destroy(struct vm *vm) { vm_cleanup(vm, true); free(vm, M_VMM); } int vm_reinit(struct vm *vm) { int error; /* * A virtual machine can be reset only if all vcpus are suspended. */ if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { vm_cleanup(vm, false); vm_init(vm, false); error = 0; } else { error = EBUSY; } return (error); } const char * vm_name(struct vm *vm) { return (vm->name); } -void -vm_slock_memsegs(struct vm *vm) -{ - sx_slock(&vm->mem_segs_lock); -} - -void -vm_xlock_memsegs(struct vm *vm) -{ - sx_xlock(&vm->mem_segs_lock); -} - -void -vm_unlock_memsegs(struct vm *vm) -{ - sx_unlock(&vm->mem_segs_lock); -} - -/* - * Return 'true' if 'gpa' is allocated in the guest address space. 
- * - * This function is called in the context of a running vcpu which acts as - * an implicit lock on 'vm->mem_maps[]'. - */ -bool -vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) -{ - struct vm *vm = vcpu->vm; - struct mem_map *mm; - int i; - -#ifdef INVARIANTS - int hostcpu, state; - state = vcpu_get_state(vcpu, &hostcpu); - KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, - ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); -#endif - - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) - return (true); /* 'gpa' is sysmem or devmem */ - } - - return (false); -} - -int -vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) -{ - struct mem_seg *seg; - vm_object_t obj; - - sx_assert(&vm->mem_segs_lock, SX_XLOCKED); - - if (ident < 0 || ident >= VM_MAX_MEMSEGS) - return (EINVAL); - - if (len == 0 || (len & PAGE_MASK)) - return (EINVAL); - - seg = &vm->mem_segs[ident]; - if (seg->object != NULL) { - if (seg->len == len && seg->sysmem == sysmem) - return (EEXIST); - else - return (EINVAL); - } - - obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT); - if (obj == NULL) - return (ENOMEM); - - seg->len = len; - seg->object = obj; - seg->sysmem = sysmem; - return (0); -} - -int -vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, - vm_object_t *objptr) -{ - struct mem_seg *seg; - - sx_assert(&vm->mem_segs_lock, SX_LOCKED); - - if (ident < 0 || ident >= VM_MAX_MEMSEGS) - return (EINVAL); - - seg = &vm->mem_segs[ident]; - if (len) - *len = seg->len; - if (sysmem) - *sysmem = seg->sysmem; - if (objptr) - *objptr = seg->object; - return (0); -} - -void -vm_free_memseg(struct vm *vm, int ident) -{ - struct mem_seg *seg; - - KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, - ("%s: invalid memseg ident %d", __func__, ident)); - - seg = &vm->mem_segs[ident]; - if (seg->object != NULL) { - vm_object_deallocate(seg->object); - bzero(seg, sizeof(struct mem_seg)); - } -} - -int -vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, - size_t len, int prot, int flags) -{ - struct mem_seg *seg; - struct mem_map *m, *map; - vm_ooffset_t last; - int i, error; - - dprintf("%s: gpa %lx first %lx len %lx\n", __func__, gpa, first, len); - - if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) - return (EINVAL); - - if (flags & ~VM_MEMMAP_F_WIRED) - return (EINVAL); - - if (segid < 0 || segid >= VM_MAX_MEMSEGS) - return (EINVAL); - - seg = &vm->mem_segs[segid]; - if (seg->object == NULL) - return (EINVAL); - - last = first + len; - if (first < 0 || first >= last || last > seg->len) - return (EINVAL); - - if ((gpa | first | last) & PAGE_MASK) - return (EINVAL); - - map = NULL; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - m = &vm->mem_maps[i]; - if (m->len == 0) { - map = m; - break; - } - } - - if (map == NULL) - return (ENOSPC); - - error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, - len, 0, VMFS_NO_SPACE, prot, prot, 0); - if (error != KERN_SUCCESS) - return (EFAULT); - - vm_object_reference(seg->object); - - if (flags & VM_MEMMAP_F_WIRED) { - error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, - VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); - if (error != KERN_SUCCESS) { - vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); - return (error == KERN_RESOURCE_SHORTAGE ? 
ENOMEM : - EFAULT); - } - } - - map->gpa = gpa; - map->len = len; - map->segoff = first; - map->segid = segid; - map->prot = prot; - map->flags = flags; - return (0); -} - -int -vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) -{ - struct mem_map *m; - int i; - - dprintf("%s: gpa %lx len %lx\n", __func__, gpa, len); - - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - m = &vm->mem_maps[i]; - if (m->gpa == gpa && m->len == len) { - vm_free_memmap(vm, i); - return (0); - } - } - - return (EINVAL); -} - -int -vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, - vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) -{ - struct mem_map *mm, *mmnext; - int i; - - mmnext = NULL; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (mm->len == 0 || mm->gpa < *gpa) - continue; - if (mmnext == NULL || mm->gpa < mmnext->gpa) - mmnext = mm; - } - - if (mmnext != NULL) { - *gpa = mmnext->gpa; - if (segid) - *segid = mmnext->segid; - if (segoff) - *segoff = mmnext->segoff; - if (len) - *len = mmnext->len; - if (prot) - *prot = mmnext->prot; - if (flags) - *flags = mmnext->flags; - return (0); - } else { - return (ENOENT); - } -} - -static void -vm_free_memmap(struct vm *vm, int ident) -{ - struct mem_map *mm; - int error __diagused; - - mm = &vm->mem_maps[ident]; - if (mm->len) { - error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, - mm->gpa + mm->len); - KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d", - __func__, error)); - bzero(mm, sizeof(struct mem_map)); - } -} - -static __inline bool -sysmem_mapping(struct vm *vm, struct mem_map *mm) -{ - - if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) - return (true); - else - return (false); -} - -vm_paddr_t -vmm_sysmem_maxaddr(struct vm *vm) -{ - struct mem_map *mm; - vm_paddr_t maxaddr; - int i; - - maxaddr = 0; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (sysmem_mapping(vm, mm)) { - if (maxaddr < mm->gpa + mm->len) - maxaddr = mm->gpa + mm->len; - } - } - return (maxaddr); -} - int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *is_fault) { - int error; - - error = vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault); - - return (error); + return (vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault)); } void vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size, mem_region_read_t mmio_read, mem_region_write_t mmio_write) { int i; for (i = 0; i < nitems(vm->mmio_region); i++) { if (vm->mmio_region[i].start == 0 && vm->mmio_region[i].end == 0) { vm->mmio_region[i].start = start; vm->mmio_region[i].end = start + size; vm->mmio_region[i].read = mmio_read; vm->mmio_region[i].write = mmio_write; return; } } panic("%s: No free MMIO region", __func__); } void vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size) { int i; for (i = 0; i < nitems(vm->mmio_region); i++) { if (vm->mmio_region[i].start == start && vm->mmio_region[i].end == start + size) { memset(&vm->mmio_region[i], 0, sizeof(vm->mmio_region[i])); return; } } panic("%s: Invalid MMIO region: %lx - %lx", __func__, start, start + size); } static int vm_handle_inst_emul(struct vcpu *vcpu, bool *retu) { struct vm *vm; struct vm_exit *vme; struct vie *vie; struct hyp *hyp; uint64_t fault_ipa; struct vm_guest_paging *paging; struct vmm_mmio_region *vmr; int error, i; vm = vcpu->vm; hyp = vm->cookie; if (!hyp->aplic_attached) goto out_user; vme = &vcpu->exitinfo; vie = &vme->u.inst_emul.vie; paging = 
&vme->u.inst_emul.paging; fault_ipa = vme->u.inst_emul.gpa; vmr = NULL; for (i = 0; i < nitems(vm->mmio_region); i++) { if (vm->mmio_region[i].start <= fault_ipa && vm->mmio_region[i].end > fault_ipa) { vmr = &vm->mmio_region[i]; break; } } if (vmr == NULL) goto out_user; error = vmm_emulate_instruction(vcpu, fault_ipa, vie, paging, vmr->read, vmr->write, retu); return (error); out_user: *retu = true; return (0); } int vm_suspend(struct vm *vm, enum vm_suspend_how how) { int i; if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) return (EINVAL); if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { VM_CTR2(vm, "virtual machine already suspended %d/%d", vm->suspend, how); return (EALREADY); } VM_CTR1(vm, "virtual machine successfully suspended %d", how); /* * Notify all active vcpus that they are now suspended. */ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->active_cpus)) vcpu_notify_event(vm_vcpu(vm, i)); } return (0); } void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc) { struct vm *vm = vcpu->vm; struct vm_exit *vmexit; KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); vmexit = vm_exitinfo(vcpu); vmexit->pc = pc; vmexit->inst_length = 4; vmexit->exitcode = VM_EXITCODE_SUSPENDED; vmexit->u.suspended.how = vm->suspend; } void vm_exit_debug(struct vcpu *vcpu, uint64_t pc) { struct vm_exit *vmexit; vmexit = vm_exitinfo(vcpu); vmexit->pc = pc; vmexit->inst_length = 4; vmexit->exitcode = VM_EXITCODE_DEBUG; } int vm_activate_cpu(struct vcpu *vcpu) { struct vm *vm = vcpu->vm; if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) return (EBUSY); CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); return (0); } int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) { if (vcpu == NULL) { vm->debug_cpus = vm->active_cpus; for (int i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->active_cpus)) vcpu_notify_event(vm_vcpu(vm, i)); } } else { if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) return (EINVAL); CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); vcpu_notify_event(vcpu); } return (0); } int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) { if (vcpu == NULL) { CPU_ZERO(&vm->debug_cpus); } else { if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) return (EINVAL); CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); } return (0); } int vcpu_debugged(struct vcpu *vcpu) { return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); } cpuset_t vm_active_cpus(struct vm *vm) { return (vm->active_cpus); } cpuset_t vm_debug_cpus(struct vm *vm) { return (vm->debug_cpus); } cpuset_t vm_suspended_cpus(struct vm *vm) { return (vm->suspended_cpus); } void * vcpu_stats(struct vcpu *vcpu) { return (vcpu->stats); } /* * This function is called to ensure that a vcpu "sees" a pending event * as soon as possible: * - If the vcpu thread is sleeping then it is woken up. * - If the vcpu is running on a different host_cpu then an IPI will be directed * to the host_cpu to cause the vcpu to trap into the hypervisor. */ static void vcpu_notify_event_locked(struct vcpu *vcpu) { int hostcpu; hostcpu = vcpu->hostcpu; if (vcpu->state == VCPU_RUNNING) { KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); if (hostcpu != curcpu) { ipi_cpu(hostcpu, vmm_ipinum); } else { /* * If the 'vcpu' is running on 'curcpu' then it must * be sending a notification to itself (e.g. SELF_IPI). * The pending event will be picked up when the vcpu * transitions back to guest context. 
*/ } } else { KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " "with hostcpu %d", vcpu->state, hostcpu)); if (vcpu->state == VCPU_SLEEPING) wakeup_one(vcpu); } } void vcpu_notify_event(struct vcpu *vcpu) { vcpu_lock(vcpu); vcpu_notify_event_locked(vcpu); vcpu_unlock(vcpu); } +struct vmspace * +vm_vmspace(struct vm *vm) +{ + return (vm->vmspace); +} + +struct vm_mem * +vm_mem(struct vm *vm) +{ + return (&vm->mem); +} + static void restore_guest_fpustate(struct vcpu *vcpu) { /* Flush host state to the pcb. */ fpe_state_save(curthread); /* Ensure the VFP state will be re-loaded when exiting the guest. */ PCPU_SET(fpcurthread, NULL); /* restore guest FPU state */ fpe_enable(); fpe_restore(vcpu->guestfpu); /* * The FPU is now "dirty" with the guest's state so turn on emulation * to trap any access to the FPU by the host. */ fpe_disable(); } static void save_guest_fpustate(struct vcpu *vcpu) { /* Save guest FPE state. */ fpe_enable(); fpe_store(vcpu->guestfpu); fpe_disable(); KASSERT(PCPU_GET(fpcurthread) == NULL, ("%s: fpcurthread set with guest registers", __func__)); } static int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) { int error; vcpu_assert_locked(vcpu); /* * State transitions from the vmmdev_ioctl() must always begin from * the VCPU_IDLE state. This guarantees that there is only a single * ioctl() operating on a vcpu at any point. */ if (from_idle) { while (vcpu->state != VCPU_IDLE) { vcpu_notify_event_locked(vcpu); msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); } } else { KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " "vcpu idle state")); } if (vcpu->state == VCPU_RUNNING) { KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " "mismatch for running vcpu", curcpu, vcpu->hostcpu)); } else { KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " "vcpu that is not running", vcpu->hostcpu)); } /* * The following state transitions are allowed: * IDLE -> FROZEN -> IDLE * FROZEN -> RUNNING -> FROZEN * FROZEN -> SLEEPING -> FROZEN */ switch (vcpu->state) { case VCPU_IDLE: case VCPU_RUNNING: case VCPU_SLEEPING: error = (newstate != VCPU_FROZEN); break; case VCPU_FROZEN: error = (newstate == VCPU_FROZEN); break; default: error = 1; break; } if (error) return (EBUSY); vcpu->state = newstate; if (newstate == VCPU_RUNNING) vcpu->hostcpu = curcpu; else vcpu->hostcpu = NOCPU; if (newstate == VCPU_IDLE) wakeup(&vcpu->state); return (0); } static void vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) panic("Error %d setting state to %d\n", error, newstate); } static void vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) panic("Error %d setting state to %d", error, newstate); } int vm_get_capability(struct vcpu *vcpu, int type, int *retval) { if (type < 0 || type >= VM_CAP_MAX) return (EINVAL); return (vmmops_getcap(vcpu->cookie, type, retval)); } int vm_set_capability(struct vcpu *vcpu, int type, int val) { if (type < 0 || type >= VM_CAP_MAX) return (EINVAL); return (vmmops_setcap(vcpu->cookie, type, val)); } struct vm * vcpu_vm(struct vcpu *vcpu) { return (vcpu->vm); } int vcpu_vcpuid(struct vcpu *vcpu) { return (vcpu->vcpuid); } void * vcpu_get_cookie(struct vcpu *vcpu) { return (vcpu->cookie); } struct vcpu * vm_vcpu(struct vm *vm, int vcpuid) { return (vm->vcpu[vcpuid]); } int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state 
newstate, bool from_idle) { int error; vcpu_lock(vcpu); error = vcpu_set_state_locked(vcpu, newstate, from_idle); vcpu_unlock(vcpu); return (error); } enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu) { enum vcpu_state state; vcpu_lock(vcpu); state = vcpu->state; if (hostcpu != NULL) *hostcpu = vcpu->hostcpu; vcpu_unlock(vcpu); return (state); } -static void * -_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ - int i, count, pageoff; - struct mem_map *mm; - vm_page_t m; - - pageoff = gpa & PAGE_MASK; - if (len > PAGE_SIZE - pageoff) - panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); - - count = 0; - for (i = 0; i < VM_MAX_MEMMAPS; i++) { - mm = &vm->mem_maps[i]; - if (sysmem_mapping(vm, mm) && gpa >= mm->gpa && - gpa < mm->gpa + mm->len) { - count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, - trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1); - break; - } - } - - if (count == 1) { - *cookie = m; - return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff)); - } else { - *cookie = NULL; - return (NULL); - } -} - -void * -vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ -#ifdef INVARIANTS - /* - * The current vcpu should be frozen to ensure 'vm_memmap[]' - * stability. - */ - int state = vcpu_get_state(vcpu, NULL); - KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", - __func__, state)); -#endif - return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); -} - -void * -vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, - void **cookie) -{ - sx_assert(&vm->mem_segs_lock, SX_LOCKED); - return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); -} - -void -vm_gpa_release(void *cookie) -{ - vm_page_t m = cookie; - - vm_page_unwire(m, PQ_ACTIVE); -} - int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) { if (reg >= VM_REG_LAST) return (EINVAL); return (vmmops_getreg(vcpu->cookie, reg, retval)); } int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) { int error; if (reg >= VM_REG_LAST) return (EINVAL); error = vmmops_setreg(vcpu->cookie, reg, val); if (error || reg != VM_REG_GUEST_SEPC) return (error); vcpu->nextpc = val; return (0); } void * vm_get_cookie(struct vm *vm) { return (vm->cookie); } int vm_inject_exception(struct vcpu *vcpu, uint64_t scause) { return (vmmops_exception(vcpu->cookie, scause)); } int vm_attach_aplic(struct vm *vm, struct vm_aplic_descr *descr) { return (aplic_attach_to_vm(vm->cookie, descr)); } int vm_assert_irq(struct vm *vm, uint32_t irq) { return (aplic_inject_irq(vm->cookie, -1, irq, true)); } int vm_deassert_irq(struct vm *vm, uint32_t irq) { return (aplic_inject_irq(vm->cookie, -1, irq, false)); } int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, int func) { return (aplic_inject_msi(vm->cookie, msg, addr)); } static int vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) { vcpu_lock(vcpu); while (1) { if (aplic_check_pending(vcpu->cookie)) break; if (riscv_check_ipi(vcpu->cookie, false)) break; if (riscv_check_interrupts_pending(vcpu->cookie)) break; if (vcpu_should_yield(vcpu)) break; vcpu_require_state_locked(vcpu, VCPU_SLEEPING); /* * XXX msleep_spin() cannot be interrupted by signals so * wake up periodically to check pending signals. 
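 * The hz timeout bounds each sleep to roughly a second; every wakeup
 * re-runs the checks above (APLIC, IPI, other pending interrupts and
 * vcpu_should_yield()) before the vcpu goes back to sleep.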
*/ msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz); vcpu_require_state_locked(vcpu, VCPU_FROZEN); } vcpu_unlock(vcpu); *retu = false; return (0); } static int vm_handle_paging(struct vcpu *vcpu, bool *retu) { struct vm *vm; struct vm_exit *vme; struct vm_map *map; uint64_t addr; pmap_t pmap; int ftype, rv; vm = vcpu->vm; vme = &vcpu->exitinfo; pmap = vmspace_pmap(vm->vmspace); addr = (vme->htval << 2) & ~(PAGE_SIZE - 1); dprintf("%s: %lx\n", __func__, addr); switch (vme->scause) { case SCAUSE_STORE_GUEST_PAGE_FAULT: ftype = VM_PROT_WRITE; break; case SCAUSE_FETCH_GUEST_PAGE_FAULT: ftype = VM_PROT_EXECUTE; break; case SCAUSE_LOAD_GUEST_PAGE_FAULT: ftype = VM_PROT_READ; break; default: panic("unknown page trap: %lu", vme->scause); } /* The page exists, but the page table needs to be updated. */ if (pmap_fault(pmap, addr, ftype)) return (0); map = &vm->vmspace->vm_map; rv = vm_fault(map, addr, ftype, VM_FAULT_NORMAL, NULL); if (rv != KERN_SUCCESS) { printf("%s: vm_fault failed, addr %lx, ftype %d, err %d\n", __func__, addr, ftype, rv); return (EFAULT); } return (0); } static int vm_handle_suspend(struct vcpu *vcpu, bool *retu) { struct vm *vm = vcpu->vm; int error, i; struct thread *td; error = 0; td = curthread; CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); /* * Wait until all 'active_cpus' have suspended themselves. * * Since a VM may be suspended at any time including when one or * more vcpus are doing a rendezvous we need to call the rendezvous * handler while we are waiting to prevent a deadlock. */ vcpu_lock(vcpu); while (error == 0) { if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) break; vcpu_require_state_locked(vcpu, VCPU_SLEEPING); msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); vcpu_require_state_locked(vcpu, VCPU_FROZEN); if (td_ast_pending(td, TDA_SUSPEND)) { vcpu_unlock(vcpu); error = thread_check_susp(td, false); vcpu_lock(vcpu); } } vcpu_unlock(vcpu); /* * Wakeup the other sleeping vcpus and return to userspace. */ for (i = 0; i < vm->maxcpus; i++) { if (CPU_ISSET(i, &vm->suspended_cpus)) { vcpu_notify_event(vm_vcpu(vm, i)); } } *retu = true; return (error); } int vm_run(struct vcpu *vcpu) { struct vm_eventinfo evinfo; struct vm_exit *vme; struct vm *vm; pmap_t pmap; int error; int vcpuid; bool retu; vm = vcpu->vm; dprintf("%s\n", __func__); vcpuid = vcpu->vcpuid; if (!CPU_ISSET(vcpuid, &vm->active_cpus)) return (EINVAL); if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) return (EINVAL); pmap = vmspace_pmap(vm->vmspace); vme = &vcpu->exitinfo; evinfo.rptr = NULL; evinfo.sptr = &vm->suspend; evinfo.iptr = NULL; restart: critical_enter(); restore_guest_fpustate(vcpu); vcpu_require_state(vcpu, VCPU_RUNNING); error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo); vcpu_require_state(vcpu, VCPU_FROZEN); save_guest_fpustate(vcpu); critical_exit(); if (error == 0) { retu = false; switch (vme->exitcode) { case VM_EXITCODE_INST_EMUL: vcpu->nextpc = vme->pc + vme->inst_length; error = vm_handle_inst_emul(vcpu, &retu); break; case VM_EXITCODE_WFI: vcpu->nextpc = vme->pc + vme->inst_length; error = vm_handle_wfi(vcpu, vme, &retu); break; case VM_EXITCODE_ECALL: /* Handle in userland. */ vcpu->nextpc = vme->pc + vme->inst_length; retu = true; break; case VM_EXITCODE_PAGING: vcpu->nextpc = vme->pc; error = vm_handle_paging(vcpu, &retu); break; case VM_EXITCODE_BOGUS: vcpu->nextpc = vme->pc; retu = false; error = 0; break; case VM_EXITCODE_SUSPENDED: vcpu->nextpc = vme->pc; error = vm_handle_suspend(vcpu, &retu); break; default: /* Handle in userland. 
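 * Setting retu makes vm_run() return to the VM_RUN ioctl, which
 * copies the vm_exit structure out to the userland monitor.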
*/ vcpu->nextpc = vme->pc; retu = true; break; } } if (error == 0 && retu == false) goto restart; return (error); } diff --git a/sys/riscv/vmm/vmm_dev_machdep.c b/sys/riscv/vmm/vmm_dev_machdep.c index 889d83f0ce2e..ba15d8dcd79e 100644 --- a/sys/riscv/vmm/vmm_dev_machdep.c +++ b/sys/riscv/vmm/vmm_dev_machdep.c @@ -1,126 +1,127 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 NetApp, Inc. * Copyright (C) 2015 Mihai Carabas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include "vmm_aplic.h" const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = { VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_GLA2GPA_NOFAULT, VMMDEV_IOCTL_LOCK_ONE_VCPU), VMMDEV_IOCTL(VM_ATTACH_APLIC, VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS), VMMDEV_IOCTL(VM_RAISE_MSI, 0), VMMDEV_IOCTL(VM_ASSERT_IRQ, 0), VMMDEV_IOCTL(VM_DEASSERT_IRQ, 0), }; const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls); int vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct vm_run *vmrun; struct vm_aplic_descr *aplic; struct vm_irq *vi; struct vm_exception *vmexc; struct vm_gla2gpa *gg; struct vm_msi *vmsi; int error; error = 0; switch (cmd) { case VM_RUN: { struct vm_exit *vme; vmrun = (struct vm_run *)data; vme = vm_exitinfo(vcpu); error = vm_run(vcpu); if (error != 0) break; error = copyout(vme, vmrun->vm_exit, sizeof(*vme)); break; } case VM_INJECT_EXCEPTION: vmexc = (struct vm_exception *)data; error = vm_inject_exception(vcpu, vmexc->scause); break; case VM_GLA2GPA_NOFAULT: gg = (struct vm_gla2gpa *)data; error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla, gg->prot, &gg->gpa, &gg->fault); KASSERT(error == 0 || error == EFAULT, ("%s: vm_gla2gpa unknown error %d", __func__, error)); break; case VM_ATTACH_APLIC: aplic = (struct vm_aplic_descr *)data; error = vm_attach_aplic(vm, aplic); break; case VM_RAISE_MSI: vmsi = (struct vm_msi *)data; error = vm_raise_msi(vm, vmsi->msg, vmsi->addr, vmsi->bus, vmsi->slot, vmsi->func); break; case VM_ASSERT_IRQ: vi = (struct vm_irq *)data; error = 
vm_assert_irq(vm, vi->irq); break; case VM_DEASSERT_IRQ: vi = (struct vm_irq *)data; error = vm_deassert_irq(vm, vi->irq); break; default: error = ENOTTY; break; } return (error); } diff --git a/sys/riscv/vmm/vmm_riscv.c b/sys/riscv/vmm/vmm_riscv.c index ca2ef50dbd24..0e46aca60fdf 100644 --- a/sys/riscv/vmm/vmm_riscv.c +++ b/sys/riscv/vmm/vmm_riscv.c @@ -1,949 +1,951 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2024-2025 Ruslan Bukin * * This software was developed by the University of Cambridge Computer * Laboratory (Department of Computer Science and Technology) under Innovate * UK project 105694, "Digital Security by Design (DSbD) Technology Platform * Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include + #include "riscv.h" #include "vmm_aplic.h" #include "vmm_fence.h" #include "vmm_stat.h" MALLOC_DEFINE(M_HYP, "RISC-V VMM HYP", "RISC-V VMM HYP"); DPCPU_DEFINE_STATIC(struct hypctx *, vcpu); static int m_op(uint32_t insn, int match, int mask) { if (((insn ^ match) & mask) == 0) return (1); return (0); } static inline void riscv_set_active_vcpu(struct hypctx *hypctx) { DPCPU_SET(vcpu, hypctx); } struct hypctx * riscv_get_active_vcpu(void) { return (DPCPU_GET(vcpu)); } int vmmops_modinit(void) { if (!has_hyp) { printf("vmm: riscv hart doesn't support H-extension.\n"); return (ENXIO); } return (0); } int vmmops_modcleanup(void) { return (0); } void * vmmops_init(struct vm *vm, pmap_t pmap) { struct hyp *hyp; vm_size_t size; size = round_page(sizeof(struct hyp) + sizeof(struct hypctx *) * vm_get_maxcpus(vm)); hyp = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO); hyp->vm = vm; hyp->aplic_attached = false; aplic_vminit(hyp); return (hyp); } static void vmmops_delegate(void) { uint64_t hedeleg; uint64_t hideleg; hedeleg = (1UL << SCAUSE_INST_MISALIGNED); hedeleg |= (1UL << SCAUSE_ILLEGAL_INSTRUCTION); hedeleg |= (1UL << SCAUSE_BREAKPOINT); hedeleg |= (1UL << SCAUSE_ECALL_USER); hedeleg |= (1UL << SCAUSE_INST_PAGE_FAULT); hedeleg |= (1UL << SCAUSE_LOAD_PAGE_FAULT); hedeleg |= (1UL << SCAUSE_STORE_PAGE_FAULT); csr_write(hedeleg, hedeleg); hideleg = (1UL << IRQ_SOFTWARE_HYPERVISOR); hideleg |= (1UL << IRQ_TIMER_HYPERVISOR); hideleg |= (1UL << IRQ_EXTERNAL_HYPERVISOR); csr_write(hideleg, hideleg); } static void vmmops_vcpu_restore_csrs(struct hypctx *hypctx) { struct hypcsr *csrs; csrs = &hypctx->guest_csrs; csr_write(vsstatus, csrs->vsstatus); csr_write(vsie, csrs->vsie); csr_write(vstvec, csrs->vstvec); csr_write(vsscratch, csrs->vsscratch); csr_write(vsepc, csrs->vsepc); csr_write(vscause, csrs->vscause); csr_write(vstval, csrs->vstval); csr_write(hvip, csrs->hvip); csr_write(vsatp, csrs->vsatp); } static void vmmops_vcpu_save_csrs(struct hypctx *hypctx) { struct hypcsr *csrs; csrs = &hypctx->guest_csrs; csrs->vsstatus = csr_read(vsstatus); csrs->vsie = csr_read(vsie); csrs->vstvec = csr_read(vstvec); csrs->vsscratch = csr_read(vsscratch); csrs->vsepc = csr_read(vsepc); csrs->vscause = csr_read(vscause); csrs->vstval = csr_read(vstval); csrs->hvip = csr_read(hvip); csrs->vsatp = csr_read(vsatp); } void * vmmops_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) { struct hypctx *hypctx; struct hyp *hyp; vm_size_t size; hyp = vmi; dprintf("%s: hyp %p\n", __func__, hyp); KASSERT(vcpuid >= 0 && vcpuid < vm_get_maxcpus(hyp->vm), ("%s: Invalid vcpuid %d", __func__, vcpuid)); size = round_page(sizeof(struct hypctx)); hypctx = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO); hypctx->hyp = hyp; hypctx->vcpu = vcpu1; hypctx->guest_scounteren = HCOUNTEREN_CY | HCOUNTEREN_TM; /* Fence queue. 
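 * A fixed-size array of VMM_FENCE_QUEUE_SIZE struct vmm_fence entries
 * protected by a spin mutex.  Queued remote-fence requests (presumably
 * posted by the SBI RFENCE handling behind vmm_fence.h) are processed
 * by vmm_fence_process() before each guest entry in vmmops_run().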
*/ hypctx->fence_queue = mallocarray(VMM_FENCE_QUEUE_SIZE, sizeof(struct vmm_fence), M_HYP, M_WAITOK | M_ZERO); mtx_init(&hypctx->fence_queue_mtx, "fence queue", NULL, MTX_SPIN); /* sstatus */ hypctx->guest_regs.hyp_sstatus = SSTATUS_SPP | SSTATUS_SPIE; hypctx->guest_regs.hyp_sstatus |= SSTATUS_FS_INITIAL; /* hstatus */ hypctx->guest_regs.hyp_hstatus = HSTATUS_SPV | HSTATUS_VTW; hypctx->guest_regs.hyp_hstatus |= HSTATUS_SPVP; hypctx->cpu_id = vcpuid; hyp->ctx[vcpuid] = hypctx; aplic_cpuinit(hypctx); vtimer_cpuinit(hypctx); return (hypctx); } static int riscv_vmm_pinit(pmap_t pmap) { dprintf("%s: pmap %p\n", __func__, pmap); pmap_pinit_stage(pmap, PM_STAGE2); return (1); } struct vmspace * vmmops_vmspace_alloc(vm_offset_t min, vm_offset_t max) { return (vmspace_alloc(min, max, riscv_vmm_pinit)); } void vmmops_vmspace_free(struct vmspace *vmspace) { pmap_remove_pages(vmspace_pmap(vmspace)); vmspace_free(vmspace); } static void riscv_unpriv_read(struct hypctx *hypctx, uintptr_t guest_addr, uint64_t *data, struct hyptrap *trap) { register struct hyptrap * htrap asm("a0"); uintptr_t old_hstatus; uintptr_t old_stvec; uintptr_t entry; uint64_t val; uint64_t tmp; int intr; entry = (uintptr_t)&vmm_unpriv_trap; htrap = trap; intr = intr_disable(); old_hstatus = csr_swap(hstatus, hypctx->guest_regs.hyp_hstatus); /* * Setup a temporary exception vector, so that if hlvx.hu raises * an exception we catch it in the vmm_unpriv_trap(). */ old_stvec = csr_swap(stvec, entry); /* * Read first two bytes of instruction assuming it could be a * compressed one. */ __asm __volatile(".option push\n" ".option norvc\n" "hlvx.hu %[val], (%[addr])\n" ".option pop\n" : [val] "=r" (val) : [addr] "r" (guest_addr), "r" (htrap) : "a1", "memory"); /* * Check if previous hlvx.hu did not raise an exception, and then * read the rest of instruction if it is a full-length one. */ if (trap->scause == -1 && (val & 0x3) == 0x3) { guest_addr += 2; __asm __volatile(".option push\n" ".option norvc\n" "hlvx.hu %[tmp], (%[addr])\n" ".option pop\n" : [tmp] "=r" (tmp) : [addr] "r" (guest_addr), "r" (htrap) : "a1", "memory"); val |= (tmp << 16); } csr_write(hstatus, old_hstatus); csr_write(stvec, old_stvec); intr_restore(intr); *data = val; } static int riscv_gen_inst_emul_data(struct hypctx *hypctx, struct vm_exit *vme_ret, struct hyptrap *trap) { uintptr_t guest_addr; struct vie *vie; uint64_t insn; int reg_num; int rs2, rd; int direction; int sign_extend; int access_size; guest_addr = vme_ret->sepc; KASSERT(vme_ret->scause == SCAUSE_FETCH_GUEST_PAGE_FAULT || vme_ret->scause == SCAUSE_LOAD_GUEST_PAGE_FAULT || vme_ret->scause == SCAUSE_STORE_GUEST_PAGE_FAULT, ("Invalid scause")); direction = vme_ret->scause == SCAUSE_STORE_GUEST_PAGE_FAULT ? 
VM_DIR_WRITE : VM_DIR_READ; sign_extend = 1; bzero(trap, sizeof(struct hyptrap)); trap->scause = -1; riscv_unpriv_read(hypctx, guest_addr, &insn, trap); if (trap->scause != -1) return (-1); if ((insn & 0x3) == 0x3) { rs2 = (insn & RS2_MASK) >> RS2_SHIFT; rd = (insn & RD_MASK) >> RD_SHIFT; if (direction == VM_DIR_WRITE) { if (m_op(insn, MATCH_SB, MASK_SB)) access_size = 1; else if (m_op(insn, MATCH_SH, MASK_SH)) access_size = 2; else if (m_op(insn, MATCH_SW, MASK_SW)) access_size = 4; else if (m_op(insn, MATCH_SD, MASK_SD)) access_size = 8; else { printf("unknown store instr at %lx", guest_addr); return (-2); } reg_num = rs2; } else { if (m_op(insn, MATCH_LB, MASK_LB)) access_size = 1; else if (m_op(insn, MATCH_LH, MASK_LH)) access_size = 2; else if (m_op(insn, MATCH_LW, MASK_LW)) access_size = 4; else if (m_op(insn, MATCH_LD, MASK_LD)) access_size = 8; else if (m_op(insn, MATCH_LBU, MASK_LBU)) { access_size = 1; sign_extend = 0; } else if (m_op(insn, MATCH_LHU, MASK_LHU)) { access_size = 2; sign_extend = 0; } else if (m_op(insn, MATCH_LWU, MASK_LWU)) { access_size = 4; sign_extend = 0; } else { printf("unknown load instr at %lx", guest_addr); return (-3); } reg_num = rd; } vme_ret->inst_length = 4; } else { rs2 = (insn >> 7) & 0x7; rs2 += 0x8; rd = (insn >> 2) & 0x7; rd += 0x8; if (direction == VM_DIR_WRITE) { if (m_op(insn, MATCH_C_SW, MASK_C_SW)) access_size = 4; else if (m_op(insn, MATCH_C_SD, MASK_C_SD)) access_size = 8; else { printf("unknown compressed store instr at %lx", guest_addr); return (-4); } } else { if (m_op(insn, MATCH_C_LW, MASK_C_LW)) access_size = 4; else if (m_op(insn, MATCH_C_LD, MASK_C_LD)) access_size = 8; else { printf("unknown load instr at %lx", guest_addr); return (-5); } } reg_num = rd; vme_ret->inst_length = 2; } vme_ret->u.inst_emul.gpa = (vme_ret->htval << 2) | (vme_ret->stval & 0x3); dprintf("guest_addr %lx insn %lx, reg %d, gpa %lx\n", guest_addr, insn, reg_num, vme_ret->u.inst_emul.gpa); vie = &vme_ret->u.inst_emul.vie; vie->dir = direction; vie->reg = reg_num; vie->sign_extend = sign_extend; vie->access_size = access_size; return (0); } static bool riscv_handle_world_switch(struct hypctx *hypctx, struct vm_exit *vme, pmap_t pmap) { struct hyptrap trap; uint64_t insn; uint64_t gpa; bool handled; int ret; int i; handled = false; if (vme->scause & SCAUSE_INTR) { /* * Host interrupt? Leave critical section to handle. */ vmm_stat_incr(hypctx->vcpu, VMEXIT_IRQ, 1); vme->exitcode = VM_EXITCODE_BOGUS; vme->inst_length = 0; return (handled); } switch (vme->scause) { case SCAUSE_FETCH_GUEST_PAGE_FAULT: case SCAUSE_LOAD_GUEST_PAGE_FAULT: case SCAUSE_STORE_GUEST_PAGE_FAULT: gpa = (vme->htval << 2) | (vme->stval & 0x3); if (vm_mem_allocated(hypctx->vcpu, gpa)) { vme->exitcode = VM_EXITCODE_PAGING; vme->inst_length = 0; vme->u.paging.gpa = gpa; } else { ret = riscv_gen_inst_emul_data(hypctx, vme, &trap); if (ret != 0) { vme->exitcode = VM_EXITCODE_HYP; vme->u.hyp.scause = trap.scause; break; } vme->exitcode = VM_EXITCODE_INST_EMUL; } break; case SCAUSE_ILLEGAL_INSTRUCTION: /* * TODO: handle illegal instruction properly. 
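 * Until then the fault is only logged; the exit is reported as
 * VM_EXITCODE_BOGUS with handled == false, so vm_run() re-enters the
 * guest at the same pc.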
*/ printf("%s: Illegal instruction at %lx stval 0x%lx htval " "0x%lx\n", __func__, vme->sepc, vme->stval, vme->htval); vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED, 1); vme->exitcode = VM_EXITCODE_BOGUS; handled = false; break; case SCAUSE_VIRTUAL_SUPERVISOR_ECALL: handled = vmm_sbi_ecall(hypctx->vcpu); if (handled == true) break; for (i = 0; i < nitems(vme->u.ecall.args); i++) vme->u.ecall.args[i] = hypctx->guest_regs.hyp_a[i]; vme->exitcode = VM_EXITCODE_ECALL; break; case SCAUSE_VIRTUAL_INSTRUCTION: insn = vme->stval; if (m_op(insn, MATCH_WFI, MASK_WFI)) vme->exitcode = VM_EXITCODE_WFI; else vme->exitcode = VM_EXITCODE_BOGUS; handled = false; break; default: printf("unknown scause %lx\n", vme->scause); vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED, 1); vme->exitcode = VM_EXITCODE_BOGUS; handled = false; break; } return (handled); } int vmmops_gla2gpa(void *vcpui, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa, int *is_fault) { /* Implement me. */ return (ENOSYS); } void riscv_send_ipi(struct hyp *hyp, cpuset_t *cpus) { struct hypctx *hypctx; struct vm *vm; uint16_t maxcpus; int i; vm = hyp->vm; maxcpus = vm_get_maxcpus(hyp->vm); for (i = 0; i < maxcpus; i++) { if (!CPU_ISSET(i, cpus)) continue; hypctx = hyp->ctx[i]; atomic_set_32(&hypctx->ipi_pending, 1); vcpu_notify_event(vm_vcpu(vm, i)); } } int riscv_check_ipi(struct hypctx *hypctx, bool clear) { int val; if (clear) val = atomic_swap_32(&hypctx->ipi_pending, 0); else val = hypctx->ipi_pending; return (val); } bool riscv_check_interrupts_pending(struct hypctx *hypctx) { if (hypctx->interrupts_pending) return (true); return (false); } static void riscv_sync_interrupts(struct hypctx *hypctx) { int pending; pending = aplic_check_pending(hypctx); if (pending) hypctx->guest_csrs.hvip |= HVIP_VSEIP; else hypctx->guest_csrs.hvip &= ~HVIP_VSEIP; /* Guest clears VSSIP bit manually. */ if (riscv_check_ipi(hypctx, true)) hypctx->guest_csrs.hvip |= HVIP_VSSIP; if (riscv_check_interrupts_pending(hypctx)) hypctx->guest_csrs.hvip |= HVIP_VSTIP; else hypctx->guest_csrs.hvip &= ~HVIP_VSTIP; csr_write(hvip, hypctx->guest_csrs.hvip); } int vmmops_run(void *vcpui, register_t pc, pmap_t pmap, struct vm_eventinfo *evinfo) { struct hypctx *hypctx; struct vm_exit *vme; struct vcpu *vcpu; register_t val; uint64_t hvip; bool handled; hypctx = (struct hypctx *)vcpui; vcpu = hypctx->vcpu; vme = vm_exitinfo(vcpu); hypctx->guest_regs.hyp_sepc = (uint64_t)pc; vmmops_delegate(); /* * From The RISC-V Instruction Set Manual * Volume II: RISC-V Privileged Architectures * * If the new virtual machine's guest physical page tables * have been modified, it may be necessary to execute an HFENCE.GVMA * instruction (see Section 5.3.2) before or after writing hgatp. */ __asm __volatile("hfence.gvma" ::: "memory"); csr_write(hgatp, pmap->pm_satp); if (has_sstc) csr_write(henvcfg, HENVCFG_STCE); csr_write(hie, HIE_VSEIE | HIE_VSSIE | HIE_SGEIE); /* TODO: should we trap rdcycle / rdtime? */ csr_write(hcounteren, HCOUNTEREN_CY | HCOUNTEREN_TM); vmmops_vcpu_restore_csrs(hypctx); for (;;) { dprintf("%s: pc %lx\n", __func__, pc); if (hypctx->has_exception) { hypctx->has_exception = false; /* * TODO: implement exception injection. */ } val = intr_disable(); /* Check if the vcpu is suspended */ if (vcpu_suspended(evinfo)) { intr_restore(val); vm_exit_suspended(vcpu, pc); break; } if (vcpu_debugged(vcpu)) { intr_restore(val); vm_exit_debug(vcpu, pc); break; } /* * TODO: What happens if a timer interrupt is asserted exactly * here, but for the previous VM? 
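 * With host interrupts still disabled, the code below marks this vcpu
 * as the hart's active one, flushes the emulated APLIC state, folds
 * pending interrupts into hvip via riscv_sync_interrupts() and
 * processes any queued fences before vmm_switch() enters the guest.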
*/ riscv_set_active_vcpu(hypctx); aplic_flush_hwstate(hypctx); riscv_sync_interrupts(hypctx); vmm_fence_process(hypctx); dprintf("%s: Entering guest VM, vsatp %lx, ss %lx hs %lx\n", __func__, csr_read(vsatp), hypctx->guest_regs.hyp_sstatus, hypctx->guest_regs.hyp_hstatus); vmm_switch(hypctx); dprintf("%s: Leaving guest VM, hstatus %lx\n", __func__, hypctx->guest_regs.hyp_hstatus); /* Guest can clear VSSIP. It can't clear VSTIP or VSEIP. */ hvip = csr_read(hvip); if ((hypctx->guest_csrs.hvip ^ hvip) & HVIP_VSSIP) { if (hvip & HVIP_VSSIP) { /* TODO: VSSIP was set by guest. */ } else { /* VSSIP was cleared by guest. */ hypctx->guest_csrs.hvip &= ~HVIP_VSSIP; } } aplic_sync_hwstate(hypctx); /* * TODO: deactivate stage 2 pmap here if needed. */ vme->scause = csr_read(scause); vme->sepc = csr_read(sepc); vme->stval = csr_read(stval); vme->htval = csr_read(htval); vme->htinst = csr_read(htinst); intr_restore(val); vmm_stat_incr(vcpu, VMEXIT_COUNT, 1); vme->pc = hypctx->guest_regs.hyp_sepc; vme->inst_length = INSN_SIZE; handled = riscv_handle_world_switch(hypctx, vme, pmap); if (handled == false) /* Exit loop to emulate instruction. */ break; else { /* Resume guest execution from the next instruction. */ hypctx->guest_regs.hyp_sepc += vme->inst_length; } } vmmops_vcpu_save_csrs(hypctx); return (0); } static void riscv_pcpu_vmcleanup(void *arg) { struct hyp *hyp; int i, maxcpus; hyp = arg; maxcpus = vm_get_maxcpus(hyp->vm); for (i = 0; i < maxcpus; i++) { if (riscv_get_active_vcpu() == hyp->ctx[i]) { riscv_set_active_vcpu(NULL); break; } } } void vmmops_vcpu_cleanup(void *vcpui) { struct hypctx *hypctx; hypctx = vcpui; dprintf("%s\n", __func__); aplic_cpucleanup(hypctx); mtx_destroy(&hypctx->fence_queue_mtx); free(hypctx->fence_queue, M_HYP); free(hypctx, M_HYP); } void vmmops_cleanup(void *vmi) { struct hyp *hyp; hyp = vmi; dprintf("%s\n", __func__); aplic_vmcleanup(hyp); smp_rendezvous(NULL, riscv_pcpu_vmcleanup, NULL, hyp); free(hyp, M_HYP); } /* * Return register value. Registers have different sizes and an explicit cast * must be made to ensure proper conversion. 
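 * hypctx_regptr() maps a VM_REG_GUEST_* identifier to the matching
 * guest_regs field and returns NULL for anything else; the callers
 * turn NULL into EINVAL, and vmmops_getreg() short-circuits
 * VM_REG_GUEST_ZERO to 0 before getting here.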
*/ static uint64_t * hypctx_regptr(struct hypctx *hypctx, int reg) { switch (reg) { case VM_REG_GUEST_RA: return (&hypctx->guest_regs.hyp_ra); case VM_REG_GUEST_SP: return (&hypctx->guest_regs.hyp_sp); case VM_REG_GUEST_GP: return (&hypctx->guest_regs.hyp_gp); case VM_REG_GUEST_TP: return (&hypctx->guest_regs.hyp_tp); case VM_REG_GUEST_T0: return (&hypctx->guest_regs.hyp_t[0]); case VM_REG_GUEST_T1: return (&hypctx->guest_regs.hyp_t[1]); case VM_REG_GUEST_T2: return (&hypctx->guest_regs.hyp_t[2]); case VM_REG_GUEST_S0: return (&hypctx->guest_regs.hyp_s[0]); case VM_REG_GUEST_S1: return (&hypctx->guest_regs.hyp_s[1]); case VM_REG_GUEST_A0: return (&hypctx->guest_regs.hyp_a[0]); case VM_REG_GUEST_A1: return (&hypctx->guest_regs.hyp_a[1]); case VM_REG_GUEST_A2: return (&hypctx->guest_regs.hyp_a[2]); case VM_REG_GUEST_A3: return (&hypctx->guest_regs.hyp_a[3]); case VM_REG_GUEST_A4: return (&hypctx->guest_regs.hyp_a[4]); case VM_REG_GUEST_A5: return (&hypctx->guest_regs.hyp_a[5]); case VM_REG_GUEST_A6: return (&hypctx->guest_regs.hyp_a[6]); case VM_REG_GUEST_A7: return (&hypctx->guest_regs.hyp_a[7]); case VM_REG_GUEST_S2: return (&hypctx->guest_regs.hyp_s[2]); case VM_REG_GUEST_S3: return (&hypctx->guest_regs.hyp_s[3]); case VM_REG_GUEST_S4: return (&hypctx->guest_regs.hyp_s[4]); case VM_REG_GUEST_S5: return (&hypctx->guest_regs.hyp_s[5]); case VM_REG_GUEST_S6: return (&hypctx->guest_regs.hyp_s[6]); case VM_REG_GUEST_S7: return (&hypctx->guest_regs.hyp_s[7]); case VM_REG_GUEST_S8: return (&hypctx->guest_regs.hyp_s[8]); case VM_REG_GUEST_S9: return (&hypctx->guest_regs.hyp_s[9]); case VM_REG_GUEST_S10: return (&hypctx->guest_regs.hyp_s[10]); case VM_REG_GUEST_S11: return (&hypctx->guest_regs.hyp_s[11]); case VM_REG_GUEST_T3: return (&hypctx->guest_regs.hyp_t[3]); case VM_REG_GUEST_T4: return (&hypctx->guest_regs.hyp_t[4]); case VM_REG_GUEST_T5: return (&hypctx->guest_regs.hyp_t[5]); case VM_REG_GUEST_T6: return (&hypctx->guest_regs.hyp_t[6]); case VM_REG_GUEST_SEPC: return (&hypctx->guest_regs.hyp_sepc); default: break; } return (NULL); } int vmmops_getreg(void *vcpui, int reg, uint64_t *retval) { uint64_t *regp; int running, hostcpu; struct hypctx *hypctx; hypctx = vcpui; running = vcpu_is_running(hypctx->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("%s: %s%d is running", __func__, vm_name(hypctx->hyp->vm), vcpu_vcpuid(hypctx->vcpu)); if (reg == VM_REG_GUEST_ZERO) { *retval = 0; return (0); } regp = hypctx_regptr(hypctx, reg); if (regp == NULL) return (EINVAL); *retval = *regp; return (0); } int vmmops_setreg(void *vcpui, int reg, uint64_t val) { struct hypctx *hypctx; int running, hostcpu; uint64_t *regp; hypctx = vcpui; running = vcpu_is_running(hypctx->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("%s: %s%d is running", __func__, vm_name(hypctx->hyp->vm), vcpu_vcpuid(hypctx->vcpu)); regp = hypctx_regptr(hypctx, reg); if (regp == NULL) return (EINVAL); *regp = val; return (0); } int vmmops_exception(void *vcpui, uint64_t scause) { struct hypctx *hypctx; int running, hostcpu; hypctx = vcpui; running = vcpu_is_running(hypctx->vcpu, &hostcpu); if (running && hostcpu != curcpu) panic("%s: %s%d is running", __func__, vm_name(hypctx->hyp->vm), vcpu_vcpuid(hypctx->vcpu)); /* TODO: implement me. 
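 * Until exception injection is implemented, vm_inject_exception()
 * (reached via the VM_INJECT_EXCEPTION ioctl) always fails with
 * ENOSYS.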
*/ return (ENOSYS); } int vmmops_getcap(void *vcpui, int num, int *retval) { int ret; ret = ENOENT; switch (num) { case VM_CAP_SSTC: *retval = has_sstc; ret = 0; break; case VM_CAP_UNRESTRICTED_GUEST: *retval = 1; ret = 0; break; default: break; } return (ret); } int vmmops_setcap(void *vcpui, int num, int val) { return (ENOENT); }