Index: head/sys/amd64/vmm/amd/svm_support.S
===================================================================
--- head/sys/amd64/vmm/amd/svm_support.S	(revision 328010)
+++ head/sys/amd64/vmm/amd/svm_support.S	(revision 328011)
@@ -1,132 +1,144 @@
 /*-
  * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 #include <machine/asmacros.h>
 
 #include "svm_assym.h"
 
 /*
  * Be friendly to DTrace FBT's prologue/epilogue pattern matching.
  *
  * They are also responsible for saving/restoring the host %rbp across VMRUN.
  */
 #define VENTER  push %rbp ; mov %rsp,%rbp
 #define VLEAVE  pop %rbp
 
 #define VMLOAD  .byte 0x0f, 0x01, 0xda
 #define VMRUN   .byte 0x0f, 0x01, 0xd8
 #define VMSAVE  .byte 0x0f, 0x01, 0xdb
 
 /*
  * svm_launch(uint64_t vmcb, struct svm_regctx *gctx, struct pcpu *pcpu)
  * %rdi: physical address of VMCB
  * %rsi: pointer to guest context
  * %rdx: pointer to the pcpu data
  */
 ENTRY(svm_launch)
 	VENTER
 
 	/* save pointer to the pcpu data */
 	push %rdx
 
 	/*
 	 * Host register state saved across a VMRUN.
 	 *
 	 * All "callee saved registers" except:
 	 * %rsp: because it is preserved by the processor across VMRUN.
 	 * %rbp: because it is saved/restored by the function prologue/epilogue.
 	 */
 	push %rbx
 	push %r12
 	push %r13
 	push %r14
 	push %r15
 
 	/* Save the physical address of the VMCB in %rax */
 	movq %rdi, %rax
 
 	push %rsi		/* push guest context pointer on the stack */
 
 	/*
 	 * Restore guest state.
 	 */
 	movq SCTX_R8(%rsi), %r8
 	movq SCTX_R9(%rsi), %r9
 	movq SCTX_R10(%rsi), %r10
 	movq SCTX_R11(%rsi), %r11
 	movq SCTX_R12(%rsi), %r12
 	movq SCTX_R13(%rsi), %r13
 	movq SCTX_R14(%rsi), %r14
 	movq SCTX_R15(%rsi), %r15
 	movq SCTX_RBP(%rsi), %rbp
 	movq SCTX_RBX(%rsi), %rbx
 	movq SCTX_RCX(%rsi), %rcx
 	movq SCTX_RDX(%rsi), %rdx
 	movq SCTX_RDI(%rsi), %rdi
 	movq SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */
 
 	VMLOAD
 	VMRUN
 	VMSAVE
 
 	pop %rax	/* pop guest context pointer from the stack */
 
 	/*
 	 * Save guest state.
 	 */
 	movq %r8, SCTX_R8(%rax)
 	movq %r9, SCTX_R9(%rax)
 	movq %r10, SCTX_R10(%rax)
 	movq %r11, SCTX_R11(%rax)
 	movq %r12, SCTX_R12(%rax)
 	movq %r13, SCTX_R13(%rax)
 	movq %r14, SCTX_R14(%rax)
 	movq %r15, SCTX_R15(%rax)
 	movq %rbp, SCTX_RBP(%rax)
 	movq %rbx, SCTX_RBX(%rax)
 	movq %rcx, SCTX_RCX(%rax)
 	movq %rdx, SCTX_RDX(%rax)
 	movq %rdi, SCTX_RDI(%rax)
 	movq %rsi, SCTX_RSI(%rax)
 
 	/* Restore host state */
 	pop %r15
 	pop %r14
 	pop %r13
 	pop %r12
 	pop %rbx
 
 	/* Restore %GS.base to point to the host's pcpu data */
 	pop %rdx
 	mov %edx, %eax
 	shr $32, %rdx
-	mov $MSR_GSBASE, %ecx
+	mov $MSR_GSBASE, %rcx
 	wrmsr
+
+	/*
+	 * Clobber the remaining registers with guest contents so they
+	 * can't be misused.
+	 */
+	xor %rbp, %rbp
+	xor %rdi, %rdi
+	xor %rsi, %rsi
+	xor %r8, %r8
+	xor %r9, %r9
+	xor %r10, %r10
+	xor %r11, %r11
 
 	VLEAVE
 	ret
 END(svm_launch)
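
On the SVM side the new xor block only needs to cover %rbp, %rdi, %rsi and %r8-%r11: %rbx and %r12-%r15 were just reloaded from the host stack, and %rax, %rcx and %rdx are overwritten by the MSR_GSBASE wrmsr sequence, so none of those still hold guest values. A minimal C sketch of the same scrubbing idea is below; it is illustrative only (clear_volatile_gprs() is a made-up name, not part of this patch) and assumes GCC or Clang on amd64.

/*
 * Illustrative sketch only (not FreeBSD source): scrub the ABI-volatile
 * GPRs after returning from code that ran with guest-controlled register
 * state, in the same spirit as the xor sequence added to svm_launch().
 */
static inline void
clear_volatile_gprs(void)
{
	__asm__ __volatile__(
	    /* a 32-bit xor zero-extends, clearing the full 64-bit register */
	    "xorl %%r8d, %%r8d\n\t"
	    "xorl %%r9d, %%r9d\n\t"
	    "xorl %%r10d, %%r10d\n\t"
	    "xorl %%r11d, %%r11d\n\t"
	    "xorl %%esi, %%esi\n\t"
	    "xorl %%edi, %%edi"
	    : : : "r8", "r9", "r10", "r11", "rsi", "rdi");
}

A caller would run this immediately after the routine that executed guest code returns, before touching any host data.
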
Index: head/sys/amd64/vmm/intel/vmx_support.S
===================================================================
--- head/sys/amd64/vmm/intel/vmx_support.S	(revision 328010)
+++ head/sys/amd64/vmm/intel/vmx_support.S	(revision 328011)
@@ -1,262 +1,278 @@
 /*-
  * Copyright (c) 2011 NetApp, Inc.
  * Copyright (c) 2013 Neel Natu
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #include <machine/asmacros.h>
 
 #include "vmx_assym.h"
 
 #ifdef SMP
 #define	LK	lock ;
 #else
 #define	LK
 #endif
 
 /* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
 #define VENTER  push %rbp ; mov %rsp,%rbp
 #define VLEAVE  pop %rbp
 
 /*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
  * On "return" all registers are updated to reflect guest state. The two
  * exceptions are %rip and %rsp. These registers are atomically switched
  * by hardware from the guest area of the vmcs.
  *
  * We modify %rsp to point to the 'vmxctx' so we can use it to restore
  * host context in case of an error with 'vmlaunch' or 'vmresume'.
  */
 #define	VMX_GUEST_RESTORE					\
 	movq	%rdi,%rsp;					\
 	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;			\
 	movq	%rsi,%cr2;					\
 	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;			\
 	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;			\
 	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;			\
 	movq	VMXCTX_GUEST_R8(%rdi),%r8;			\
 	movq	VMXCTX_GUEST_R9(%rdi),%r9;			\
 	movq	VMXCTX_GUEST_RAX(%rdi),%rax;			\
 	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;			\
 	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;			\
 	movq	VMXCTX_GUEST_R10(%rdi),%r10;			\
 	movq	VMXCTX_GUEST_R11(%rdi),%r11;			\
 	movq	VMXCTX_GUEST_R12(%rdi),%r12;			\
 	movq	VMXCTX_GUEST_R13(%rdi),%r13;			\
 	movq	VMXCTX_GUEST_R14(%rdi),%r14;			\
 	movq	VMXCTX_GUEST_R15(%rdi),%r15;			\
 	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */
 
 /*
+ * Clobber the remaining registers with guest contents so they can't
+ * be misused.
+ */
+#define	VMX_GUEST_CLOBBER					\
+	xor	%rax, %rax;					\
+	xor	%rcx, %rcx;					\
+	xor	%rdx, %rdx;					\
+	xor	%rsi, %rsi;					\
+	xor	%r8, %r8;					\
+	xor	%r9, %r9;					\
+	xor	%r10, %r10;					\
+	xor	%r11, %r11;
+
+/*
  * Save and restore the host context.
  *
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  */
 #define	VMX_HOST_SAVE						\
 	movq	%r15, VMXCTX_HOST_R15(%rdi);			\
 	movq	%r14, VMXCTX_HOST_R14(%rdi);			\
 	movq	%r13, VMXCTX_HOST_R13(%rdi);			\
 	movq	%r12, VMXCTX_HOST_R12(%rdi);			\
 	movq	%rbp, VMXCTX_HOST_RBP(%rdi);			\
 	movq	%rsp, VMXCTX_HOST_RSP(%rdi);			\
 	movq	%rbx, VMXCTX_HOST_RBX(%rdi);			\
 
 #define	VMX_HOST_RESTORE					\
 	movq	VMXCTX_HOST_R15(%rdi), %r15;			\
 	movq	VMXCTX_HOST_R14(%rdi), %r14;			\
 	movq	VMXCTX_HOST_R13(%rdi), %r13;			\
 	movq	VMXCTX_HOST_R12(%rdi), %r12;			\
 	movq	VMXCTX_HOST_RBP(%rdi), %rbp;			\
 	movq	VMXCTX_HOST_RSP(%rdi), %rsp;			\
 	movq	VMXCTX_HOST_RBX(%rdi), %rbx;			\
 
 /*
  * vmx_enter_guest(struct vmxctx *vmxctx, int launched)
  * %rdi: pointer to the 'vmxctx'
  * %rsi: pointer to the 'vmx'
  * %edx: launch state of the VMCS
  * Interrupts must be disabled on entry.
  */
 ENTRY(vmx_enter_guest)
 	VENTER
 	/*
 	 * Save host state before doing anything else.
 	 */
 	VMX_HOST_SAVE
 
 	/*
 	 * Activate guest pmap on this cpu.
 	 */
 	movq	VMXCTX_PMAP(%rdi), %r11
 	movl	PCPU(CPUID), %eax
 	LK btsl	%eax, PM_ACTIVE(%r11)
 
 	/*
 	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
 	 * then we must invalidate all mappings associated with this EPTP.
 	 */
 	movq	PM_EPTGEN(%r11), %r10
 	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
 	je	guest_restore
 
 	/* Refresh 'vmx->eptgen[curcpu]' */
 	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
 
 	/* Setup the invept descriptor on the host stack */
 	mov	%rsp, %r11
 	movq	VMX_EPTP(%rsi), %rax
 	movq	%rax, -16(%r11)
 	movq	$0x0, -8(%r11)
 	mov	$0x1, %eax		/* Single context invalidate */
 	invept	-16(%r11), %rax
 	jbe	invept_error		/* Check invept instruction error */
 
 guest_restore:
 	cmpl	$0, %edx
 	je	do_launch
 
 	VMX_GUEST_RESTORE
 	vmresume
 	/*
 	 * In the common case 'vmresume' returns back to the host through
 	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
 	 *
 	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
 	 */
 	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
 	movl	$VMX_VMRESUME_ERROR, %eax
 	jmp	decode_inst_error
 
 do_launch:
 	VMX_GUEST_RESTORE
 	vmlaunch
 	/*
 	 * In the common case 'vmlaunch' returns back to the host through
 	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
 	 *
 	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
 	 */
 	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
 	movl	$VMX_VMLAUNCH_ERROR, %eax
 	jmp	decode_inst_error
 
 invept_error:
 	movl	$VMX_INVEPT_ERROR, %eax
 	jmp	decode_inst_error
 
 decode_inst_error:
 	movl	$VM_FAIL_VALID, %r11d
 	jz	inst_error
 	movl	$VM_FAIL_INVALID, %r11d
 inst_error:
 	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)
 
 	/*
 	 * The return value is already populated in %eax so we cannot use
 	 * it as a scratch register beyond this point.
 	 */
 
 	/*
 	 * Deactivate guest pmap from this cpu.
 	 */
 	movq	VMXCTX_PMAP(%rdi), %r11
 	movl	PCPU(CPUID), %r10d
 	LK btrl	%r10d, PM_ACTIVE(%r11)
 
 	VMX_HOST_RESTORE
 	VLEAVE
 	ret
 
 /*
  * Non-error VM-exit from the guest. Make this a label so it can
  * be used by C code when setting up the VMCS.
  * The VMCS-restored %rsp points to the struct vmxctx
  */
 	ALIGN_TEXT
 	.globl	vmx_exit_guest
 vmx_exit_guest:
 	/*
 	 * Save guest state that is not automatically saved in the vmcs.
 	 */
 	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
 	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
 	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
 	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
 	movq	%r8,VMXCTX_GUEST_R8(%rsp)
 	movq	%r9,VMXCTX_GUEST_R9(%rsp)
 	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
 	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
 	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
 	movq	%r10,VMXCTX_GUEST_R10(%rsp)
 	movq	%r11,VMXCTX_GUEST_R11(%rsp)
 	movq	%r12,VMXCTX_GUEST_R12(%rsp)
 	movq	%r13,VMXCTX_GUEST_R13(%rsp)
 	movq	%r14,VMXCTX_GUEST_R14(%rsp)
 	movq	%r15,VMXCTX_GUEST_R15(%rsp)
 
 	movq	%cr2,%rdi
 	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)
 
 	movq	%rsp,%rdi
 
 	/*
 	 * Deactivate guest pmap from this cpu.
 	 */
 	movq	VMXCTX_PMAP(%rdi), %r11
 	movl	PCPU(CPUID), %r10d
 	LK btrl	%r10d, PM_ACTIVE(%r11)
 
 	VMX_HOST_RESTORE
+
+	VMX_GUEST_CLOBBER
 
 	/*
 	 * This will return to the caller of 'vmx_enter_guest()' with a return
 	 * value of VMX_GUEST_VMEXIT.
 	 */
 	movl	$VMX_GUEST_VMEXIT, %eax
 	VLEAVE
 	ret
 END(vmx_enter_guest)
 
 /*
  * %rdi = interrupt handler entry point
  *
  * Calling sequence described in the "Instruction Set Reference" for the "INT"
  * instruction in Intel SDM, Vol 2.
  */
 ENTRY(vmx_call_isr)
 	VENTER
 	mov	%rsp, %r11		/* save %rsp */
 	and	$~0xf, %rsp		/* align on 16-byte boundary */
 	pushq	$KERNEL_SS		/* %ss */
 	pushq	%r11			/* %rsp */
 	pushfq				/* %rflags */
 	pushq	$KERNEL_CS		/* %cs */
 	cli				/* disable interrupts */
 	callq	*%rdi			/* push %rip and call isr */
 	VLEAVE
 	ret
 END(vmx_call_isr)
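
For reference, the invept setup in vmx_enter_guest() above stores a 16-byte descriptor at -16(%rsp): the EPT pointer in the low quadword and a zeroed reserved quadword above it, with register operand 1 requesting a single-context invalidation. A hedged C sketch of that layout follows; the struct name is illustrative and not taken from the FreeBSD sources.

#include <stdint.h>

/*
 * Sketch of the INVEPT descriptor laid out on the stack by
 * vmx_enter_guest() (Intel SDM layout); the name "invept_desc" is
 * illustrative only.
 */
struct invept_desc {
	uint64_t eptp;		/* EPT pointer, copied from VMX_EPTP(%rsi) */
	uint64_t reserved;	/* must be zero */
};

/* INVEPT type 1 ("single context") invalidates mappings for this EPTP only. */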