Index: head/sys/amd64/vmm/amd/svm_support.S
===================================================================
--- head/sys/amd64/vmm/amd/svm_support.S
+++ head/sys/amd64/vmm/amd/svm_support.S
@@ -113,6 +113,23 @@
 	movq %rdi, SCTX_RDI(%rax)
 	movq %rsi, SCTX_RSI(%rax)
 
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
 	/* Restore host state */
 	pop %r15
 	pop %r14
Index: head/sys/amd64/vmm/intel/vmcs.c
===================================================================
--- head/sys/amd64/vmm/intel/vmcs.c
+++ head/sys/amd64/vmm/intel/vmcs.c
@@ -34,6 +34,7 @@
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/pcpu.h>
 
@@ -52,6 +53,12 @@
 #include <ddb/ddb.h>
 #endif
 
+SYSCTL_DECL(_hw_vmm_vmx);
+
+static int no_flush_rsb;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
+    &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
+
 static uint64_t
 vmcs_fix_regval(uint32_t encoding, uint64_t val)
 {
@@ -403,8 +410,15 @@
 		goto done;
 
 	/* instruction pointer */
-	if ((error = vmwrite(VMCS_HOST_RIP, (u_long)vmx_exit_guest)) != 0)
-		goto done;
+	if (no_flush_rsb) {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest)) != 0)
+			goto done;
+	} else {
+		if ((error = vmwrite(VMCS_HOST_RIP,
+		    (u_long)vmx_exit_guest_flush_rsb)) != 0)
+			goto done;
+	}
 
 	/* link pointer */
 	if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)
Index: head/sys/amd64/vmm/intel/vmx.h
===================================================================
--- head/sys/amd64/vmm/intel/vmx.h
+++ head/sys/amd64/vmm/intel/vmx.h
@@ -150,5 +150,6 @@
 int	vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
 
 extern char	vmx_exit_guest[];
+extern char	vmx_exit_guest_flush_rsb[];
 
 #endif
Index: head/sys/amd64/vmm/intel/vmx_support.S
===================================================================
--- head/sys/amd64/vmm/intel/vmx_support.S
+++ head/sys/amd64/vmm/intel/vmx_support.S
@@ -42,6 +42,29 @@
 #define VLEAVE	pop %rbp
 
 /*
+ * Save the guest context.
+ */
+#define	VMX_GUEST_SAVE							\
+	movq %rdi,VMXCTX_GUEST_RDI(%rsp);				\
+	movq %rsi,VMXCTX_GUEST_RSI(%rsp);				\
+	movq %rdx,VMXCTX_GUEST_RDX(%rsp);				\
+	movq %rcx,VMXCTX_GUEST_RCX(%rsp);				\
+	movq %r8,VMXCTX_GUEST_R8(%rsp);					\
+	movq %r9,VMXCTX_GUEST_R9(%rsp);					\
+	movq %rax,VMXCTX_GUEST_RAX(%rsp);				\
+	movq %rbx,VMXCTX_GUEST_RBX(%rsp);				\
+	movq %rbp,VMXCTX_GUEST_RBP(%rsp);				\
+	movq %r10,VMXCTX_GUEST_R10(%rsp);				\
+	movq %r11,VMXCTX_GUEST_R11(%rsp);				\
+	movq %r12,VMXCTX_GUEST_R12(%rsp);				\
+	movq %r13,VMXCTX_GUEST_R13(%rsp);				\
+	movq %r14,VMXCTX_GUEST_R14(%rsp);				\
+	movq %r15,VMXCTX_GUEST_R15(%rsp);				\
+	movq %cr2,%rdi;							\
+	movq %rdi,VMXCTX_GUEST_CR2(%rsp);				\
+	movq %rsp,%rdi;
+
+/*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
  * On "return" all registers are updated to reflect guest state. The two
@@ -211,31 +234,55 @@
  * The VMCS-restored %rsp points to the struct vmxctx
  */
 	ALIGN_TEXT
-	.globl	vmx_exit_guest
-vmx_exit_guest:
+	.globl	vmx_exit_guest_flush_rsb
+vmx_exit_guest_flush_rsb:
 	/*
 	 * Save guest state that is not automatically saved in the vmcs.
 	 */
-	movq %rdi,VMXCTX_GUEST_RDI(%rsp)
-	movq %rsi,VMXCTX_GUEST_RSI(%rsp)
-	movq %rdx,VMXCTX_GUEST_RDX(%rsp)
-	movq %rcx,VMXCTX_GUEST_RCX(%rsp)
-	movq %r8,VMXCTX_GUEST_R8(%rsp)
-	movq %r9,VMXCTX_GUEST_R9(%rsp)
-	movq %rax,VMXCTX_GUEST_RAX(%rsp)
-	movq %rbx,VMXCTX_GUEST_RBX(%rsp)
-	movq %rbp,VMXCTX_GUEST_RBP(%rsp)
-	movq %r10,VMXCTX_GUEST_R10(%rsp)
-	movq %r11,VMXCTX_GUEST_R11(%rsp)
-	movq %r12,VMXCTX_GUEST_R12(%rsp)
-	movq %r13,VMXCTX_GUEST_R13(%rsp)
-	movq %r14,VMXCTX_GUEST_R14(%rsp)
-	movq %r15,VMXCTX_GUEST_R15(%rsp)
+	VMX_GUEST_SAVE
 
-	movq %cr2,%rdi
-	movq %rdi,VMXCTX_GUEST_CR2(%rsp)
+	/*
+	 * Deactivate guest pmap from this cpu.
+	 */
+	movq VMXCTX_PMAP(%rdi), %r11
+	movl PCPU(CPUID), %r10d
+	LK btrl %r10d, PM_ACTIVE(%r11)
 
-	movq %rsp,%rdi
+	VMX_HOST_RESTORE
+
+	VMX_GUEST_CLOBBER
+
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	mov $16, %ecx	/* 16 iterations, two calls per loop */
+	mov %rsp, %rax
+0:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	call 2f		/* create an RSB entry. */
+1:	pause
+	call 1b		/* capture rogue speculation. */
+2:	sub $1, %ecx
+	jnz 0b
+	mov %rax, %rsp
+
+	/*
+	 * This will return to the caller of 'vmx_enter_guest()' with a return
+	 * value of VMX_GUEST_VMEXIT.
+	 */
+	movl $VMX_GUEST_VMEXIT, %eax
+	VLEAVE
+	ret
+
+	.globl	vmx_exit_guest
+vmx_exit_guest:
+	/*
+	 * Save guest state that is not automatically saved in the vmcs.
+	 */
+	VMX_GUEST_SAVE
 
 	/*
 	 * Deactivate guest pmap from this cpu.