
Mitigate CVE-2017-5715 by flushing the return stack buffer (RSB) upon returning from the guest
Closed, Public

Authored by tychon on Feb 8 2018, 9:00 PM.

Details

Summary

Provide further mitigation against CVE-2017-5715 by flushing the return stack buffer (RSB) upon returning from the guest.

This was inspired by this Linux commit:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/arch/x86/kvm?id=117cc7a908c83697b0b737d15ae1eb5943afe35b
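
For background, RSB stuffing works by executing a chain of calls whose pushed return addresses all point at benign speculation traps; any subsequent ret in the host then predicts into a trap rather than into a guest-trained target. A minimal standalone sketch in GNU as, modeled on the loop from the Linux commit above (the symbol name rsb_flush and the pause/lfence/jmp trap style are illustrative, not taken from this patch):

/*
 * rsb_flush: overwrite all 32 return stack buffer entries.
 * Clobbers %rax and %ecx.
 */
	.text
	.globl	rsb_flush
	.type	rsb_flush, @function
rsb_flush:
	movl	$16, %ecx		/* 16 iterations, two calls per loop */
	movq	%rsp, %rax		/* the calls below grow the stack */
1:	call	2f			/* create an RSB entry */
3:	pause				/* speculation trap */
	lfence
	jmp	3b			/* capture rogue speculation */
2:	call	4f			/* create a second RSB entry */
5:	pause
	lfence
	jmp	5b
4:	subl	$1, %ecx
	jnz	1b
	movq	%rax, %rsp		/* discard the 32 pushed return addresses */
	ret
	.size	rsb_flush, . - rsb_flush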

Diff Detail

Repository
rS FreeBSD src repository - subversion

Event Timeline

This revision is now accepted and ready to land. (Feb 10 2018, 6:20 PM)
sys/amd64/vmm/amd/svm_support.S
Lines 116–132 (on Diff #39065)

This looks the same as VMX; could it be in a common macro?
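
If it were made common, the shared piece could be a preprocessor macro included by both svm_support.S and vmx_support.S, using the same loop as the diff below; a hypothetical sketch (the name FLUSH_RSB and its placement are illustrative):

/*
 * Hypothetical FLUSH_RSB macro shared by svm_support.S and
 * vmx_support.S.  Overwrites all 32 RSB entries; clobbers %rax and
 * %ecx, so it must run after the guest's registers have been saved.
 */
#define	FLUSH_RSB						\
	movl	$16, %ecx;	/* 16 iterations, 2 calls each */ \
	movq	%rsp, %rax;					\
0:	call	2f;		/* create an RSB entry */	\
1:	pause;							\
	call	1b;		/* capture rogue speculation */	\
2:	call	4f;		/* create an RSB entry */	\
3:	pause;							\
	call	3b;		/* capture rogue speculation */	\
4:	subl	$1, %ecx;					\
	jnz	0b;						\
	movq	%rax, %rsp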

sys/amd64/vmm/intel/vmx_support.S
Lines 237–240 (on Diff #39065)

Another option is to have a single vmx_exit_guest but use a sysctl to enable/disable the RSB flush, something like this (it's for SVM, but we could do the same for VMX).

Index: sys/amd64/vmm/amd/svm.c
===================================================================
--- sys/amd64/vmm/amd/svm.c	(revision 328628)
+++ sys/amd64/vmm/amd/svm.c	(working copy)
@@ -115,6 +115,13 @@
 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
     "Number of ASIDs supported by this processor");
 
+int flush_rsb_enable = 1;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, flush_rsb_enable, CTLFLAG_RDTUN,
+    &flush_rsb_enable, 0, "Enable RSB flush");
+
+int flush_rsb_count = 0;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, flush_rsb_count, CTLFLAG_RD,
+    &flush_rsb_count, 0, "RSB flush count");
 /* Current ASID generation for each host cpu */
 static struct asid asid[MAXCPU];
 
Index: sys/amd64/vmm/amd/svm_genassym.c
===================================================================
--- sys/amd64/vmm/amd/svm_genassym.c	(revision 328628)
+++ sys/amd64/vmm/amd/svm_genassym.c	(working copy)
@@ -33,6 +33,8 @@
 
 #include "svm.h"
 
+extern int flush_rsb_enable;
+
 ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
 ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
 ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
Index: sys/amd64/vmm/amd/svm_support.S
===================================================================
--- sys/amd64/vmm/amd/svm_support.S	(revision 328628)
+++ sys/amd64/vmm/amd/svm_support.S	(working copy)
@@ -41,6 +41,8 @@
 #define	VMRUN	.byte 0x0f, 0x01, 0xd8
 #define	VMSAVE	.byte 0x0f, 0x01, 0xdb
 
+.extern flush_rsb_enable
+.extern flush_rsb_count
 /*
  * svm_launch(uint64_t vmcb, struct svm_regctx *gctx, struct pcpu *pcpu)
  * %rdi: physical address of VMCB
@@ -113,8 +115,29 @@
 	movq %rdi, SCTX_RDI(%rax)
 	movq %rsi, SCTX_RSI(%rax)
 
+	cmpl	$0, flush_rsb_enable	/* 32-bit compare; the variable is an int */
+	je	no_rsb_flush
+	incl	flush_rsb_count
+
+	/*
+	 * To prevent malicious branch target predictions from
+	 * affecting the host, overwrite all entries in the RSB upon
+	 * exiting a guest.
+	 */
+	movl	$16, %ecx	/* 16 iterations, two calls per loop */
+	movq	%rsp, %rax	/* the calls below grow the stack */
+0:	call	2f		/* create an RSB entry. */
+1:	pause
+	call	1b		/* capture rogue speculation. */
+2:	call	2f		/* create an RSB entry. */
+1:	pause
+	call	1b		/* capture rogue speculation. */
+2:	sub	$1, %ecx
+	jnz	0b
+	movq	%rax, %rsp	/* discard the pushed return addresses */
+
 	/* Restore host state */
-	pop %r15
+no_rsb_flush:	pop %r15
 	pop %r14
 	pop %r13
 	pop %r12
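
One usage note on the knob: since it is declared with CTLFLAG_RDTUN, it is read-only at runtime and would be set as a boot-time tunable, e.g. in loader.conf (tunable name per the sketch above):

# /boot/loader.conf
hw.vmm.svm.flush_rsb_enable=0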
This revision was automatically updated to reflect the committed changes.

While a conditional branch dependent on a global variable isn't quite the same as an indirect branch dependent on one, the opportunity still exists for an attacker to evict the cache line containing the variable, giving the CPU time to speculatively execute in the neighborhood of the branch. What I've seen in other implementations is placement of the RSB-flushing code before any branches. Perhaps that's overly conservative, but I think we should follow that approach too.

It's for that reason that I duplicated the entire guest exit path for Intel and inserted the flush routine into one copy, giving us a standard exit with differentiated returns. The analogue for AMD could involve a common exit path with separate enter/flush paths and a knob to switch between them. Alas, without a way to test that, I opted not to go there.
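
For reference, the Intel arrangement amounts to selecting the exit stub once, when the host RIP is written into the VMCS, so that nothing data-dependent executes between the VM exit and the flush. A hypothetical sketch in the style of vmx.c (the stub names and the helper are illustrative, not necessarily the committed code):

/*
 * Exit stubs defined in vmx_support.S; the flush variant stuffs the
 * RSB before falling into the common host-restore sequence.
 */
extern char vmx_exit_guest[];
extern char vmx_exit_guest_flush_rsb[];

static void
vmx_set_exit_stub(int flush_rsb)
{
	/*
	 * The address written to VMCS_HOST_RIP is where the CPU lands
	 * on every VM exit, so the choice is made once, up front,
	 * rather than branching on a global in the exit path itself.
	 */
	vmcs_write(VMCS_HOST_RIP, flush_rsb ?
	    (u_long)vmx_exit_guest_flush_rsb : (u_long)vmx_exit_guest);
}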

I'm going to commit what I have now; that doesn't preclude enhancing the AMD version in a follow-on commit.