Changeset View
Standalone View
sys/amd64/amd64/support.S
Show All 27 Lines | |||||
* SUCH DAMAGE. | * SUCH DAMAGE. | ||||
* | * | ||||
* $FreeBSD$ | * $FreeBSD$ | ||||
*/ | */ | ||||
#include "opt_ddb.h" | #include "opt_ddb.h" | ||||
#include <machine/asmacros.h> | #include <machine/asmacros.h> | ||||
#include <machine/specialreg.h> | |||||
#include <machine/pmap.h> | #include <machine/pmap.h> | ||||
#include "assym.s" | #include "assym.s" | ||||
.text | .text | ||||
/* | /* | ||||
* bcopy family | * bcopy family | ||||
▲ Show 20 Lines • Show All 775 Lines • ▼ Show 20 Lines | ENTRY(pmap_pti_pcid_invalidate) | ||||
retq | retq | ||||
/* | /* | ||||
* void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va); | * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va); | ||||
* Invalidates virtual address va in address space ucr3, then returns to kcr3. | * Invalidates virtual address va in address space ucr3, then returns to kcr3. | ||||
*/ | */ | ||||
ALIGN_TEXT | ALIGN_TEXT | ||||
ENTRY(pmap_pti_pcid_invlpg) | ENTRY(pmap_pti_pcid_invlpg) | ||||
/* Save RFLAGS (including IF) so popfq below restores the caller's interrupt state. */
pushfq | pushfq | ||||
op: 1f
2.5.1.3:
~~~
On processors with enhanced IBRS, an RSB overwrite sequence does not suffice… | |||||
Not Done Inline ActionsRe-read the citation and the code where you put the note. Your suggestion would result in doing something which is exactly opposite to what is recommended in the 2.5.1.3. If the IA32_ARCH_CAP_IBRS_ALL bit is set, then RSB reset sequence is useless, while jmp 1f would jump right to the sequence. Instead, the paragraph recommends to enable SMEP as the measure. We always have SMEP turned on if CPU supports it, there is no such action as turning SMEP on after the kernel is booted. So there is nothing to do in the case of enhanced IBRS. kib: Re-read the citation and the code where you put the note. Your suggestion would result in… | |||||
/* Interrupts must stay off while %cr3 points at the user (PTI) page table. */
cli | cli | ||||
movq %rdi,%cr3 /* to user page table */ | movq %rdi,%cr3 /* to user page table */ | ||||
/* Invalidate the TLB entry for va (%rdx = third argument) in the user address space. */
invlpg (%rdx) | invlpg (%rdx) | ||||
Not Done Inline ActionsAccording to 2.5.1.3 the CPUID_STDEXT3_IBPD is not IA32_ARCH_CAP_IBRS_ALL? op: According to 2.5.1.3 the CPUID_STDEXT3_IBPD is not IA32_ARCH_CAP_IBRS_ALL?
| |||||
Not Done Inline ActionsNope, it's fine, just the CPUID_STDEXT3_IBPD was named misleading. op: Nope, it's fine, just the CPUID_STDEXT3_IBPD was named misleading. | |||||
movq %rsi,%cr3 /* back to kernel */ | movq %rsi,%cr3 /* back to kernel */ | ||||
/* Restore the saved RFLAGS; re-enables interrupts iff they were enabled on entry. */
popfq | popfq | ||||
retq | retq | ||||
/* | /* | ||||
* void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva, | * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva, | ||||
* vm_offset_t eva); | * vm_offset_t eva); | ||||
* Invalidates virtual addresses between sva and eva in address space ucr3, | * Invalidates virtual addresses between sva and eva in address space ucr3, | ||||
* then returns to kcr3. | * then returns to kcr3. | ||||
*/ | */ | ||||
ALIGN_TEXT | ALIGN_TEXT | ||||
ENTRY(pmap_pti_pcid_invlrng) | ENTRY(pmap_pti_pcid_invlrng) | ||||
/* Save RFLAGS so the caller's interrupt-enable state is restored by popfq. */
pushfq | pushfq | ||||
/* No interrupts while the user (PTI) page table is active. */
cli | cli | ||||
movq %rdi,%cr3 /* to user page table */ | movq %rdi,%cr3 /* to user page table */ | ||||
/* Loop: %rdx = current page (starts at sva), %rcx = eva (exclusive bound). */
1: invlpg (%rdx) | 1: invlpg (%rdx) | ||||
addq $PAGE_SIZE,%rdx | addq $PAGE_SIZE,%rdx | ||||
cmpq %rdx,%rcx | cmpq %rdx,%rcx | ||||
/* Unsigned compare: continue while eva > current address. */
ja 1b | ja 1b | ||||
movq %rsi,%cr3 /* back to kernel */ | movq %rsi,%cr3 /* back to kernel */ | ||||
popfq | popfq | ||||
retq | retq | ||||
/*
 * RSB (Return Stack Buffer) overwrite sequence generator.
 * .altmacro is required so %(ll) expands the numeric value of the
 * assembler symbol ll into the generated label names.
 */
.altmacro | |||||
/* Emit the landing label handle_ibrs_<l> for one stuffing iteration. */
.macro ibrs_seq_label l | |||||
handle_ibrs_\l: | |||||
.endm | |||||
/* Emit the matching call to handle_ibrs_<l>. */
.macro ibrs_call_label l | |||||
call handle_ibrs_\l | |||||
.endm | |||||
/*
 * Emit \count call/label pairs.  Each call pushes a return address and
 * deposits an RSB entry; execution lands just past the nop at the label,
 * where addq $8,%rsp discards the architectural return address while the
 * benign RSB entry remains.  Repeating this \count times refills the RSB
 * with kernel-controlled targets.
 */
.macro ibrs_seq count | |||||
ll=1 | |||||
.rept \count | |||||
ibrs_call_label %(ll) | |||||
/* Speculation trap between the call and its landing label. */
nop | |||||
ibrs_seq_label %(ll) | |||||
/* Drop the return address pushed by the call above. */
addq $8,%rsp | |||||
ll=ll+1 | |||||
.endr | |||||
.endm | |||||
/* all callers already saved %rax, %rdx, and %rcx */ | |||||
/*
 * Kernel-entry IBRS handling: if the hw.ibrs_active knob is set, enable
 * IBRS in IA32_SPEC_CTRL and, on CPUs without SMEP, overwrite the RSB.
 */
ENTRY(handle_ibrs_entry) | |||||
cmpb $0,hw_ibrs_active(%rip) | |||||
/* IBRS mitigation disabled: nothing to do. */
je 1f | |||||
movl $MSR_IA32_SPEC_CTRL,%ecx | |||||
/* wrmsr takes the 64-bit value in %edx:%eax; set the IBRS bit. */
movl $IA32_SPEC_CTRL_IBRS,%eax | |||||
movl $IA32_SPEC_CTRL_IBRS>>32,%edx | |||||
wrmsr | |||||
/* Remember per-CPU that SPEC_CTRL was written, so the exit path undoes it. */
movb $1,PCPU(IBPB_SET) | |||||
testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip) | |||||
/*
 * With SMEP the RSB overwrite is unnecessary for user->kernel
 * transitions (per the Intel guidance quoted in this review), so skip it.
 */
jne 1f | |||||
/* No SMEP: stuff the RSB with 32 benign entries. */
ibrs_seq 32 | |||||
1: ret | |||||
END(handle_ibrs_entry) | |||||
/*
 * Kernel-exit IBRS handling: clear IA32_SPEC_CTRL if the entry path set it.
 * Clobbers %rax, %rdx, %rcx (callers saved them; see handle_ibrs_entry).
 */
ENTRY(handle_ibrs_exit) | |||||
cmpb $0,PCPU(IBPB_SET) | |||||
/* Entry path did not write SPEC_CTRL: nothing to undo. */
je 1f | |||||
movl $MSR_IA32_SPEC_CTRL,%ecx | |||||
/* Zero %edx:%eax and write it, clearing IBRS for the return to user mode. */
xorl %eax,%eax | |||||
xorl %edx,%edx | |||||
wrmsr | |||||
movb $0,PCPU(IBPB_SET) | |||||
1: ret | |||||
END(handle_ibrs_exit) | |||||
/* registers-neutral version, but needs stack */ | |||||
/*
 * Same as handle_ibrs_exit, but preserves %rax/%rdx/%rcx by saving them
 * on the stack, for call sites that have not saved them already.
 */
ENTRY(handle_ibrs_exit_rs) | |||||
cmpb $0,PCPU(IBPB_SET) | |||||
/* SPEC_CTRL was never set on this CPU: nothing to undo. */
je 1f | |||||
/* Save the registers wrmsr needs, so this variant clobbers nothing. */
pushq %rax | |||||
pushq %rdx | |||||
pushq %rcx | |||||
movl $MSR_IA32_SPEC_CTRL,%ecx | |||||
/* Write 0 to IA32_SPEC_CTRL, clearing IBRS. */
xorl %eax,%eax | |||||
xorl %edx,%edx | |||||
wrmsr | |||||
popq %rcx | |||||
popq %rdx | |||||
popq %rax | |||||
movb $0,PCPU(IBPB_SET) | |||||
1: ret | |||||
END(handle_ibrs_exit_rs) | |||||
.noaltmacro |
1f
2.5.1.3:
~~~
On processors with enhanced IBRS, an RSB overwrite sequence does not suffice to prevent the
predicted target of a near return from using an RSB entry created in a less privileged predictor mode.
Software can avoid this by enabling SMEP (for transitions from user mode to supervisor mode) and by maintaining IA32_SPEC_CTRL.IBRS = 1 (for VM exits).
~~~