Changeset View
Standalone View
sys/arm/arm/trap-v6.c
Show First 20 Lines • Show All 281 Lines • ▼ Show 20 Lines | abort_handler(struct trapframe *tf, int prefetch) | ||||
uint32_t fsr; | uint32_t fsr; | ||||
struct ksig ksig; | struct ksig ksig; | ||||
struct proc *p; | struct proc *p; | ||||
struct pcb *pcb; | struct pcb *pcb; | ||||
struct vm_map *map; | struct vm_map *map; | ||||
struct vmspace *vm; | struct vmspace *vm; | ||||
vm_prot_t ftype; | vm_prot_t ftype; | ||||
bool usermode; | bool usermode; | ||||
int bp_harden; | |||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
void *onfault; | void *onfault; | ||||
#endif | #endif | ||||
VM_CNT_INC(v_trap); | VM_CNT_INC(v_trap); | ||||
td = curthread; | td = curthread; | ||||
fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get(); | fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get(); | ||||
#if __ARM_ARCH >= 7 | #if __ARM_ARCH >= 7 | ||||
far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get(); | far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get(); | ||||
#else | #else | ||||
far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get(); | far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get(); | ||||
#endif | #endif | ||||
idx = FSR_TO_FAULT(fsr); | idx = FSR_TO_FAULT(fsr); | ||||
usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */ | usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */ | ||||
/* | |||||
* Apply BP hardening by flushing the branch prediction cache | |||||
* for prefaults on kernel addresses. | |||||
*/ | |||||
if (__predict_false(prefetch && far > VM_MAXUSER_ADDRESS && | |||||
(idx == FAULT_TRAN_L2 || idx == FAULT_PERM_L2))) { | |||||
bp_harden = PCPU_GET(bp_harden_kind); | |||||
if (bp_harden == PCPU_BP_HARDEN_KIND_BPIALL) | |||||
_CP15_BPIALL(); | |||||
else if (bp_harden == PCPU_BP_HARDEN_KIND_ICIALLU) | |||||
_CP15_ICIALLU(); | |||||
} | |||||
if (usermode) | if (usermode) | ||||
td->td_frame = tf; | td->td_frame = tf; | ||||
CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d", | CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d", | ||||
__func__, fsr, idx, far, prefetch, usermode); | __func__, fsr, idx, far, prefetch, usermode); | ||||
/* | /* | ||||
* Firstly, handle aborts that are not directly related to mapping. | * Firstly, handle aborts that are not directly related to mapping. | ||||
*/ | */ | ||||
if (__predict_false(idx == FAULT_EA_IMPREC)) { | if (__predict_false(idx == FAULT_EA_IMPREC)) { | ||||
abort_imprecise(tf, fsr, prefetch, usermode); | abort_imprecise(tf, fsr, prefetch, usermode); | ||||
return; | return; | ||||
} | } | ||||
if (__predict_false(idx == FAULT_DEBUG)) { | if (__predict_false(idx == FAULT_DEBUG)) { | ||||
abort_debug(tf, fsr, prefetch, usermode, far); | abort_debug(tf, fsr, prefetch, usermode, far); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
imp: I think this comment needs some more explanation.
"Apply BP hardening by flushing the branch prediction cache for prefaults on kernel addresses." might be a good start. | |||||
Not Done Inline Actions — mmel: I will do it. | |||||
* ARM has a set of unprivileged load and store instructions | * ARM has a set of unprivileged load and store instructions | ||||
* (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in other | * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in other | ||||
* than user mode and OS should recognize their aborts and behave | * than user mode and OS should recognize their aborts and behave | ||||
Not Done Inline Actions — imp: Why would this info be per-CPU? It seems like it could be rolled up into the IF statement as well if it were just a global... | |||||
Not Done Inline Actions — mmel: I plan to add support for multicluster SoCs in the "near" future. So yes, each cluster can have unique cores (big.LITTLE, for example). | |||||
* appropriately. However, there is no way how to do that reasonably | * appropriately. However, there is no way how to do that reasonably | ||||
* in general unless we restrict the handling somehow. | * in general unless we restrict the handling somehow. | ||||
* | * | ||||
* For now, these instructions are used only in copyin()/copyout() | * For now, these instructions are used only in copyin()/copyout() | ||||
* like functions where usermode buffers are checked in advance that | * like functions where usermode buffers are checked in advance that | ||||
Not Done Inline Actions — imp: The above differs from the assembler, as I pointed out. This is what I'd expect here. I seem to recall reading that we need to do this invalidation as early as possible. Is that true for this case? If so, can we move this block earlier in the function with a comment to that effect? | |||||
Not Done Inline Actions — mmel: This is exactly the point of my confusion. Logic tells me that the BP must be flushed before the first indirect branch is taken (simply, before a mistrained BP can be used). And this is not what this code does (nor the original Linux version). I'm very afraid that this is done specifically for some real (but not published) attack vector based on the Linux code. Anyway, I will move (and comment) this block. | |||||
Not Done Inline Actions — imp: Excellent! | |||||
* they are not from KVA space. Thus, no action is needed here. | * they are not from KVA space. Thus, no action is needed here. | ||||
*/ | */ | ||||
/* | /* | ||||
* (1) Handle access and R/W hardware emulation aborts. | * (1) Handle access and R/W hardware emulation aborts. | ||||
* (2) Check that abort is not on pmap essential address ranges. | * (2) Check that abort is not on pmap essential address ranges. | ||||
* There is no way how to fix it, so we don't even try. | * There is no way how to fix it, so we don't even try. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 306 Lines • Show Last 20 Lines |
I think this comment needs some more explanation. Something like:
"Apply BP hardening by flushing the branch prediction cache for prefaults on kernel addresses."
might be a good start.