Index: sys/arm/arm/trap-v6.c
===================================================================
--- sys/arm/arm/trap-v6.c
+++ sys/arm/arm/trap-v6.c
@@ -171,6 +171,86 @@
 	{abort_fatal,	"Undefined Code (0x40F)"}
 };
 
+static __inline int
+read_instruction_nofault(vm_offset_t addr, uint32_t *inst)
+{
+
+	if (!ALIGNED_POINTER(addr, uint32_t) || cp15_ats1cpr_check(addr) != 0)
+		return (EFAULT);
+
+	*inst = *((uint32_t *)addr);
+	return (0);
+}
+
+/*
+ * There are eight load/store unprivileged instructions, and each of them has
+ * two 32-bit encodings. There is also one 16-bit (Thumb) encoding, but it is
+ * not used in the kernel.
+ *
+ *			bits 27-20	bits 7-4
+ *
+ * STRT		A1	01 00 x 010	x x x x
+ *		A2	01 10 x 010	x x x 0
+ * STRHT	A1	00 00 x 110	1 0 1 1
+ *		A2	00 00 x 010	1 0 1 1
+ * STRBT	A1	01 00 x 110	x x x x
+ *		A2	01 10 x 110	x x x 0
+ * LDRT		A1	01 00 x 011	x x x x
+ *		A2	01 10 x 011	x x x 0
+ * LDRHT	A1	00 00 x 111	1 0 1 1
+ *		A2	00 00 x 011	1 0 1 1
+ * LDRSHT	A1	00 00 x 111	1 1 1 1
+ *		A2	00 00 x 011	1 1 1 1
+ * LDRBT	A1	01 00 x 111	x x x x
+ *		A2	01 10 x 111	x x x 0
+ * LDRSBT	A1	00 00 x 111	1 1 0 1
+ *		A2	00 00 x 011	1 1 0 1
+ *
+ * The x bits and the remaining bits in these encodings are arbitrary
+ * (condition codes, registers, immediate values, ...).
+ *
+ * We can group these encodings into four sets:
+ *
+ * STRT		A1	01 00 x 010	x x x x
+ * STRBT	A1	01 00 x 110	x x x x
+ * LDRT		A1	01 00 x 011	x x x x
+ * LDRBT	A1	01 00 x 111	x x x x
+ *
+ *	(inst & 0x0f200000) == 0x04200000
+ *
+ * STRT		A2	01 10 x 010	x x x 0
+ * STRBT	A2	01 10 x 110	x x x 0
+ * LDRT		A2	01 10 x 011	x x x 0
+ * LDRBT	A2	01 10 x 111	x x x 0
+ *
+ *	(inst & 0x0f200010) == 0x06200000
+ *
+ * STRHT	A1	00 00 x 110	1 0 1 1
+ *		A2	00 00 x 010	1 0 1 1
+ * LDRHT	A1	00 00 x 111	1 0 1 1
+ *		A2	00 00 x 011	1 0 1 1
+ *
+ *	(inst & 0x0f2000f0) == 0x002000b0
+ *
+ * LDRSHT	A1	00 00 x 111	1 1 1 1
+ *		A2	00 00 x 011	1 1 1 1
+ * LDRSBT	A1	00 00 x 111	1 1 0 1
+ *		A2	00 00 x 011	1 1 0 1
+ *
+ *	(inst & 0x0f3000d0) == 0x003000d0
+ */
+static __inline bool
+is_instruction_unprivileged(uint32_t inst)
+{
+
+	if ((inst & 0x0f200000) == 0x04200000 ||
+	    (inst & 0x0f200010) == 0x06200000 ||
+	    (inst & 0x0f2000f0) == 0x002000b0 ||
+	    (inst & 0x0f3000d0) == 0x003000d0)
+		return (true);
+	return (false);
+}
+
 static __inline void
 call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
 {
@@ -289,7 +369,7 @@
 	struct vm_map *map;
 	struct vmspace *vm;
 	vm_prot_t ftype;
-	bool usermode;
+	bool usermode, usr_copy_fault;
 #ifdef INVARIANTS
 	void *onfault;
 #endif
@@ -329,13 +409,33 @@
 	 * appropriately. However, there is no way how to do that reasonably
 	 * in general unless we restrict the handling somehow.
 	 *
-	 * For now, these instructions are used only in copyin()/copyout()
-	 * like functions where usermode buffers are checked in advance that
-	 * they are not from KVA space. Thus, no action is needed here.
+	 * For example, these instructions are used in copyin()/copyout()-like
+	 * functions where usermode buffers are checked in advance to ensure
+	 * that they are not from KVA space. Then, no action is needed here.
+	 *
+	 * There might be other cases which don't check the buffers in advance.
+	 * However, the handling is restricted here to cases when pcb_onfault
+	 * is not NULL. This is quite a reasonable restriction, as pcb_onfault
+	 * is used for quick error handling of accesses to user address space
+	 * done in the kernel.
 	 */
+	usr_copy_fault = false;
+	pcb = td->td_pcb;
+	if (__predict_false(!usermode && pcb->pcb_onfault != NULL)) {
+		uint32_t inst;
+
+		/* Read the instruction safely so we do not double abort. */
+		if (read_instruction_nofault(TRAPF_PC(tf), &inst) != 0)
+			panic("%s: bad PC %x", __func__, TRAPF_PC(tf));
+
+		/* Is this a load/store unprivileged instruction? */
+		if (is_instruction_unprivileged(inst))
+			usr_copy_fault = true;
+	}
 
 #ifdef ARM_NEW_PMAP
-	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
+	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode ||
+	    usr_copy_fault);
 	if (rv == KERN_SUCCESS)
 		return;
 	if (rv == KERN_INVALID_ADDRESS)
@@ -423,7 +523,6 @@
 	 * Don't pass faulting cache operation to vm_fault(). We don't want
 	 * to handle all vm stuff at this moment.
 	 */
-	pcb = td->td_pcb;
 	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
 		tf->tf_r0 = far;	/* return failing address */
 		tf->tf_pc = (register_t)pcb->pcb_onfault;
@@ -459,7 +558,7 @@
 	/*
 	 * Don't allow user-mode faults in kernel address space.
 	 */
-	if (usermode)
+	if (usermode || usr_copy_fault)
 		goto nogo;
 
 	map = kernel_map;

Index: sys/arm/include/cpu-v6.h
===================================================================
--- sys/arm/include/cpu-v6.h
+++ sys/arm/include/cpu-v6.h
@@ -578,6 +578,36 @@
 	tlb_flush_all_ng_local();
 }
 
+/*
+ * Check an address for privileged (PL1) read access.
+ *
+ * The check must be done atomically. In this case, it means that
+ * this function must be called while interrupts are disabled.
+ */
+static __inline int
+cp15_ats1cpr_check(vm_offset_t addr)
+{
+
+	cp15_ats1cpr_set(addr);
+	isb();
+	return (cp15_par_get() & 0x01 ? EFAULT : 0);
+}
+
+/*
+ * Check an address for privileged (PL1) write access.
+ *
+ * The check must be done atomically. In this case, it means that
+ * this function must be called while interrupts are disabled.
+ */
+static __inline int
+cp15_ats1cpw_check(vm_offset_t addr)
+{
+
+	cp15_ats1cpw_set(addr);
+	isb();
+	return (cp15_par_get() & 0x01 ? EFAULT : 0);
+}
+
 #else /* ! __ARM_ARCH >= 6 */
 
 /*
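
To see how the four mask tests in is_instruction_unprivileged() classify
concrete instruction words, consider the standalone sketch below. The
encodings are hand-assembled A1 words with cond = AL and arbitrarily chosen
registers; they are assumptions made for illustration, not values taken from
the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as in the patch, copied here so the demo is standalone. */
static bool
is_instruction_unprivileged(uint32_t inst)
{

	if ((inst & 0x0f200000) == 0x04200000 ||
	    (inst & 0x0f200010) == 0x06200000 ||
	    (inst & 0x0f2000f0) == 0x002000b0 ||
	    (inst & 0x0f3000d0) == 0x003000d0)
		return (true);
	return (false);
}

int
main(void)
{
	/* Hand-assembled A1 encodings (cond = AL); illustration only. */
	uint32_t ldrt = 0xe4b32000;	/* LDRT r2, [r3]  -> first group  */
	uint32_t ldrht = 0xe07320b0;	/* LDRHT r2, [r3] -> third group  */
	uint32_t ldr = 0xe5932000;	/* LDR r2, [r3]   -> privileged,
					   matches no group */

	printf("LDRT: %d LDRHT: %d LDR: %d\n",
	    is_instruction_unprivileged(ldrt),
	    is_instruction_unprivileged(ldrht),
	    is_instruction_unprivileged(ldr));	/* prints "LDRT: 1 LDRHT: 1 LDR: 0" */
	return (0);
}

Note how an ordinary LDR (0xe5932000) fails every test: its bits 27-24 are
0101, so none of the 0100/0110/0000 patterns required by the masks can match.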
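
On the "atomically" requirement in cp15_ats1cpr_check(): the ATS1CPR write
performs a stage 1 privileged-read translation for addr and latches the
result in the PAR, whose bit 0 is the fault flag; a second translation
operation in between would clobber the PAR. A caller that is not already
running with interrupts masked (the abort handler is) would therefore need to
bracket the check. A minimal hypothetical wrapper, assuming FreeBSD's
intr_disable()/intr_restore() primitives; the name probe_kernel_read is
invented for this sketch:

/*
 * Hypothetical caller (not part of the patch): probe whether the
 * kernel (PL1) may read from addr without faulting.
 */
static int
probe_kernel_read(vm_offset_t addr)
{
	register_t s;
	int error;

	s = intr_disable();	/* keep ATS1CPR + PAR read atomic */
	error = cp15_ats1cpr_check(addr);
	intr_restore(s);
	return (error);
}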