diff --git a/sys/arm64/arm64/sys_machdep.c b/sys/arm64/arm64/sys_machdep.c
--- a/sys/arm64/arm64/sys_machdep.c
+++ b/sys/arm64/arm64/sys_machdep.c
@@ -32,13 +32,64 @@
 #include <sys/param.h>
 #include <sys/proc.h>
+#include <sys/sysproto.h>
 #include <sys/systm.h>
 
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+
 #include <machine/sysarch.h>
+#include <machine/vmparam.h>
+
+static int
+bti_guard_memory(struct thread *td, vm_offset_t start, vm_offset_t end)
+{
+	vm_map_t map;
+
+	map = &td->td_proc->p_vmspace->vm_map;
+	vm_map_protect(map, start, end, 0, 0, PMAP_ENTER_BTI,
+	    VM_MAP_PROTECT_SET_ARCHFLAGS);
+
+	return (0);
+}
 
 int
 sysarch(struct thread *td, struct sysarch_args *uap)
 {
+	struct arm64_guard_page_args gp_args;
+	int error;
+
+	switch (uap->op) {
+	case ARM64_GUARD_PAGE:
+		error = copyin(uap->parms, &gp_args, sizeof(gp_args));
+		if (error != 0)
+			break;
+
+		/* Align the start address to a page boundary */
+		gp_args.len += gp_args.addr & PAGE_MASK;
+		gp_args.addr = trunc_page(gp_args.addr);
+		/* Align the length */
+		gp_args.len = round_page(gp_args.len);
+
+		/* Check the address points to user memory */
+		if (gp_args.addr >= VM_MAX_USER_ADDRESS)
+			return (EINVAL);
+		/*
+		 * Check the length is not too long. As the length may wrap
+		 * we need to make sure it is no longer than the remaining
+		 * user memory.
+		 */
+		if ((VM_MAX_USER_ADDRESS - gp_args.addr) < gp_args.len)
+			return (EINVAL);
+
+		error = bti_guard_memory(td, gp_args.addr,
+		    gp_args.addr + gp_args.len);
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
 
-	return (ENOTSUP);
+	return (error);
 }
diff --git a/sys/arm64/include/sysarch.h b/sys/arm64/include/sysarch.h
--- a/sys/arm64/include/sysarch.h
+++ b/sys/arm64/include/sysarch.h
@@ -36,6 +36,13 @@
 #ifndef _MACHINE_SYSARCH_H_
 #define _MACHINE_SYSARCH_H_
 
+#define	ARM64_GUARD_PAGE	0
+
+struct arm64_guard_page_args {
+	__uintptr_t addr;
+	__size_t len;
+};
+
 #ifndef _KERNEL
 
 __BEGIN_DECLS
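
For context, here is a minimal userland sketch of how the new op could be exercised once the patch is in place: map a page, then ask the kernel to apply the BTI guard attribute to it through sysarch(2). This is not part of the change; it assumes the usual sysarch() libc wrapper, and the mmap() setup, flags, and error handling are illustrative only.

#include <sys/types.h>
#include <sys/mman.h>

#include <machine/sysarch.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
	struct arm64_guard_page_args args;
	long pagesz;
	void *p;

	pagesz = sysconf(_SC_PAGESIZE);

	/* Map a single anonymous page to use as the target region. */
	p = mmap(NULL, (size_t)pagesz, PROT_NONE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/*
	 * Request the BTI guard attribute on the page. Per the patch, the
	 * kernel rounds addr/len to page boundaries and rejects ranges
	 * outside user memory with EINVAL.
	 */
	args.addr = (__uintptr_t)p;
	args.len = pagesz;
	if (sysarch(ARM64_GUARD_PAGE, &args) != 0)
		err(1, "sysarch(ARM64_GUARD_PAGE)");

	return (0);
}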