diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -190,6 +190,11 @@
     CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
     "enable sigfastblock for new processes");
 
+static bool __elfN(allow_wx) = true;
+SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
+    CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
+    "Allow pages to be mapped simultaneously writable and executable");
+
 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
 
 #define	aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
@@ -1237,6 +1242,9 @@
 		imgp->map_flags |= MAP_ASLR_IGNSTART;
 	}
 
+	if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0)
+		imgp->map_flags |= MAP_WXORX;
+
 	error = exec_new_vmspace(imgp, sv);
 	vmspace = imgp->proc->p_vmspace;
 	map = &vmspace->vm_map;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1043,6 +1043,7 @@
 	struct rlimit rlim_stack;
 	vm_offset_t sv_minuser, stack_addr;
 	vm_map_t map;
+	vm_prot_t stack_prot;
 	u_long ssiz;
 
 	imgp->vmspace_destroyed = 1;
@@ -1074,12 +1075,12 @@
 		pmap_remove_pages(vmspace_pmap(vmspace));
 		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
 		/*
-		 * An exec terminates mlockall(MCL_FUTURE), ASLR state
-		 * must be re-evaluated.
+		 * An exec terminates mlockall(MCL_FUTURE).
+		 * ASLR and W^X states must be re-evaluated.
 		 */
 		vm_map_lock(map);
 		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
-		    MAP_ASLR_IGNSTART);
+		    MAP_ASLR_IGNSTART | MAP_WXORX);
 		vm_map_unlock(map);
 	} else {
 		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
@@ -1126,11 +1127,16 @@
 	if (ssiz < imgp->eff_stack_sz)
 		imgp->eff_stack_sz = ssiz;
 	stack_addr = sv->sv_usrstack - ssiz;
-	error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
-	    obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
-	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
-	if (error != KERN_SUCCESS)
+	stack_prot = obj != NULL && imgp->stack_prot != 0 ?
+	    imgp->stack_prot : sv->sv_stackprot;
+	error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz, stack_prot,
+	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
+	if (error != KERN_SUCCESS) {
+		uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
+		    "failed mach error %d errno %d\n", (uintmax_t)ssiz,
+		    stack_prot, error, vm_mmap_to_errno(error));
 		return (vm_mmap_to_errno(error));
+	}
 
 	/*
 	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -228,6 +228,7 @@
 #define	MAP_ASLR		0x08	/* enabled ASLR */
 #define	MAP_ASLR_IGNSTART	0x10
 #define	MAP_REPLENISH		0x20
+#define	MAP_WXORX		0x40	/* enforce W^X */
 
 #ifdef _KERNEL
 #if defined(KLD_MODULE) && !defined(KLD_TIED)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1671,6 +1671,10 @@
 	if (start == end || !vm_map_range_valid(map, start, end))
 		return (KERN_INVALID_ADDRESS);
 
+	if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
+	    VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
+		return (KERN_PROTECTION_FAILURE);
+
 	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
@@ -2751,6 +2755,13 @@
 	in_tran = NULL;
 	vm_map_lock(map);
 
+	if ((map->flags & MAP_WXORX) != 0 && (new_prot &
+	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
+	    VM_PROT_EXECUTE)) {
+		vm_map_unlock(map);
+		return (KERN_PROTECTION_FAILURE);
+	}
+
 	/*
	 * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
	 * need to fault pages into the map and will drop the map lock while
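For reference, a minimal userland sketch (not part of the patch) of the behavior this change introduces. It assumes the new tunable has been turned off (e.g. sysctl kern.elf64.allow_wx=0) and that the test binary carries no NT_FREEBSD_FCTL_WXNEEDED feature-control note, so the process map has MAP_WXORX set. Both a fresh W+X mapping (rejected in vm_map_insert()) and an mprotect() upgrade to W+X (rejected in vm_map_protect()) should then fail with EACCES, the errno that vm_mmap_to_errno() produces for KERN_PROTECTION_FAILURE.

/*
 * W^X demonstration, assuming kern.elf64.allow_wx=0 and a binary
 * without the wxneeded feature-control note.  Expected output on an
 * enforcing system: "Permission denied" for both attempts.
 */
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t ps = (size_t)getpagesize();
	void *p;

	/* Direct W+X mapping: refused by the vm_map_insert() check. */
	p = mmap(NULL, ps, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	printf("mmap(RWX): %s\n",
	    p == MAP_FAILED ? strerror(errno) : "succeeded");
	if (p != MAP_FAILED)
		munmap(p, ps);

	/* Upgrade to W+X via mprotect(): refused in vm_map_protect(). */
	p = mmap(NULL, ps, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	printf("mprotect(RWX): %s\n",
	    mprotect(p, ps, PROT_READ | PROT_WRITE | PROT_EXEC) == -1 ?
	    strerror(errno) : "succeeded");
	munmap(p, ps);
	return (0);
}

A binary that legitimately needs writable-and-executable pages (a JIT, for instance) would be tagged with the wxneeded feature-control bit, e.g. via elfctl(1); that sets NT_FREEBSD_FCTL_WXNEEDED in fctl0, so the exec path above skips setting MAP_WXORX for that process.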