diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -190,6 +190,11 @@
     CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
     "enable sigfastblock for new processes");
 
+static bool __elfN(allow_wx) = true;
+SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
+    CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
+    "Allow pages to be mapped simultaneously writable and executable");
+
 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
 
 #define	aligned(a, t)	(rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
@@ -1237,6 +1242,9 @@
 		imgp->map_flags |= MAP_ASLR_IGNSTART;
 	}
 
+	if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0)
+		imgp->map_flags |= MAP_WXORX;
+
 	error = exec_new_vmspace(imgp, sv);
 	vmspace = imgp->proc->p_vmspace;
 	map = &vmspace->vm_map;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1074,12 +1074,12 @@
 		pmap_remove_pages(vmspace_pmap(vmspace));
 		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
 		/*
-		 * An exec terminates mlockall(MCL_FUTURE), ASLR state
-		 * must be re-evaluated.
+		 * An exec terminates mlockall(MCL_FUTURE).
+		 * ASLR and W^X states must be re-evaluated.
 		 */
 		vm_map_lock(map);
 		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
-		    MAP_ASLR_IGNSTART);
+		    MAP_ASLR_IGNSTART | MAP_WXORX);
 		vm_map_unlock(map);
 	} else {
 		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -228,6 +228,7 @@
 #define	MAP_ASLR		0x08	/* enabled ASLR */
 #define	MAP_ASLR_IGNSTART	0x10
 #define	MAP_REPLENISH		0x20
+#define	MAP_WXORX		0x40	/* enforce W^X */
 
 #ifdef _KERNEL
 #if defined(KLD_MODULE) && !defined(KLD_TIED)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1671,6 +1671,10 @@
 	if (start == end || !vm_map_range_valid(map, start, end))
 		return (KERN_INVALID_ADDRESS);
 
+	if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
+	    VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
+		return (KERN_PROTECTION_FAILURE);
+
 	/*
 	 * Find the entry prior to the proposed starting address; if it's part
 	 * of an existing entry, this range is bogus.
@@ -2751,6 +2755,13 @@
 	in_tran = NULL;
 	vm_map_lock(map);
 
+	if ((map->flags & MAP_WXORX) != 0 && (new_prot &
+	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
+	    VM_PROT_EXECUTE)) {
+		vm_map_unlock(map);
+		return (KERN_PROTECTION_FAILURE);
+	}
+
 	/*
 	 * Ensure that we are not concurrently wiring pages. vm_map_wire() may
 	 * need to fault pages into the map and will drop the map lock while
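
For reviewers who want to exercise the policy from userland, here is a minimal
test sketch (not part of the patch). It assumes the tunable has been cleared
(kern.elf64.allow_wx=0, or kern.elf32 for 32-bit images) before the test binary
is exec'ed, and that the binary has not opted out via the wxneeded feature bit
(NT_FREEBSD_FCTL_WXNEEDED, settable with elfctl -e +wxneeded). The expected
EACCES errno assumes the usual vm_mmap_to_errno() translation of
KERN_PROTECTION_FAILURE.

/*
 * Userland sketch of the W^X policy added by this patch: with MAP_WXORX
 * set on the process's vm_map, both a fresh W+X mapping and a later
 * protection upgrade to W+X should be refused.
 */
#include <sys/mman.h>

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t ps = (size_t)getpagesize();
	void *p;

	/* Requesting W+X in a single mmap() call is rejected in
	 * vm_map_insert(). */
	p = mmap(NULL, ps, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap(RWX): %s (expected under W^X)\n",
		    strerror(errno));

	/* A plain RW mapping is unaffected... */
	p = mmap(NULL, ps, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap(RW)");

	/* ...but upgrading it to W+X is rejected in vm_map_protect(). */
	if (mprotect(p, ps, PROT_READ | PROT_WRITE | PROT_EXEC) == -1)
		printf("mprotect(RWX): %s (expected under W^X)\n",
		    strerror(errno));

	return (0);
}

Note that flipping the sysctl only affects images exec'ed afterwards: MAP_WXORX
is decided per-image in the ELF activator (imgact_elf.c) and is cleared along
with the ASLR flags on exec in kern_exec.c, so already-running processes keep
the policy they started with.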