Index: sys/kern/imgact_elf.c
===================================================================
--- sys/kern/imgact_elf.c
+++ sys/kern/imgact_elf.c
@@ -209,6 +209,12 @@
     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
     ": enable stack address randomization");
 
+static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64;
+SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN,
+    &__elfN(aslr_shared_page), 0,
+    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
+    ": enable shared page address randomization");
+
 static int __elfN(sigfastblock) = 1;
 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
     CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
@@ -1305,6 +1311,8 @@
 			imgp->map_flags |= MAP_ASLR_IGNSTART;
 		if (__elfN(aslr_stack))
 			imgp->map_flags |= MAP_ASLR_STACK;
+		if (__elfN(aslr_shared_page))
+			imgp->imgp_flags |= IMGP_ASLR_SHARED_PAGE;
 	}
 
 	if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 &&
Index: sys/kern/kern_exec.c
===================================================================
--- sys/kern/kern_exec.c
+++ sys/kern/kern_exec.c
@@ -1110,8 +1110,7 @@
 }
 
 /*
- * Run down the current address space and install a new one.  Map the shared
- * page.
+ * Run down the current address space and install a new one.
  */
 int
 exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
@@ -1120,7 +1119,6 @@
 	struct proc *p = imgp->proc;
 	struct vmspace *vmspace = p->p_vmspace;
 	struct thread *td = curthread;
-	vm_object_t obj;
 	vm_offset_t sv_minuser;
 	vm_map_t map;
 
@@ -1168,22 +1166,6 @@
 	}
 	map->flags |= imgp->map_flags;
 
-	/* Map a shared page */
-	obj = sv->sv_shared_page_obj;
-	if (obj != NULL) {
-		vm_object_reference(obj);
-		error = vm_map_fixed(map, obj, 0,
-		    sv->sv_shared_page_base, sv->sv_shared_page_len,
-		    VM_PROT_READ | VM_PROT_EXECUTE,
-		    VM_PROT_READ | VM_PROT_EXECUTE,
-		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
-		if (error != KERN_SUCCESS) {
-			vm_object_deallocate(obj);
-			return (vm_mmap_to_errno(error));
-		}
-		vmspace->vm_shp_base = sv->sv_shared_page_base;
-	}
-
 	return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
 }
 
@@ -1199,9 +1181,11 @@
 	vm_map_t map;
 	struct vmspace *vmspace;
 	vm_offset_t stack_addr, stack_top;
+	vm_offset_t sharedpage_addr;
 	u_long ssiz;
 	int error, find_space, stack_off;
 	vm_prot_t stack_prot;
+	vm_object_t obj;
 
 	p = imgp->proc;
 	sv = p->p_sysent;
@@ -1253,6 +1237,42 @@
 		stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
 	}
 
+	/* Map a shared page */
+	obj = sv->sv_shared_page_obj;
+	if (obj == NULL) {
+		sharedpage_addr = 0;
+		goto out;
+	}
+
+	/*
+	 * If randomization is disabled, request the exact address
+	 * provided by the sysentvec (VMFS_NO_SPACE); otherwise let
+	 * the vm logic pick any address above the .data section.
+	 * The same logic is used for stack address randomization.
+	 */
+	if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) {
+		sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
+		    lim_max(curthread, RLIMIT_DATA));
+		find_space = VMFS_ANY_SPACE;
+	} else {
+		sharedpage_addr = sv->sv_shared_page_base;
+		find_space = VMFS_NO_SPACE;
+	}
+	vm_object_reference(obj);
+	error = vm_map_find(map, obj, 0,
+	    &sharedpage_addr, sv->sv_shared_page_len,
+	    sv->sv_maxuser, find_space,
+	    VM_PROT_READ | VM_PROT_EXECUTE,
+	    VM_PROT_READ | VM_PROT_EXECUTE,
+	    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
+	if (error != KERN_SUCCESS) {
+		uprintf("%s: mapping shared page at addr: %p "
+		    "failed, mach error %d errno %d\n", __func__,
+		    (void *)sharedpage_addr, error, vm_mmap_to_errno(error));
+		vm_object_deallocate(obj);
+		return (vm_mmap_to_errno(error));
+	}
+out:
 	/*
 	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
 	 * are still used to enforce the stack rlimit on the process stack.
@@ -1260,6 +1280,7 @@
 	vmspace->vm_maxsaddr = (char *)stack_addr;
 	vmspace->vm_stacktop = stack_top;
 	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
+	vmspace->vm_shp_base = sharedpage_addr;
 
 	return (0);
 }
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c
+++ sys/kern/kern_proc.c
@@ -3245,6 +3245,9 @@
 		kvm.kvm_map_flags |= KMAP_FLAG_WXORX;
 	if ((vmspace->vm_map.flags & MAP_ASLR_STACK) != 0)
 		kvm.kvm_map_flags |= KMAP_FLAG_ASLR_STACK;
+	if (vmspace->vm_shp_base != p->p_sysent->sv_shared_page_base &&
+	    PROC_HAS_SHP(p))
+		kvm.kvm_map_flags |= KMAP_FLAG_ASLR_SHARED_PAGE;
 
 #ifdef COMPAT_FREEBSD32
 	if (SV_CURPROC_FLAG(SV_ILP32)) {
Index: sys/sys/imgact.h
===================================================================
--- sys/sys/imgact.h
+++ sys/sys/imgact.h
@@ -92,6 +92,8 @@
 	bool opened;		/* we have opened executable vnode */
 	bool textset;
 	u_int map_flags;
+#define	IMGP_ASLR_SHARED_PAGE	0x1
+	uint32_t imgp_flags;
 };
 
 #ifdef _KERNEL
Index: sys/sys/user.h
===================================================================
--- sys/sys/user.h
+++ sys/sys/user.h
@@ -625,6 +625,7 @@
 #define	KMAP_FLAG_ASLR_IGNSTART	0x04	/* ASLR may map into sbrk grow region */
 #define	KMAP_FLAG_WXORX		0x08	/* W^X mapping policy is enforced */
 #define	KMAP_FLAG_ASLR_STACK	0x10	/* the stack location is randomized */
+#define	KMAP_FLAG_ASLR_SHARED_PAGE 0x20	/* the shared page location is randomized */
 
 struct kinfo_vm_layout {
 	uintptr_t kvm_min_user_addr;
Index: tests/sys/kern/kern_copyin.c
===================================================================
--- tests/sys/kern/kern_copyin.c
+++ tests/sys/kern/kern_copyin.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include <sys/mman.h>
 #include
 #include
 
@@ -104,6 +105,8 @@
 {
 	char template[] = "copyin.XXXXXX";
 	uintptr_t maxuser;
+	size_t size;
+	void *addr;
 
 #if defined(__mips__)
 	/*
@@ -119,6 +122,16 @@
 #else
 	maxuser = VM_MAXUSER_ADDRESS;
 #endif
+	size = sysconf(_SC_PAGESIZE);
+
+	/*
+	 * Since the shared page address can be randomized, we need to make
+	 * sure that something is mapped at the top of the user address space.
+	 * Otherwise reading bytes from maxuser-X will fail, rendering this
+	 * test useless.
+	 */
+	addr = mmap((void *)(maxuser - size), size, PROT_READ,
+	    MAP_ANON | MAP_FIXED | MAP_EXCL, -1, 0);
 
 	scratch_file = mkstemp(template);
 	ATF_REQUIRE(scratch_file != -1);
@@ -141,6 +154,9 @@
 	ATF_CHECK(copyin_checker(ADDR_SIGNED, 1) == EFAULT);
 	ATF_CHECK(copyin_checker2(ADDR_SIGNED) == EFAULT);
 #endif
+
+	if (addr != MAP_FAILED)
+		munmap(addr, size);
 }
 
 ATF_TP_ADD_TCS(tp)
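The new knob can be exercised from userland without rebooting. The sketch below is illustrative only and is not part of the change: it assumes a 64-bit host, where ASLR_NODE_OID expands to kern.elf64.aslr, so the sysctl added in imgact_elf.c is named kern.elf64.aslr.shared_page (32-bit images use kern.elf32.aslr instead).

/*
 * Illustrative only: read the tunable added above via sysctlbyname(3).
 * The OID name assumes __ELF_WORD_SIZE == 64.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int enabled;
	size_t len = sizeof(enabled);

	if (sysctlbyname("kern.elf64.aslr.shared_page", &enabled, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (EXIT_FAILURE);
	}
	printf("shared page randomization: %s\n",
	    enabled != 0 ? "enabled" : "disabled");
	return (EXIT_SUCCESS);
}

Note that the tunable only marks images at exec time (IMGP_ASLR_SHARED_PAGE), so already running processes keep whatever shared page placement they were started with.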
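To see how the KMAP_FLAG_ASLR_SHARED_PAGE bit set in kern_proc.c reaches userland, a process can query its own kinfo_vm_layout. This is a minimal sketch under the assumption that the existing KERN_PROC_VM_LAYOUT sysctl is used; it only inspects kvm_map_flags, the field this change touches.

/*
 * Illustrative only: fetch this process's VM layout and test the
 * KMAP_FLAG_ASLR_SHARED_PAGE bit added to sys/user.h above.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kinfo_vm_layout kvm;
	size_t len = sizeof(kvm);
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_VM_LAYOUT, getpid() };

	if (sysctl(mib, nitems(mib), &kvm, &len, NULL, 0) == -1) {
		perror("sysctl");
		return (1);
	}
	printf("shared page location randomized: %s\n",
	    (kvm.kvm_map_flags & KMAP_FLAG_ASLR_SHARED_PAGE) != 0 ?
	    "yes" : "no");
	return (0);
}

Because kern_proc.c reports the bit only when vm_shp_base differs from sv_shared_page_base, a process whose randomized placement happened to land at the default address would still read "no" even with the knob enabled.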