diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c
--- a/sys/compat/freebsd32/freebsd32_misc.c
+++ b/sys/compat/freebsd32/freebsd32_misc.c
@@ -512,7 +512,7 @@
 		prot |= PROT_EXEC;
 #endif
 	return (kern_mprotect(td, (uintptr_t)PTRIN(uap->addr), uap->len,
-	    prot));
+	    prot, 0));
 }
 
 int
diff --git a/sys/compat/linux/linux_mmap.c b/sys/compat/linux/linux_mmap.c
--- a/sys/compat/linux/linux_mmap.c
+++ b/sys/compat/linux/linux_mmap.c
@@ -229,16 +229,22 @@
 int
 linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
 {
+	int flags = 0;
 
-	/* XXX Ignore PROT_GROWSDOWN and PROT_GROWSUP for now. */
-	prot &= ~(LINUX_PROT_GROWSDOWN | LINUX_PROT_GROWSUP);
-	if ((prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
+	/* XXX Ignore PROT_GROWSUP for now. */
+	prot &= ~LINUX_PROT_GROWSUP;
+	if ((prot & ~(LINUX_PROT_GROWSDOWN | PROT_READ | PROT_WRITE |
+	    PROT_EXEC)) != 0)
 		return (EINVAL);
+	if ((prot & LINUX_PROT_GROWSDOWN) != 0) {
+		prot &= ~LINUX_PROT_GROWSDOWN;
+		flags |= VM_MAP_PROTECT_GROWSDOWN;
+	}
 
 #if defined(__amd64__)
 	linux_fixup_prot(td, &prot);
 #endif
-	return (kern_mprotect(td, addr, len, prot));
+	return (kern_mprotect(td, addr, len, prot, flags));
 }
 
 /*
diff --git a/sys/sys/syscallsubr.h b/sys/sys/syscallsubr.h
--- a/sys/sys/syscallsubr.h
+++ b/sys/sys/syscallsubr.h
@@ -216,7 +216,8 @@
 int	kern_mmap_racct_check(struct thread *td, struct vm_map *map,
 	    vm_size_t size);
 int	kern_mmap_maxprot(struct proc *p, int prot);
-int	kern_mprotect(struct thread *td, uintptr_t addr, size_t size, int prot);
+int	kern_mprotect(struct thread *td, uintptr_t addr, size_t size,
+	    int prot, int flags);
 int	kern_msgctl(struct thread *, int, int, struct msqid_ds *);
 int	kern_msgrcv(struct thread *, int, void *, size_t, long, int, long *);
 int	kern_msgsnd(struct thread *, int, const void *, size_t, int, long);
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -521,11 +521,12 @@
 
 #define	VM_MAP_PROTECT_SET_PROT		0x0001
 #define	VM_MAP_PROTECT_SET_MAXPROT	0x0002
+#define	VM_MAP_PROTECT_GROWSDOWN	0x0004
 
 int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
     vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
 int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
-void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
+vm_map_entry_t vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
     vm_map_entry_t entry);
 void vm_map_startup (void);
 int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -148,6 +148,8 @@
 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
     vm_offset_t failed_addr);
 
+#define	CONTAINS_BITS(set, bits)	((~(set) & (bits)) == 0)
+
 #define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
 	((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
 	!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
@@ -1601,20 +1603,16 @@
 }
 
 /*
- * vm_map_insert:
- *
- *	Inserts the given whole VM object into the target
- *	map at the specified address range.  The object's
- *	size should match that of the address range.
- *
- *	Requires that the map be locked, and leaves it so.
- *
- *	If object is non-NULL, ref count must be bumped by caller
- *	prior to making call to account for the new entry.
+ * vm_map_insert1() is identical to vm_map_insert() except that it also
+ * returns the newly inserted map entry in '*res'.
+ * In case the new entry is coalesced with a neighbor or an existing
+ * entry was resized, that entry is returned.  In any case, the returned
+ * entry covers the specified address range.
  */
-int
-vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
-    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
+static int
+vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow,
+    vm_map_entry_t *res)
 {
 	vm_map_entry_t new_entry, next_entry, prev_entry;
 	struct ucred *cred;
@@ -1761,7 +1759,8 @@
 		map->size += end - prev_entry->end;
 		vm_map_entry_resize(map, prev_entry,
 		    end - prev_entry->end);
-		vm_map_try_merge_entries(map, prev_entry, next_entry);
+		*res = vm_map_try_merge_entries(map, prev_entry,
+		    next_entry);
 		return (KERN_SUCCESS);
 	}
 
@@ -1822,7 +1821,7 @@
 	 * other cases, which are less common.
 	 */
 	vm_map_try_merge_entries(map, prev_entry, new_entry);
-	vm_map_try_merge_entries(map, new_entry, next_entry);
+	*res = vm_map_try_merge_entries(map, new_entry, next_entry);
 
 	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
 		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -1832,6 +1831,28 @@
 	return (KERN_SUCCESS);
 }
 
+/*
+ * vm_map_insert:
+ *
+ *	Inserts the given whole VM object into the target
+ *	map at the specified address range.  The object's
+ *	size should match that of the address range.
+ *
+ *	Requires that the map be locked, and leaves it so.
+ *
+ *	If object is non-NULL, ref count must be bumped by caller
+ *	prior to making call to account for the new entry.
+ */
+int
+vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
+{
+	vm_map_entry_t res;
+
+	return (vm_map_insert1(map, object, offset, start, end, prot, max,
+	    cow, &res));
+}
+
 /*
  * vm_map_findspace:
  *
@@ -2273,7 +2294,8 @@
  * another entry.
  */
 #define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
-    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
+    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC | \
+    MAP_ENTRY_STACK_GAP_UP | MAP_ENTRY_STACK_GAP_DN)
 
 static bool
 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
@@ -2325,7 +2347,7 @@
  *
  *	The map must be locked.
  */
-void
+vm_map_entry_t
 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
     vm_map_entry_t entry)
 {
@@ -2335,7 +2357,9 @@
 	    vm_map_mergeable_neighbors(prev_entry, entry)) {
 		vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
 		vm_map_merged_neighbor_dispose(map, prev_entry);
+		return (entry);
 	}
+	return (prev_entry);
 }
 
 /*
@@ -2701,6 +2725,28 @@
 	VM_OBJECT_RUNLOCK(object);
 }
 
+static void
+vm_map_protect_guard_phase3(vm_map_entry_t entry, vm_prot_t new_prot,
+    vm_prot_t new_maxprot, int flags)
+{
+	vm_prot_t old_prot;
+
+	MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
+	if ((entry->eflags & (MAP_ENTRY_STACK_GAP_UP |
+	    MAP_ENTRY_STACK_GAP_DN)) == 0)
+		return;
+
+	old_prot = PROT_EXTRACT(entry->offset);
+	if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
+		entry->offset = PROT_MAX(new_maxprot) |
+		    (new_maxprot & old_prot);
+	}
+	if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) {
+		entry->offset = new_prot | PROT_MAX(
+		    PROT_MAX_EXTRACT(entry->offset));
+	}
+}
+
 /*
  * vm_map_protect:
  *
@@ -2720,9 +2766,9 @@
 	if (start == end)
 		return (KERN_SUCCESS);
 
-	if ((flags & (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT)) ==
-	    (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT) &&
-	    (new_prot & new_maxprot) != new_prot)
+	if (CONTAINS_BITS(flags, VM_MAP_PROTECT_SET_PROT |
+	    VM_MAP_PROTECT_SET_MAXPROT) &&
+	    !CONTAINS_BITS(new_maxprot, new_prot))
 		return (KERN_OUT_OF_BOUNDS);
 
 again:
@@ -2731,8 +2777,7 @@
 
 	if ((map->flags & MAP_WXORX) != 0 &&
 	    (flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
-	    (new_prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
-	    VM_PROT_EXECUTE)) {
+	    CONTAINS_BITS(new_prot, VM_PROT_WRITE | VM_PROT_EXECUTE)) {
 		vm_map_unlock(map);
 		return (KERN_PROTECTION_FAILURE);
 	}
@@ -2750,23 +2795,36 @@
 	if (!vm_map_lookup_entry(map, start, &first_entry))
 		first_entry = vm_map_entry_succ(first_entry);
 
+	if ((flags & VM_MAP_PROTECT_GROWSDOWN) != 0 &&
+	    (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
+		while (!CONTAINS_BITS(first_entry->eflags,
+		    MAP_ENTRY_GUARD | MAP_ENTRY_STACK_GAP_DN) &&
+		    first_entry != vm_map_entry_first(map))
+			first_entry = vm_map_entry_pred(first_entry);
+		start = first_entry->start;
+	}
+
 	/*
 	 * Make a first pass to check for protection violations.
 	 */
+	old_prot = 0;
+	if ((flags & VM_MAP_PROTECT_SET_PROT) != 0)
+		old_prot |= new_prot;
+	if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0)
+		old_prot |= new_maxprot;
 	for (entry = first_entry; entry->start < end;
 	    entry = vm_map_entry_succ(entry)) {
-		if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
-			continue;
 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
 			vm_map_unlock(map);
 			return (KERN_INVALID_ARGUMENT);
 		}
-		if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
-			new_prot = entry->protection;
-		if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
-			new_maxprot = entry->max_protection;
-		if ((new_prot & entry->max_protection) != new_prot ||
-		    (new_maxprot & entry->max_protection) != new_maxprot) {
+		if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
+			if (!CONTAINS_BITS(PROT_MAX_EXTRACT(entry->offset),
+			    old_prot))
+				return (KERN_PROTECTION_FAILURE);
+			continue;
+		}
+		if (!CONTAINS_BITS(entry->max_protection, old_prot)) {
 			vm_map_unlock(map);
 			return (KERN_PROTECTION_FAILURE);
 		}
@@ -2864,10 +2922,15 @@
 	    entry->start < end;
 	    vm_map_try_merge_entries(map, prev_entry, entry),
 	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
-		if (rv != KERN_SUCCESS ||
-		    (entry->eflags & MAP_ENTRY_GUARD) != 0)
+		if (rv != KERN_SUCCESS)
 			continue;
 
+		if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
+			vm_map_protect_guard_phase3(entry, new_prot,
+			    new_maxprot, flags);
+			continue;
+		}
+
 		old_prot = entry->protection;
 
 		if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
@@ -4553,10 +4616,10 @@
 		gap_bot = top;
 		gap_top = addrbos + max_ssize;
 	}
-	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
+	rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow,
+	    &new_entry);
 	if (rv != KERN_SUCCESS)
 		return (rv);
-	new_entry = vm_map_entry_succ(prev_entry);
 	KASSERT(new_entry->end == top || new_entry->start == bot,
 	    ("Bad entry start/end for new stack entry"));
 	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
@@ -4567,10 +4630,17 @@
 	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
 	if (gap_bot == gap_top)
 		return (KERN_SUCCESS);
-	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
+	rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
 	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
-	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
+	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP), &gap_entry);
 	if (rv == KERN_SUCCESS) {
+		KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0,
+		    ("entry %p not gap %#x", gap_entry, gap_entry->eflags));
+		KASSERT((gap_entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
+		    MAP_ENTRY_STACK_GAP_UP)) != 0,
+		    ("entry %p not stack gap %#x", gap_entry,
+		    gap_entry->eflags));
+
 		/*
 		 * Gap can never successfully handle a fault, so
 		 * read-ahead logic is never used for it.  Re-use
@@ -4580,10 +4650,8 @@
 		 * store the original stack protections in the
 		 * object offset.
 		 */
-		gap_entry = orient == MAP_STACK_GROWS_DOWN ?
-		    vm_map_entry_pred(new_entry) : vm_map_entry_succ(new_entry);
 		gap_entry->next_read = sgp;
-		gap_entry->offset = prot;
+		gap_entry->offset = prot | PROT_MAX(max);
 	} else {
 		(void)vm_map_delete(map, bot, top);
 	}
@@ -4602,8 +4670,8 @@
 	struct vmspace *vm;
 	struct ucred *cred;
 	vm_offset_t gap_end, gap_start, grow_start;
-	vm_size_t grow_amount, guard, max_grow;
-	vm_prot_t prot;
+	vm_size_t grow_amount, guard, max_grow, sgp;
+	vm_prot_t prot, max;
 	rlim_t lmemlim, stacklim, vmemlim;
 	int rv, rv1 __diagused;
 	bool gap_deleted, grow_down, is_procstack;
@@ -4748,7 +4816,9 @@
 		 * The gap_entry "offset" field is overloaded.  See
 		 * vm_map_stack_locked().
 		 */
-		prot = gap_entry->offset;
+		prot = PROT_EXTRACT(gap_entry->offset);
+		max = PROT_MAX_EXTRACT(gap_entry->offset);
+		sgp = gap_entry->next_read;
 
 		grow_start = gap_entry->end - grow_amount;
 		if (gap_entry->start + grow_amount == gap_entry->end) {
@@ -4762,13 +4832,16 @@
 			gap_deleted = false;
 		}
 		rv = vm_map_insert(map, NULL, 0, grow_start,
-		    grow_start + grow_amount, prot, prot, MAP_STACK_GROWS_DOWN);
+		    grow_start + grow_amount, prot, max, MAP_STACK_GROWS_DOWN);
 		if (rv != KERN_SUCCESS) {
 			if (gap_deleted) {
-				rv1 = vm_map_insert(map, NULL, 0, gap_start,
+				rv1 = vm_map_insert1(map, NULL, 0, gap_start,
 				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
-				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
+				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN,
+				    &gap_entry);
 				MPASS(rv1 == KERN_SUCCESS);
+				gap_entry->next_read = sgp;
+				gap_entry->offset = prot | PROT_MAX(max);
 			} else
 				vm_map_entry_resize(map, gap_entry,
 				    grow_amount);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -658,16 +658,17 @@
 sys_mprotect(struct thread *td, struct mprotect_args *uap)
 {
 
-	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
+	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len,
+	    uap->prot, 0));
 }
 
 int
-kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
+kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot,
+    int flags)
 {
 	vm_offset_t addr;
 	vm_size_t pageoff;
 	int vm_error, max_prot;
-	int flags;
 
 	addr = addr0;
 	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
@@ -687,7 +688,7 @@
 	if (addr + size < addr)
 		return (EINVAL);
 
-	flags = VM_MAP_PROTECT_SET_PROT;
+	flags |= VM_MAP_PROTECT_SET_PROT;
 	if (max_prot != 0)
 		flags |= VM_MAP_PROTECT_SET_MAXPROT;
 	vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,