diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -525,7 +525,7 @@
 int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
     vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
 int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
-void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
+vm_map_entry_t vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
     vm_map_entry_t entry);
 void vm_map_startup (void);
 int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1601,20 +1601,16 @@
 }
 
 /*
- * vm_map_insert:
- *
- *	Inserts the given whole VM object into the target
- *	map at the specified address range.  The object's
- *	size should match that of the address range.
- *
- *	Requires that the map be locked, and leaves it so.
- *
- *	If object is non-NULL, ref count must be bumped by caller
- *	prior to making call to account for the new entry.
+ * vm_map_insert1() is identical to vm_map_insert() except that it
+ * returns the newly inserted map entry in '*res'.  In case the new
+ * entry is coalesced with a neighbor or an existing entry was
+ * resized, that entry is returned.  In any case, the returned entry
+ * covers the specified address range.
  */
-int
-vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
-    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
+static int
+vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow,
+    vm_map_entry_t *res)
 {
 	vm_map_entry_t new_entry, next_entry, prev_entry;
 	struct ucred *cred;
@@ -1761,7 +1757,8 @@
 			map->size += end - prev_entry->end;
 			vm_map_entry_resize(map, prev_entry,
 			    end - prev_entry->end);
-			vm_map_try_merge_entries(map, prev_entry, next_entry);
+			*res = vm_map_try_merge_entries(map, prev_entry,
+			    next_entry);
 			return (KERN_SUCCESS);
 		}
 
@@ -1822,7 +1819,7 @@
 	 * other cases, which are less common.
 	 */
 	vm_map_try_merge_entries(map, prev_entry, new_entry);
-	vm_map_try_merge_entries(map, new_entry, next_entry);
+	*res = vm_map_try_merge_entries(map, new_entry, next_entry);
 
 	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
 		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -1832,6 +1829,28 @@
 	return (KERN_SUCCESS);
 }
 
+/*
+ * vm_map_insert:
+ *
+ *	Inserts the given whole VM object into the target
+ *	map at the specified address range.  The object's
+ *	size should match that of the address range.
+ *
+ *	Requires that the map be locked, and leaves it so.
+ *
+ *	If object is non-NULL, ref count must be bumped by caller
+ *	prior to making call to account for the new entry.
+ */
+int
+vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
+{
+	vm_map_entry_t res;
+
+	return (vm_map_insert1(map, object, offset, start, end, prot, max,
+	    cow, &res));
+}
+
 /*
  * vm_map_findspace:
  *
@@ -2325,7 +2344,7 @@
  *
  * The map must be locked.
  */
-void
+vm_map_entry_t
 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
     vm_map_entry_t entry)
 {
@@ -2335,7 +2354,9 @@
 	    vm_map_mergeable_neighbors(prev_entry, entry)) {
 		vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
 		vm_map_merged_neighbor_dispose(map, prev_entry);
+		return (entry);
 	}
+	return (prev_entry);
 }
 
 /*
@@ -4566,9 +4587,9 @@
 	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
 	if (gap_bot == gap_top)
 		return (KERN_SUCCESS);
-	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
+	rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
 	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
-	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
+	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP), &gap_entry);
 	if (rv == KERN_SUCCESS) {
 		/*
 		 * Gap can never successfully handle a fault, so
@@ -4579,8 +4600,6 @@
 		 * store the original stack protections into the
 		 * object offset.
 		 */
-		gap_entry = orient == MAP_STACK_GROWS_DOWN ?
-		    vm_map_entry_pred(new_entry) : vm_map_entry_succ(new_entry);
 		gap_entry->next_read = sgp;
 		gap_entry->offset = prot;
 	} else {
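
Reviewer note, not part of the patch: a sketch of the new caller pattern that vm_map_insert1() enables. The entry returned through '*res' is the one that actually covers [start, end), whether it is a fresh entry or a coalesced/resized neighbor, so the caller can update it directly instead of re-deriving it from the map, as the old stack-gap code did with vm_map_entry_pred()/vm_map_entry_succ(). The helper name insert_and_mark is hypothetical, and since vm_map_insert1() is static it would have to live in vm_map.c:

/*
 * Hypothetical helper sketching the vm_map_insert1() contract.
 * Assumes the map is write-locked, as vm_map_insert() requires.
 */
static int
insert_and_mark(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;
	int rv;

	rv = vm_map_insert1(map, NULL, 0, start, end,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0, &entry);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* The returned entry covers the range even after coalescing. */
	KASSERT(entry->start <= start && entry->end >= end,
	    ("entry %p does not cover 0x%jx-0x%jx", entry,
	    (uintmax_t)start, (uintmax_t)end));
	/* ... update 'entry' here ... */
	return (KERN_SUCCESS);
}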
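The relocated vm_map_insert() comment keeps the old contract unchanged. As a concrete reading of its ref-count rule (my sketch, not from the patch): the caller donates an object reference to the new map entry, and must take it back if the insertion fails:

	/* Sketch only: caller-supplied reference for the new map entry. */
	vm_object_reference(object);
	rv = vm_map_insert(map, object, offset, start, end, prot, max, 0);
	if (rv != KERN_SUCCESS)
		vm_object_deallocate(object);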
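Finally, the return convention that the vm_map_try_merge_entries() hunk implements, spelled out (again a sketch of mine): on a merge, prev_entry is unlinked and disposed, and 'entry', grown to cover prev_entry's range, is returned; with no merge, prev_entry is returned unchanged. Either way the result covers the range prev_entry covered before the call, so a caller keeps one pointer that is guaranteed valid:

	/* If the entries merged, the old prev_entry was freed; use the result. */
	prev_entry = vm_map_try_merge_entries(map, prev_entry, entry);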