Index: sys/security/mac/mac_process.c =================================================================== --- sys/security/mac/mac_process.c +++ sys/security/mac/mac_process.c @@ -252,7 +252,7 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred, struct vm_map *map) { - vm_map_entry_t vme; + vm_map_entry_t prev, vme; int result; vm_prot_t revokeperms; vm_object_t backing_object, object; @@ -264,7 +264,8 @@ return; vm_map_lock(map); - for (vme = map->header.next; vme != &map->header; vme = vme->next) { + for (prev = &map->header; + (vme = prev->next) != &map->header; prev = vme) { if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) { mac_proc_vm_revoke_recurse(td, cred, vme->object.sub_map); @@ -363,7 +364,7 @@ } pmap_protect(map->pmap, vme->start, vme->end, vme->protection & ~revokeperms); - vm_map_simplify_entry(map, vme); + vm_map_try_merge_entries(map, prev, vme); } } vm_map_unlock(map); Index: sys/vm/vm_map.h =================================================================== --- sys/vm/vm_map.h +++ sys/vm/vm_map.h @@ -418,7 +418,8 @@ boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t); int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); -void vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry); +void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, + vm_map_entry_t entry); void vm_map_startup (void); int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t); int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); Index: sys/vm/vm_map.c =================================================================== --- sys/vm/vm_map.c +++ sys/vm/vm_map.c @@ -1546,7 +1546,7 @@ map->size += end - prev_entry->end; vm_map_entry_resize(map, prev_entry, end - prev_entry->end); - vm_map_simplify_entry(map, prev_entry); + vm_map_try_merge_entries(map, prev_entry, prev_entry->next); return (KERN_SUCCESS); } @@ -1606,7 +1606,8 @@ * with 
the previous entry when object is NULL. Here, we handle the * other cases, which are less common. */ - vm_map_simplify_entry(map, new_entry); + vm_map_try_merge_entries(map, prev_entry, new_entry); + vm_map_try_merge_entries(map, new_entry, new_entry->next); if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), @@ -2075,34 +2076,24 @@ } /* - * vm_map_simplify_entry: + * vm_map_try_merge_entries: * - * Simplify the given map entry by merging with either neighbor. This - * routine also has the ability to merge with both neighbors. + * Compare the given map entry to its predecessor, and merge it into its + * predecessor if possible. The entry remains valid, and may be extended. + * The predecessor may be deleted. * * The map must be locked. - * - * This routine guarantees that the passed entry remains valid (though - * possibly extended). When merging, this routine may delete one or - * both neighbors. */ void -vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) +vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry) { - vm_map_entry_t next, prev; - if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0) - return; - prev = entry->prev; - if (vm_map_mergeable_neighbors(prev, entry)) { + VM_MAP_ASSERT_LOCKED(map); + if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && + vm_map_mergeable_neighbors(prev, entry)) { vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT); vm_map_merged_neighbor_dispose(map, prev); } - next = entry->next; - if (vm_map_mergeable_neighbors(entry, next)) { - vm_map_entry_unlink(map, next, UNLINK_MERGE_PREV); - vm_map_merged_neighbor_dispose(map, next); - } } /* @@ -2176,7 +2167,7 @@ * This routine is called only when it is known that * the entry must be split. */ -static void +static vm_map_entry_t _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) { vm_map_entry_t new_entry; @@ -2218,6 +2209,7 @@ * left the same. 
*/ } + return (new_entry); } /* @@ -2436,6 +2428,29 @@ } /* + * vm_map_lookup_entry_and_prev: [ internal use only ] + * + * Finds the map entry that contains the predecessor to the given address, + * or is the predecessor of the entry that starts with the given address. + * Set entry to the map entry that will contain the given address after + * clipping. + */ +static vm_map_entry_t +vm_map_lookup_entry_and_prev(vm_map_t map, vm_offset_t start, + vm_map_entry_t *entry) +{ + vm_map_entry_t prev; + + if (start == 0) + prev = &map->header; + else + vm_map_lookup_entry(map, start - 1, &prev); + *entry = prev->end > start ? prev : prev->next; + return (prev); +} + + +/* * vm_map_protect: * * Sets the protection of the specified address @@ -2447,7 +2462,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_prot_t new_prot, boolean_t set_max) { - vm_map_entry_t current, entry, in_tran; + vm_map_entry_t current, entry, in_tran, prev; vm_object_t obj; struct ucred *cred; vm_prot_t old_prot; @@ -2469,10 +2484,8 @@ vm_map_wait_busy(map); VM_MAP_RANGE_CHECK(map, start, end); + prev = vm_map_lookup_entry_and_prev(map, start, &entry); - if (!vm_map_lookup_entry(map, start, &entry)) - entry = entry->next; - /* * Make a first pass to check for protection violations. */ @@ -2513,7 +2526,8 @@ * some may now be mergeable. */ rv = KERN_SUCCESS; - vm_map_clip_start(map, entry, start); + if (prev == entry) + prev = _vm_map_clip_start(map, entry, start); for (current = entry; current->start < end; current = current->next) { vm_map_clip_end(map, current, end); @@ -2572,7 +2586,8 @@ * [Note that clipping is not necessary the second time.] 
*/ for (current = entry; current->start < end; - vm_map_simplify_entry(map, current), current = current->next) { + vm_map_try_merge_entries(map, prev, current), + prev = current, current = prev->next) { if (rv != KERN_SUCCESS || (current->eflags & MAP_ENTRY_GUARD) != 0) continue; @@ -2610,6 +2625,7 @@ #undef MASK } } + vm_map_try_merge_entries(map, prev, current); vm_map_unlock(map); return (rv); } @@ -2629,7 +2645,7 @@ vm_offset_t end, int behav) { - vm_map_entry_t current, entry; + vm_map_entry_t current, entry, prev; bool modify_map; /* @@ -2667,14 +2683,8 @@ * Locate starting entry and clip if necessary. */ VM_MAP_RANGE_CHECK(map, start, end); + prev = vm_map_lookup_entry_and_prev(map, start, &entry); - if (vm_map_lookup_entry(map, start, &entry)) { - if (modify_map) - vm_map_clip_start(map, entry, start); - } else { - entry = entry->next; - } - if (modify_map) { /* * madvise behaviors that are implemented in the vm_map_entry. @@ -2682,8 +2692,10 @@ * We clip the vm_map_entry so that behavioral changes are * limited to the specified address range. 
*/ + if (prev == entry) + prev = _vm_map_clip_start(map, entry, start); for (current = entry; current->start < end; - current = current->next) { + prev = current, current = prev->next) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) continue; @@ -2714,8 +2726,9 @@ default: break; } - vm_map_simplify_entry(map, current); + vm_map_try_merge_entries(map, prev, current); } + vm_map_try_merge_entries(map, prev, current); vm_map_unlock(map); } else { vm_pindex_t pstart, pend; @@ -2803,8 +2816,7 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_inherit_t new_inheritance) { - vm_map_entry_t entry; - vm_map_entry_t temp_entry; + vm_map_entry_t entry, prev; switch (new_inheritance) { case VM_INHERIT_NONE: @@ -2819,19 +2831,20 @@ return (KERN_SUCCESS); vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); - if (vm_map_lookup_entry(map, start, &temp_entry)) { - entry = temp_entry; - vm_map_clip_start(map, entry, start); - } else - entry = temp_entry->next; + prev = vm_map_lookup_entry_and_prev(map, start, &entry); + if (entry == prev) + prev = _vm_map_clip_start(map, entry, start); + while (entry->start < end) { vm_map_clip_end(map, entry, end); if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || new_inheritance != VM_INHERIT_ZERO) entry->inheritance = new_inheritance; - vm_map_simplify_entry(map, entry); + vm_map_try_merge_entries(map, prev, entry); + prev = entry; entry = entry->next; } + vm_map_try_merge_entries(map, prev, entry); vm_map_unlock(map); return (KERN_SUCCESS); } @@ -2894,7 +2907,7 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) { - vm_map_entry_t entry, first_entry; + vm_map_entry_t entry, prev; int rv; bool first_iteration, holes_ok, need_wakeup, user_unwire; @@ -2904,16 +2917,12 @@ user_unwire = (flags & VM_MAP_WIRE_USER) != 0; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); - if (!vm_map_lookup_entry(map, start, &first_entry)) { - if (holes_ok) - first_entry = first_entry->next; - else { - 
vm_map_unlock(map); - return (KERN_INVALID_ADDRESS); - } + prev = vm_map_lookup_entry_and_prev(map, start, &entry); + if (prev != entry && entry->start > start && !holes_ok) { + vm_map_unlock(map); + return (KERN_INVALID_ADDRESS); } first_iteration = true; - entry = first_entry; rv = KERN_SUCCESS; while (entry->start < end) { if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { @@ -2930,10 +2939,13 @@ rv = KERN_INVALID_ADDRESS; break; } - first_entry = first_iteration ? entry : NULL; + prev = NULL; continue; } first_iteration = false; + if (prev == entry) + prev = _vm_map_clip_start(map, entry, start); + else + vm_map_clip_start(map, entry, start); - vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); /* @@ -2942,7 +2955,7 @@ */ KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && entry->wiring_thread == NULL, - ("owned map entry %p", entry)); + ("%s: owned map entry %p", __func__, entry)); entry->eflags |= MAP_ENTRY_IN_TRANSITION; entry->wiring_thread = curthread; /* @@ -2967,12 +2980,13 @@ entry = entry->next; } need_wakeup = false; - if (first_entry == NULL && - !vm_map_lookup_entry(map, start, &first_entry)) { - KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); - first_entry = first_entry->next; + if (prev == NULL) { + prev = vm_map_lookup_entry_and_prev(map, start, &entry); + KASSERT(entry->start == start || holes_ok, + ("%s: lookup failed", __func__)); } - for (entry = first_entry; entry->start < end; entry = entry->next) { + for (entry = prev->next; entry->start < end; + prev = entry, entry = entry->next) { /* * If holes_ok was specified, an empty * space in the unwired region could have been mapped @@ -2985,7 +2999,7 @@ if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || entry->wiring_thread != curthread) { KASSERT(holes_ok, - ("%s: !HOLESOK and new/changed entry", __func__)); + ("%s: !HOLESOK and new/changed entry", __func__)); continue; } @@ -2999,17 +3013,18 @@ entry->eflags &= ~MAP_ENTRY_USER_WIRED; } KASSERT((entry->eflags & 
MAP_ENTRY_IN_TRANSITION) != 0, - ("vm_map_unwire: in-transition flag missing %p", entry)); + ("%s: in-transition flag missing %p", __func__, entry)); KASSERT(entry->wiring_thread == curthread, - ("vm_map_unwire: alien wire %p", entry)); + ("%s: alien wire %p", __func__, entry)); entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; entry->wiring_thread = NULL; if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; need_wakeup = true; } - vm_map_simplify_entry(map, entry); + vm_map_try_merge_entries(map, prev, entry); } + vm_map_try_merge_entries(map, prev, entry); vm_map_unlock(map); if (need_wakeup) vm_map_wakeup(map); @@ -3095,7 +3110,7 @@ int vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) { - vm_map_entry_t entry, first_entry, tmp_entry; + vm_map_entry_t entry, prev, tmp_entry; vm_offset_t faddr, saved_end, saved_start; u_long npages; u_int last_timestamp; @@ -3113,14 +3128,10 @@ holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; user_wire = (flags & VM_MAP_WIRE_USER) != 0; VM_MAP_RANGE_CHECK(map, start, end); - if (!vm_map_lookup_entry(map, start, &first_entry)) { - if (holes_ok) - first_entry = first_entry->next; - else - return (KERN_INVALID_ADDRESS); - } + prev = vm_map_lookup_entry_and_prev(map, start, &entry); + if (prev != entry && entry->start > start && !holes_ok) + return (KERN_INVALID_ADDRESS); first_iteration = true; - entry = first_entry; while (entry->start < end) { if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { /* @@ -3134,11 +3145,14 @@ rv = KERN_INVALID_ADDRESS; goto done; } - first_entry = first_iteration ? entry : NULL; + prev = NULL; continue; } first_iteration = false; - vm_map_clip_start(map, entry, start); + if (prev == entry) + prev = _vm_map_clip_start(map, entry, start); + else + vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); /* * Mark the entry in case the map lock is released. 
(See @@ -3146,7 +3160,7 @@ */ KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && entry->wiring_thread == NULL, - ("owned map entry %p", entry)); + ("%s: owned map entry %p", __func__, entry)); entry->eflags |= MAP_ENTRY_IN_TRANSITION; entry->wiring_thread = curthread; if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 @@ -3201,11 +3215,8 @@ if (!vm_map_lookup_entry(map, saved_start, &tmp_entry)) KASSERT(false, - ("vm_map_wire: lookup failed")); - if (entry == first_entry) - first_entry = tmp_entry; - else - first_entry = NULL; + ("%s: lookup failed", __func__)); + prev = NULL; entry = tmp_entry; while (entry->end < saved_end) { /* @@ -3247,12 +3258,13 @@ rv = KERN_SUCCESS; done: need_wakeup = false; - if (first_entry == NULL && - !vm_map_lookup_entry(map, start, &first_entry)) { - KASSERT(holes_ok, ("vm_map_wire: lookup failed")); - first_entry = first_entry->next; + if (prev == NULL) { + prev = vm_map_lookup_entry_and_prev(map, start, &entry); + KASSERT(entry->start == start || holes_ok, + ("%s: lookup failed", __func__)); } - for (entry = first_entry; entry->start < end; entry = entry->next) { + for (entry = prev->next; entry->start < end; + prev = entry, entry = entry->next) { /* * If holes_ok was specified, an empty * space in the unwired region could have been mapped @@ -3265,7 +3277,7 @@ if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || entry->wiring_thread != curthread) { KASSERT(holes_ok, - ("vm_map_wire: !HOLESOK and new/changed entry")); + ("%s: !HOLESOK and new/changed entry", __func__)); continue; } @@ -3295,9 +3307,9 @@ entry->wired_count--; } KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, - ("vm_map_wire: in-transition flag missing %p", entry)); + ("%s: in-transition flag missing %p", __func__, entry)); KASSERT(entry->wiring_thread == curthread, - ("vm_map_wire: alien wire %p", entry)); + ("%s: alien wire %p", __func__, entry)); entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WIRE_SKIPPED); entry->wiring_thread 
= NULL; @@ -3305,8 +3317,9 @@ entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; need_wakeup = true; } - vm_map_simplify_entry(map, entry); + vm_map_try_merge_entries(map, prev, entry); } + vm_map_try_merge_entries(map, prev, entry); if (need_wakeup) vm_map_wakeup(map); return (rv); @@ -3944,8 +3957,8 @@ old_entry->object.vm_object); /* - * As in vm_map_simplify_entry(), the - * vnode lock will not be acquired in + * As in vm_map_merged_neighbor_dispose(), + * the vnode lock will not be acquired in * this call to vm_object_deallocate(). */ vm_object_deallocate(object);