Index: sys/security/mac/mac_process.c
===================================================================
--- sys/security/mac/mac_process.c
+++ sys/security/mac/mac_process.c
@@ -252,7 +252,7 @@
 mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
     struct vm_map *map)
 {
-	vm_map_entry_t vme;
+	vm_map_entry_t prev, vme;
 	int result;
 	vm_prot_t revokeperms;
 	vm_object_t backing_object, object;
@@ -264,7 +264,8 @@
 		return;
 	vm_map_lock(map);
-	for (vme = map->header.next; vme != &map->header; vme = vme->next) {
+	for (prev = &map->header; (vme = prev->next) != &map->header;
+	    prev = vme) {
 		if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
 			mac_proc_vm_revoke_recurse(td, cred,
 			    vme->object.sub_map);
@@ -363,7 +364,7 @@
 			}
 			pmap_protect(map->pmap, vme->start, vme->end,
 			    vme->protection & ~revokeperms);
-			vm_map_try_merge_entries(map, vme->prev, vme);
+			vm_map_try_merge_entries(map, prev, vme);
 		}
 	}
 	vm_map_unlock(map);
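The mac_process.c hunks above show the iteration idiom this patch introduces everywhere: with the back pointer gone from vm_map_entry, a traversal carries its own predecessor. A minimal standalone sketch of the pattern (generic types, not kernel code):

struct entry {
	struct entry *next;
};

struct list {
	struct entry header;		/* circular; header.next is first */
};

static void
walk(struct list *l)
{
	struct entry *prev, *e;

	for (prev = &l->header; (e = prev->next) != &l->header; prev = e) {
		/*
		 * Both e and its predecessor prev are in hand here, so a
		 * helper like vm_map_try_merge_entries(map, prev, e) no
		 * longer needs to read e->prev.
		 */
	}
}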
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -99,7 +99,6 @@
  *	Also included is control information for virtual copy operations.
  */
 struct vm_map_entry {
-	struct vm_map_entry *prev;	/* previous entry */
 	struct vm_map_entry *next;	/* next entry */
 	struct vm_map_entry *left;	/* left child in binary search tree */
 	struct vm_map_entry *right;	/* right child in binary search tree */
@@ -415,7 +414,7 @@
 int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
     vm_pindex_t *, vm_prot_t *, boolean_t *);
 void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
-boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
+bool vm_map_lookup_entry(vm_map_t, vm_offset_t, vm_map_entry_t *);
 int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
 int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
 void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -137,7 +137,7 @@
 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
-    vm_map_entry_t gap_entry);
+    vm_map_entry_t gap_entry, vm_map_entry_t stack_entry);
 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
 #ifdef INVARIANTS
@@ -901,7 +901,7 @@
 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
 {
-	map->header.next = map->header.prev = &map->header;
+	map->header.next = &map->header;
 	map->header.eflags = MAP_ENTRY_HEADER;
 	map->needs_wakeup = FALSE;
 	map->system_map = 0;
@@ -991,6 +991,17 @@
 	    root->right->max_free : right_ancestor->start - root->end);
 }
 
+/*
+ * vm_map_splay_split, vm_map_splay_merge:
+ *
+ *	The Sleator and Tarjan top-down splay algorithm with the following
+ *	variation.  Max_free must be computed bottom-up, so on the downward
+ *	pass (vm_map_splay_split), maintain the left and right spines in
+ *	reverse order, and ensure that the max_free values for those nodes
+ *	store the values of their descendants not on the search path.  Later,
+ *	make a second pass up each side (vm_map_splay_merge) to fix the
+ *	pointers and compute max_free.  The time bound is O(log n) amortized.
+ */
 #define SPLAY_LEFT_STEP(root, y, rlist, test) do {			\
 	vm_size_t max_free;						\
 									\
@@ -1174,56 +1185,6 @@
 }
 
 /*
- * vm_map_splay:
- *
- *	The Sleator and Tarjan top-down splay algorithm with the
- *	following variation.  Max_free must be computed bottom-up, so
- *	on the downward pass, maintain the left and right spines in
- *	reverse order.  Then, make a second pass up each side to fix
- *	the pointers and compute max_free.  The time bound is O(log n)
- *	amortized.
- *
- *	The new root is the vm_map_entry containing "addr", or else an
- *	adjacent entry (lower if possible) if addr is not in the tree.
- *
- *	The map must be locked, and leaves it so.
- *
- *	Returns:	the new root.
- */
-static vm_map_entry_t
-vm_map_splay(vm_map_t map, vm_offset_t addr)
-{
-	vm_map_entry_t llist, rlist, root;
-
-	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
-	if (root != NULL) {
-		/* do nothing */
-	} else if (llist != &map->header) {
-		/*
-		 * Recover the greatest node in the left
-		 * subtree and make it the root.
-		 */
-		root = llist;
-		llist = root->right;
-		root->right = NULL;
-	} else if (rlist != &map->header) {
-		/*
-		 * Recover the least node in the right
-		 * subtree and make it the root.
-		 */
-		root = rlist;
-		rlist = root->left;
-		root->left = NULL;
-	} else {
-		/* There is no root. */
-		return (NULL);
-	}
-	vm_map_splay_merge(map, root, llist, rlist);
-	VM_MAP_ASSERT_CONSISTENT(map);
-	return (root);
-}
-
-/*
  * vm_map_entry_{un,}link:
  *
  *	Insert/remove entries from maps.
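For reference, the textbook Sleator-Tarjan top-down splay that the deleted vm_map_splay() and the surviving vm_map_splay_split()/vm_map_splay_merge() pair derive from looks roughly like this. This is a generic sketch over integer keys, not the kernel's variant: as the new comment says, the kernel keeps the spines in reverse order so max_free can be recomputed on a second, upward pass.

struct node {
	int key;
	struct node *left, *right;
};

/* Splay the node with key (or a neighbor of key) to the root. */
static struct node *
splay(struct node *root, int key)
{
	struct node hdr, *l, *r, *y;

	if (root == NULL)
		return (NULL);
	hdr.left = hdr.right = NULL;
	l = r = &hdr;
	for (;;) {
		if (key < root->key) {
			if (root->left == NULL)
				break;
			if (key < root->left->key) {
				y = root->left;		/* rotate right */
				root->left = y->right;
				y->right = root;
				root = y;
				if (root->left == NULL)
					break;
			}
			r->left = root;			/* link right */
			r = root;
			root = root->left;
		} else if (key > root->key) {
			if (root->right == NULL)
				break;
			if (key > root->right->key) {
				y = root->right;	/* rotate left */
				root->right = y->left;
				y->left = root;
				root = y;
				if (root->right == NULL)
					break;
			}
			l->right = root;		/* link left */
			l = root;
			root = root->right;
		} else
			break;
	}
	l->right = root->left;				/* assemble */
	r->left = root->right;
	root->left = hdr.right;
	root->right = hdr.left;
	return (root);
}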
@@ -1241,9 +1202,8 @@
 	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
 	KASSERT(root == NULL,
 	    ("vm_map_entry_link: link object already mapped"));
-	entry->prev = llist;
+	llist->next = entry;
 	entry->next = rlist;
-	llist->next = rlist->prev = entry;
 	entry->left = entry->right = NULL;
 	vm_map_splay_merge(map, entry, llist, rlist);
 	VM_MAP_ASSERT_CONSISTENT(map);
@@ -1258,25 +1218,25 @@
 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
     enum unlink_merge_type op)
 {
-	vm_map_entry_t llist, rlist, root, y;
+	vm_map_entry_t llist, rlist, root;
 
 	VM_MAP_ASSERT_LOCKED(map);
 	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
 	KASSERT(root != NULL,
 	    ("vm_map_entry_unlink: unlink object not mapped"));
+	vm_map_splay_findprev(root, &llist);
 	vm_map_splay_findnext(root, &rlist);
+	llist->next = rlist;
 	switch (op) {
 	case UNLINK_MERGE_NEXT:
 		rlist->start = root->start;
 		rlist->offset = root->offset;
-		y = root->left;
 		root = rlist;
 		rlist = root->left;
-		root->left = y;
+		root->left = NULL;
 		break;
 	case UNLINK_MERGE_NONE:
-		vm_map_splay_findprev(root, &llist);
 		if (llist != &map->header) {
 			root = llist;
 			llist = root->right;
@@ -1289,9 +1249,6 @@
 		root = NULL;
 		break;
 	}
-	y = entry->next;
-	y->prev = entry->prev;
-	y->prev->next = y;
 	if (root != NULL)
 		vm_map_splay_merge(map, root, llist, rlist);
 	else
@@ -1329,37 +1286,36 @@
 }
 
 /*
- * vm_map_lookup_entry:	[ internal use only ]
+ * vm_map_lookup_helper:	[ internal use only ]
  *
- *	Finds the map entry containing (or
- *	immediately preceding) the specified address
- *	in the given map; the entry is returned
- *	in the "entry" parameter.  The boolean
- *	result indicates whether the address is
- *	actually contained in the map.
+ *	Finds the map entry containing, or a map entry adjacent to, the
+ *	specified address in the given map.  The boolean result indicates
+ *	whether a map entry contains the address.  If no map entry contains
+ *	the address, the parameter lesseq decides whether to provide the
+ *	entry before the address, or the one after it.
+ *
+ *	The entry is returned in the "entry" parameter.  If the address is
+ *	contained in the map, and the parameter nbr is not NULL, then an
+ *	adjacent map entry is saved in nbr.  If ((*entry)->eflags &
+ *	MAP_ENTRY_STACK_GAP_DN), then nbr stores the next entry, and
+ *	otherwise stores the previous one.
  */
-boolean_t
-vm_map_lookup_entry(
-	vm_map_t map,
-	vm_offset_t address,
-	vm_map_entry_t *entry)	/* OUT */
+static bool
+vm_map_lookup_helper(vm_map_t map, vm_offset_t addr, bool lesseq,
+    vm_map_entry_t *entry, vm_map_entry_t *nbr)
 {
-	vm_map_entry_t cur, lbound;
-	boolean_t locked;
+	vm_map_entry_t llist, rlist, root;
+	bool locked, found;
 
 	/*
 	 * If the map is empty, then the map entry immediately preceding
-	 * "address" is the map's header.
+	 * "addr" is the map's header.
 	 */
-	cur = map->root;
-	if (cur == NULL) {
+	root = map->root;
+	if (root == NULL) {
 		*entry = &map->header;
-		return (FALSE);
+		return (false);
 	}
-	if (address >= cur->start && cur->end > address) {
-		*entry = cur;
-		return (TRUE);
-	}
 	if ((locked = vm_map_locked(map)) || sx_try_upgrade(&map->lock)) {
 		/*
@@ -1368,42 +1324,100 @@
 		 * change the map.  Thus, the map's timestamp need not change
 		 * on a temporary upgrade.
 		 */
-		cur = vm_map_splay(map, address);
+		root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
+		found = root != NULL;
+		if (root != NULL) {
+			*entry = root;
+			if (nbr == NULL)
+				;	/* Ignore. */
+			else if ((root->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
+				vm_map_splay_findnext(root, &rlist);
+				root->right = NULL;
+				*nbr = rlist;
+			} else {
+				vm_map_splay_findprev(root, &llist);
+				root->left = NULL;
+				*nbr = llist;
+			}
+		} else if (llist != &map->header) {
+			/*
+			 * Recover the greatest node in the left
+			 * subtree and make it the root.
+			 */
+			*entry = lesseq ? llist : rlist;
+			root = llist;
+			llist = root->right;
+			root->right = NULL;
+		} else {
+			/*
+			 * Recover the least node in the right
+			 * subtree and make it the root.
+			 */
+			*entry = lesseq ? llist : rlist;
+			root = rlist;
+			rlist = root->left;
+			root->left = NULL;
+		}
+		vm_map_splay_merge(map, root, llist, rlist);
+		VM_MAP_ASSERT_CONSISTENT(map);
 		if (!locked)
 			sx_downgrade(&map->lock);
-
-		/*
-		 * If "address" is contained within a map entry, the new root
-		 * is that map entry.  Otherwise, the new root is a map entry
-		 * immediately before or after "address".
-		 */
-		if (address < cur->start) {
-			*entry = &map->header;
-			return (FALSE);
-		}
-		*entry = cur;
-		return (address < cur->end);
+		return (found);
 	}
 
 	/*
 	 * Since the map is only locked for read access, perform a
-	 * standard binary search tree lookup for "address".
+	 * standard binary search tree lookup for "addr".
 	 */
-	lbound = &map->header;
+	llist = rlist = &map->header;
 	do {
-		if (address < cur->start) {
-			cur = cur->left;
-		} else if (cur->end <= address) {
-			lbound = cur;
-			cur = cur->right;
+		if (addr < root->start) {
+			rlist = root;
+			root = root->left;
+		} else if (root->end <= addr) {
+			llist = root;
+			root = root->right;
 		} else {
-			*entry = cur;
-			return (TRUE);
+			*entry = root;
+			if (nbr == NULL)
+				;	/* Ignore. */
+			else if ((root->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
+				/* Make nbr the successor to root. */
+				if (root->right != NULL) {
+					rlist = root->right;
+					while (rlist->left != NULL)
+						rlist = rlist->left;
+				}
+				*nbr = rlist;
+			} else {
+				/* Make nbr the predecessor to root. */
+				if (root->left != NULL) {
+					llist = root->left;
+					while (llist->right != NULL)
+						llist = llist->right;
+				}
+				*nbr = llist;
+			}
+			return (true);
 		}
-	} while (cur != NULL);
-	*entry = lbound;
-	return (FALSE);
+	} while (root != NULL);
+	*entry = lesseq ? llist : rlist;
+	return (false);
 }
 
+bool
+vm_map_lookup_entry(vm_map_t map, vm_offset_t addr,
+    vm_map_entry_t *entry)	/* OUT */
+{
+	return (vm_map_lookup_helper(map, addr, true, entry, NULL));
+}
+
+static bool
+vm_map_lookup_entry_ge(vm_map_t map, vm_offset_t addr,
+    vm_map_entry_t *entry)	/* OUT */
+{
+	return (vm_map_lookup_helper(map, addr, false, entry, NULL));
+}
+
 /*
  * vm_map_insert:
  *
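The read-locked path of vm_map_lookup_helper() relies on a standard BST fact, sketched here in isolation (illustrative code, not the kernel's): during a root-to-target descent, the last node where the search turned right is the in-order predecessor candidate, the last left turn is the successor candidate, and a non-empty subtree on the appropriate side of the target overrides the candidate.

struct node {
	int start, end;			/* half-open range [start, end) */
	struct node *left, *right;
};

static struct node *
lookup(struct node *root, int addr, struct node **pred, struct node **succ)
{
	struct node *n;

	*pred = *succ = NULL;
	while (root != NULL) {
		if (addr < root->start) {
			*succ = root;		/* candidate successor */
			root = root->left;
		} else if (root->end <= addr) {
			*pred = root;		/* candidate predecessor */
			root = root->right;
		} else {
			if ((n = root->left) != NULL) {
				while (n->right != NULL)
					n = n->right;
				*pred = n;	/* max of left subtree */
			}
			if ((n = root->right) != NULL) {
				while (n->left != NULL)
					n = n->left;
				*succ = n;	/* min of right subtree */
			}
			return (root);
		}
	}
	return (NULL);
}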
@@ -2076,9 +2090,9 @@
 /*
  * vm_map_try_merge_entries:
  *
- *	Compare the given map entry to its predecessor, and merge its precessor
- *	into it if possible.  The entry remains valid, and may be extended.
- *	The predecessor may be deleted.
+ *	Compare the given map entry to its predecessor, and merge its
+ *	predecessor into it if possible.  The entry remains valid, and may be
+ *	extended.  The predecessor may be deleted.
  *
  *	The map must be locked.
  */
@@ -2165,7 +2179,7 @@
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
-static void
+static vm_map_entry_t
 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 {
 	vm_map_entry_t new_entry;
@@ -2207,6 +2221,7 @@
 	 * left the same.
 	 */
 	}
+	return (new_entry);
 }
 
 /*
@@ -2300,10 +2315,8 @@
 
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
+	if (vm_map_lookup_entry_ge(map, start, &entry))
 		vm_map_clip_start(map, entry, start);
-	} else
-		entry = entry->next;
 
 	vm_map_clip_end(map, entry, end);
 
@@ -2425,6 +2438,29 @@
 }
 
 /*
+ * vm_map_lookup_entry_and_prev:	[ internal use only ]
+ *
+ *	Finds the map entry that contains the predecessor of the given
+ *	address, or that is itself the predecessor of the entry starting at
+ *	the given address.  Sets *entry to the map entry that will contain
+ *	the given address after clipping.
+ */
+static vm_map_entry_t
+vm_map_lookup_entry_and_prev(vm_map_t map, vm_offset_t start,
+    vm_map_entry_t *entry)
+{
+	vm_map_entry_t prev;
+
+	if (start == 0)
+		prev = &map->header;
+	else
+		vm_map_lookup_entry(map, start - 1, &prev);
+	*entry = prev->end > start ? prev : prev->next;
+	return (prev);
+}
+
+/*
  * vm_map_protect:
 *
 *	Sets the protection of the specified address
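What _vm_map_clip_start() returning the new entry buys the callers below: the piece split off at "start" is precisely the predecessor they need to keep tracking. A self-contained sketch of the split with hypothetical names (allocation failure handling elided):

#include <stdlib.h>

struct range {
	int start, end;
	struct range *next;
};

/*
 * Split r at addr, assuming r->start < addr < r->end.  The new node
 * takes the left half [r->start, addr) and precedes r, which keeps
 * [addr, r->end).  Returning the left half hands the caller its new
 * predecessor, mirroring what _vm_map_clip_start() now does.
 */
static struct range *
clip_start(struct range *r, int addr)
{
	struct range *left;

	left = malloc(sizeof(*left));
	left->start = r->start;
	left->end = addr;
	left->next = r;
	r->start = addr;
	return (left);	/* caller must also relink left's predecessor */
}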
@@ -2436,7 +2472,7 @@
 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	       vm_prot_t new_prot, boolean_t set_max)
 {
-	vm_map_entry_t current, entry, in_tran;
+	vm_map_entry_t current, entry, in_tran, prev;
 	vm_object_t obj;
 	struct ucred *cred;
 	vm_prot_t old_prot;
@@ -2458,10 +2494,8 @@
 	vm_map_wait_busy(map);
 
 	VM_MAP_RANGE_CHECK(map, start, end);
+	prev = vm_map_lookup_entry_and_prev(map, start, &entry);
 
-	if (!vm_map_lookup_entry(map, start, &entry))
-		entry = entry->next;
-
 	/*
 	 * Make a first pass to check for protection violations.
 	 */
@@ -2502,7 +2536,8 @@
 	 * some may now be mergeable.
 	 */
 	rv = KERN_SUCCESS;
-	vm_map_clip_start(map, entry, start);
+	if (prev == entry)
+		prev = _vm_map_clip_start(map, entry, start);
 	for (current = entry; current->start < end; current = current->next) {
 		vm_map_clip_end(map, current, end);
 
@@ -2561,8 +2596,8 @@
 	 * [Note that clipping is not necessary the second time.]
 	 */
 	for (current = entry; current->start < end;
-	    vm_map_try_merge_entries(map, current->prev, current),
-	    current = current->next) {
+	    vm_map_try_merge_entries(map, prev, current),
+	    prev = current, current = prev->next) {
 		if (rv != KERN_SUCCESS ||
 		    (current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;
@@ -2600,7 +2635,7 @@
 #undef	MASK
 		}
 	}
-	vm_map_try_merge_entries(map, current->prev, current);
+	vm_map_try_merge_entries(map, prev, current);
 	vm_map_unlock(map);
 	return (rv);
 }
@@ -2620,7 +2655,7 @@
 	vm_offset_t end,
 	int behav)
 {
-	vm_map_entry_t current, entry;
+	vm_map_entry_t current, entry, prev;
 	bool modify_map;
 
 	/*
@@ -2658,14 +2693,8 @@
 	 * Locate starting entry and clip if necessary.
 	 */
 	VM_MAP_RANGE_CHECK(map, start, end);
+	prev = vm_map_lookup_entry_and_prev(map, start, &entry);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		if (modify_map)
-			vm_map_clip_start(map, entry, start);
-	} else {
-		entry = entry->next;
-	}
-
 	if (modify_map) {
 		/*
 		 * madvise behaviors that are implemented in the vm_map_entry.
@@ -2673,8 +2702,10 @@
 		 * We clip the vm_map_entry so that behavioral changes are
 		 * limited to the specified address range.
 		 */
+		if (prev == entry)
+			prev = _vm_map_clip_start(map, entry, start);
 		for (current = entry; current->start < end;
-		    current = current->next) {
+		    prev = current, current = current->next) {
 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 				continue;
 
@@ -2705,9 +2736,9 @@
 			default:
 				break;
 			}
-			vm_map_try_merge_entries(map, current->prev, current);
+			vm_map_try_merge_entries(map, prev, current);
 		}
-		vm_map_try_merge_entries(map, current->prev, current);
+		vm_map_try_merge_entries(map, prev, current);
 		vm_map_unlock(map);
 	} else {
 		vm_pindex_t pstart, pend;
@@ -2807,8 +2838,7 @@
 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	       vm_inherit_t new_inheritance)
 {
-	vm_map_entry_t entry;
-	vm_map_entry_t temp_entry;
+	vm_map_entry_t entry, prev;
 
 	switch (new_inheritance) {
 	case VM_INHERIT_NONE:
@@ -2823,20 +2853,20 @@
 		return (KERN_SUCCESS);
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
-	if (vm_map_lookup_entry(map, start, &temp_entry)) {
-		entry = temp_entry;
-		vm_map_clip_start(map, entry, start);
-	} else
-		entry = temp_entry->next;
+	prev = vm_map_lookup_entry_and_prev(map, start, &entry);
+	if (entry == prev)
+		prev = _vm_map_clip_start(map, entry, start);
+
 	while (entry->start < end) {
 		vm_map_clip_end(map, entry, end);
 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
 		    new_inheritance != VM_INHERIT_ZERO)
 			entry->inheritance = new_inheritance;
-		vm_map_try_merge_entries(map, entry->prev, entry);
+		vm_map_try_merge_entries(map, prev, entry);
+		prev = entry;
 		entry = entry->next;
 	}
-	vm_map_try_merge_entries(map, entry->prev, entry);
+	vm_map_try_merge_entries(map, prev, entry);
 	vm_map_unlock(map);
 	return (KERN_SUCCESS);
 }
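The protect/madvise/inherit loops above all merge against the tracked predecessor. A standalone sketch of why predecessor tracking suffices; here the current range is merged forward into prev, since a plain singly linked list cannot cheaply unlink prev itself (the kernel absorbs the predecessor into the current entry instead, which its tree-based unlink makes cheap):

#include <stdlib.h>

struct range {
	int start, end, attr;
	struct range *next;		/* NULL-terminated, sorted */
};

static void
set_attr(struct range *head, int start, int end, int attr)
{
	struct range *prev, *r;

	prev = NULL;
	for (r = head; r != NULL && r->start < end;) {
		if (r->start >= start && r->end <= end)
			r->attr = attr;	/* clipping elided in this sketch */
		if (prev != NULL && prev->end == r->start &&
		    prev->attr == r->attr) {
			/* Merge r into prev and drop r from the list. */
			prev->end = r->end;
			prev->next = r->next;
			free(r);
			r = prev->next;
		} else {
			prev = r;
			r = r->next;
		}
	}
}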
@@ -2899,7 +2929,7 @@
 int
 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
-	vm_map_entry_t entry, first_entry;
+	vm_map_entry_t entry, prev;
 	int rv;
 	bool first_iteration, holes_ok, need_wakeup, user_unwire;
 
@@ -2909,16 +2939,12 @@
 	user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
-	if (!vm_map_lookup_entry(map, start, &first_entry)) {
-		if (holes_ok)
-			first_entry = first_entry->next;
-		else {
-			vm_map_unlock(map);
-			return (KERN_INVALID_ADDRESS);
-		}
+	prev = vm_map_lookup_entry_and_prev(map, start, &entry);
+	if (prev != entry && entry->start > start && !holes_ok) {
+		vm_map_unlock(map);
+		return (KERN_INVALID_ADDRESS);
 	}
 	first_iteration = true;
-	entry = first_entry;
 	rv = KERN_SUCCESS;
 	while (entry->start < end) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
@@ -2935,10 +2961,13 @@
 				rv = KERN_INVALID_ADDRESS;
 				break;
 			}
-			first_entry = first_iteration ? entry : NULL;
+			prev = NULL;
 			continue;
 		}
 		first_iteration = false;
+		if (prev == entry)
+			prev = _vm_map_clip_start(map, entry, start);
+		else
+			vm_map_clip_start(map, entry, start);
-		vm_map_clip_start(map, entry, start);
 		vm_map_clip_end(map, entry, end);
@@ -2947,7 +2977,7 @@
 		/*
 		 * Mark the entry in case the map lock is released.  (See
 		 * above.)
 		 */
 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
 		    entry->wiring_thread == NULL,
-		    ("owned map entry %p", entry));
+		    ("%s: owned map entry %p", __func__, entry));
 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 		entry->wiring_thread = curthread;
 		/*
@@ -2972,12 +3002,13 @@
 		entry = entry->next;
 	}
 	need_wakeup = false;
-	if (first_entry == NULL &&
-	    !vm_map_lookup_entry(map, start, &first_entry)) {
-		KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
-		first_entry = first_entry->next;
+	if (prev == NULL) {
+		prev = vm_map_lookup_entry_and_prev(map, start, &entry);
+		KASSERT(entry->start == start || holes_ok,
+		    ("%s: lookup failed", __func__));
 	}
-	for (entry = first_entry; entry->start < end; entry = entry->next) {
+	for (entry = prev->next; entry->start < end;
+	    prev = entry, entry = entry->next) {
 		/*
 		 * If holes_ok was specified, an empty
 		 * space in the unwired region could have been mapped
@@ -2990,7 +3021,7 @@
 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
 		    entry->wiring_thread != curthread) {
 			KASSERT(holes_ok,
-			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
+			    ("%s: !HOLESOK and new/changed entry", __func__));
 			continue;
 		}
@@ -3004,18 +3035,18 @@
 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 		}
 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
-		    ("vm_map_unwire: in-transition flag missing %p", entry));
+		    ("%s: in-transition flag missing %p", __func__, entry));
 		KASSERT(entry->wiring_thread == curthread,
-		    ("vm_map_unwire: alien wire %p", entry));
+		    ("%s: alien wire %p", __func__, entry));
 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 		entry->wiring_thread = NULL;
 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 			need_wakeup = true;
 		}
-		vm_map_try_merge_entries(map, entry->prev, entry);
+		vm_map_try_merge_entries(map, prev, entry);
 	}
-	vm_map_try_merge_entries(map, entry->prev, entry);
+	vm_map_try_merge_entries(map, prev, entry);
 	vm_map_unlock(map);
 	if (need_wakeup)
 		vm_map_wakeup(map);
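When the wiring loops drop the map lock, prev is set to NULL to record that the saved position is stale; afterward it is the address, not the pointer, that gets looked up again. The same step in miniature (generic list code, not the kernel's splay lookup):

struct range {
	int start, end;
	struct range *next;		/* NULL-terminated, sorted */
};

/*
 * Re-find a position by address after the structure may have changed:
 * return the range containing saved_start, or the first one above it.
 */
static struct range *
refind(struct range *head, int saved_start)
{
	struct range *r;

	for (r = head; r != NULL && r->end <= saved_start; r = r->next)
		;
	return (r);
}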
@@ -3101,7 +3132,7 @@
 int
 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
-	vm_map_entry_t entry, first_entry, tmp_entry;
+	vm_map_entry_t entry, prev, tmp_entry;
 	vm_offset_t faddr, saved_end, saved_start;
 	u_long npages;
 	u_int last_timestamp;
@@ -3119,14 +3150,10 @@
 	holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
 	user_wire = (flags & VM_MAP_WIRE_USER) != 0;
 	VM_MAP_RANGE_CHECK(map, start, end);
-	if (!vm_map_lookup_entry(map, start, &first_entry)) {
-		if (holes_ok)
-			first_entry = first_entry->next;
-		else
-			return (KERN_INVALID_ADDRESS);
-	}
+	prev = vm_map_lookup_entry_and_prev(map, start, &entry);
+	if (prev != entry && entry->start > start && !holes_ok)
+		return (KERN_INVALID_ADDRESS);
 	first_iteration = true;
-	entry = first_entry;
 	while (entry->start < end) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 			/*
@@ -3140,11 +3167,14 @@
 				rv = KERN_INVALID_ADDRESS;
 				goto done;
 			}
-			first_entry = first_iteration ? entry : NULL;
+			prev = NULL;
 			continue;
 		}
 		first_iteration = false;
-		vm_map_clip_start(map, entry, start);
+		if (prev == entry)
+			prev = _vm_map_clip_start(map, entry, start);
+		else
+			vm_map_clip_start(map, entry, start);
 		vm_map_clip_end(map, entry, end);
@@ -3152,7 +3182,7 @@
 		/*
 		 * Mark the entry in case the map lock is released.  (See
 		 * above.)
 		 */
 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
 		    entry->wiring_thread == NULL,
-		    ("owned map entry %p", entry));
+		    ("%s: owned map entry %p", __func__, entry));
 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 		entry->wiring_thread = curthread;
 		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
@@ -3207,11 +3237,8 @@
 			if (!vm_map_lookup_entry(map, saved_start,
 			    &tmp_entry))
 				KASSERT(false,
-				    ("vm_map_wire: lookup failed"));
-			if (entry == first_entry)
-				first_entry = tmp_entry;
-			else
-				first_entry = NULL;
+				    ("%s: lookup failed", __func__));
+			prev = NULL;
 			entry = tmp_entry;
 			while (entry->end < saved_end) {
 				/*
@@ -3253,12 +3280,13 @@
 	rv = KERN_SUCCESS;
 done:
 	need_wakeup = false;
-	if (first_entry == NULL &&
-	    !vm_map_lookup_entry(map, start, &first_entry)) {
-		KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
-		first_entry = first_entry->next;
+	if (prev == NULL) {
+		prev = vm_map_lookup_entry_and_prev(map, start, &entry);
+		KASSERT(entry->start == start || holes_ok,
+		    ("%s: lookup failed", __func__));
 	}
-	for (entry = first_entry; entry->start < end; entry = entry->next) {
+	for (entry = prev->next; entry->start < end;
+	    prev = entry, entry = entry->next) {
 		/*
 		 * If holes_ok was specified, an empty
 		 * space in the unwired region could have been mapped
@@ -3271,7 +3299,7 @@
 		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
 		    entry->wiring_thread != curthread) {
 			KASSERT(holes_ok,
-			    ("vm_map_wire: !HOLESOK and new/changed entry"));
+			    ("%s: !HOLESOK and new/changed entry", __func__));
 			continue;
 		}
@@ -3301,9 +3329,9 @@
 			entry->wired_count--;
 		}
 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
-		    ("vm_map_wire: in-transition flag missing %p", entry));
+		    ("%s: in-transition flag missing %p", __func__, entry));
 		KASSERT(entry->wiring_thread == curthread,
-		    ("vm_map_wire: alien wire %p", entry));
+		    ("%s: alien wire %p", __func__, entry));
 		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
 		    MAP_ENTRY_WIRE_SKIPPED);
 		entry->wiring_thread = NULL;
@@ -3311,9 +3339,9 @@
 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 			need_wakeup = true;
 		}
-		vm_map_try_merge_entries(map, entry->prev, entry);
+		vm_map_try_merge_entries(map, prev, entry);
 	}
-	vm_map_try_merge_entries(map, entry->prev, entry);
+	vm_map_try_merge_entries(map, prev, entry);
 	if (need_wakeup)
 		vm_map_wakeup(map);
 	return (rv);
@@ -3356,7 +3384,8 @@
 	if (!vm_map_lookup_entry(map, start, &entry)) {
 		vm_map_unlock_read(map);
 		return (KERN_INVALID_ADDRESS);
-	} else if (start == end) {
+	}
+	if (start == end) {
 		start = entry->start;
 		end = entry->end;
 	}
@@ -3411,9 +3440,10 @@
 		start += size;
 		vm_object_deallocate(object);
 		vm_map_lock_read(map);
-		if (last_timestamp == map->timestamp ||
-		    !vm_map_lookup_entry(map, start, &current))
+		if (last_timestamp == map->timestamp)
 			current = current->next;
+		else
+			vm_map_lookup_entry_ge(map, start, &current);
 	}
 
 	vm_map_unlock_read(map);
@@ -3546,7 +3576,6 @@
 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 {
 	vm_map_entry_t entry;
-	vm_map_entry_t first_entry;
 
 	VM_MAP_ASSERT_LOCKED(map);
 	if (start == end)
@@ -3555,12 +3584,8 @@
 	/*
 	 * Find the start of the region, and clip it
 	 */
-	if (!vm_map_lookup_entry(map, start, &first_entry))
-		entry = first_entry->next;
-	else {
-		entry = first_entry;
+	if (vm_map_lookup_entry_ge(map, start, &entry))
 		vm_map_clip_start(map, entry, start);
-	}
 
 	/*
 	 * Step through all entries in this region
@@ -3578,29 +3603,22 @@
 		    vm_map_entry_system_wired_count(entry) != 0)) {
 			unsigned int last_timestamp;
 			vm_offset_t saved_start;
-			vm_map_entry_t tmp_entry;
 
 			saved_start = entry->start;
 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 			last_timestamp = map->timestamp;
 			(void) vm_map_unlock_and_wait(map, 0);
 			vm_map_lock(map);
-			if (last_timestamp + 1 != map->timestamp) {
-				/*
-				 * Look again for the entry because the map was
-				 * modified while it was unlocked.
-				 * Specifically, the entry may have been
-				 * clipped, merged, or deleted.
-				 */
-				if (!vm_map_lookup_entry(map, saved_start,
-				    &tmp_entry))
-					entry = tmp_entry->next;
-				else {
-					entry = tmp_entry;
-					vm_map_clip_start(map, entry,
-					    saved_start);
-				}
-			}
+			if (last_timestamp + 1 == map->timestamp)
+				continue;
+
+			/*
+			 * Look again for the entry because the map was
+			 * modified while it was unlocked.  Specifically, the
+			 * entry may have been clipped, merged, or deleted.
+			 */
+			if (vm_map_lookup_entry_ge(map, saved_start, &entry))
+				vm_map_clip_start(map, entry, saved_start);
 			continue;
 		}
 		vm_map_clip_end(map, entry, end);
@@ -3675,11 +3693,9 @@
     vm_prot_t protection)
 {
 	vm_map_entry_t entry;
-	vm_map_entry_t tmp_entry;
 
-	if (!vm_map_lookup_entry(map, start, &tmp_entry))
+	if (!vm_map_lookup_entry(map, start, &entry))
 		return (FALSE);
-	entry = tmp_entry;
 
 	while (start < end) {
 		/*
@@ -4199,7 +4215,7 @@
 		 * stack_guard_page for vm_map_growstack().
 		 */
 		if (orient == MAP_STACK_GROWS_DOWN)
-			new_entry->prev->next_read = sgp;
+			prev_entry->next->next_read = sgp;
 		else
 			new_entry->next->next_read = sgp;
 	} else {
 /*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
 * successfully grow the stack.
 */
@@ -4213,9 +4229,9 @@
 static int
-vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
+vm_map_growstack(vm_map_t map, vm_offset_t addr,
+    vm_map_entry_t gap_entry, vm_map_entry_t stack_entry)
 {
-	vm_map_entry_t stack_entry;
 	struct proc *p;
 	struct vmspace *vm;
 	struct ucred *cred;
@@ -4250,19 +4266,18 @@
 	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
 retry:
 	/* If addr is not in a hole for a stack grow area, no need to grow. */
-	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
+	if (gap_entry == NULL &&
+	    !vm_map_lookup_helper(map, addr, true, &gap_entry, &stack_entry))
 		return (KERN_FAILURE);
 	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
 		return (KERN_SUCCESS);
 	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
-		stack_entry = gap_entry->next;
 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
 		    stack_entry->start != gap_entry->end)
 			return (KERN_FAILURE);
 		grow_amount = round_page(stack_entry->start - addr);
 		grow_down = true;
 	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
-		stack_entry = gap_entry->prev;
 		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
 		    stack_entry->end != gap_entry->start)
 			return (KERN_FAILURE);
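vm_map_growstack() can no longer reach the stack entry through gap_entry->prev, so the neighbor is resolved at lookup time and passed in. Schematically (hypothetical enum and names, not kernel code):

enum gap_kind { GAP_BELOW_STACK, GAP_ABOVE_STACK };

struct ent {
	enum gap_kind kind;
	struct ent *next;
};

/*
 * A gap below a downward-growing stack still reaches the stack through
 * its next link; a gap above an upward-growing stack needs the
 * predecessor that the caller's lookup already produced.
 */
static struct ent *
stack_for_gap(struct ent *gap, struct ent *pred_from_lookup)
{
	return (gap->kind == GAP_BELOW_STACK ? gap->next : pred_from_lookup);
}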
@@ -4538,7 +4553,7 @@
     vm_prot_t *out_prot,	/* OUT */
     boolean_t *wired)		/* OUT */
 {
-	vm_map_entry_t entry;
+	vm_map_entry_t entry, nbr;
 	vm_map_t map = *var_map;
 	vm_prot_t prot;
 	vm_prot_t fault_type = fault_typea;
@@ -4554,7 +4569,7 @@
 	/*
 	 * Lookup the faulting address.
 	 */
-	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
+	if (!vm_map_lookup_helper(map, vaddr, true, out_entry, &nbr)) {
 		vm_map_unlock_read(map);
 		return (KERN_INVALID_ADDRESS);
 	}
@@ -4582,7 +4597,7 @@
 	    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
 	    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
 	    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
-	    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
+	    vm_map_growstack(map, vaddr, entry, nbr) == KERN_SUCCESS)
 		goto RetryLookupLocked;
 	}
 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
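Caller-side shape of the revised lookup interface, as a hedged fragment (the surrounding function and locking context are elided; only functions this diff defines or keeps are used):

	vm_map_entry_t entry;

	vm_map_lock(map);
	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* addr lies inside *entry. */
	} else {
		/*
		 * entry is the nearest entry below addr, or the map
		 * header; vm_map_lookup_entry_ge() would instead return
		 * the nearest entry at or above addr.
		 */
	}
	vm_map_unlock(map);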