Index: vm_fault.c =================================================================== --- vm_fault.c +++ vm_fault.c @@ -605,7 +605,7 @@ fs.entry->wiring_thread != curthread) { vm_map_unlock_read(fs.map); vm_map_lock(fs.map); - if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) && + if (vm_map_lookup_entry_lesseq(fs.map, vaddr, &fs.entry) && (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) { unlock_vp(&fs); fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; Index: vm_map.h =================================================================== --- vm_map.h +++ vm_map.h @@ -415,7 +415,7 @@ int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, vm_pindex_t *, vm_prot_t *, boolean_t *); void vm_map_lookup_done (vm_map_t, vm_map_entry_t); -boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); +bool vm_map_lookup_entry_lesseq (vm_map_t, vm_offset_t, vm_map_entry_t *); int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t); int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); void vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry); Index: vm_map.c =================================================================== --- vm_map.c +++ vm_map.c @@ -1166,56 +1166,6 @@ } /* - * vm_map_splay: - * - * The Sleator and Tarjan top-down splay algorithm with the - * following variation. Max_free must be computed bottom-up, so - * on the downward pass, maintain the left and right spines in - * reverse order. Then, make a second pass up each side to fix - * the pointers and compute max_free. The time bound is O(log n) - * amortized. - * - * The new root is the vm_map_entry containing "addr", or else an - * adjacent entry (lower if possible) if addr is not in the tree. - * - * The map must be locked, and leaves it so. - * - * Returns: the new root. 
- */ -static vm_map_entry_t -vm_map_splay(vm_map_t map, vm_offset_t addr) -{ - vm_map_entry_t llist, rlist, root; - - root = vm_map_splay_split(map, addr, 0, &llist, &rlist); - if (root != NULL) { - /* do nothing */ - } else if (llist != &map->header) { - /* - * Recover the greatest node in the left - * subtree and make it the root. - */ - root = llist; - llist = root->right; - root->right = NULL; - } else if (rlist != &map->header) { - /* - * Recover the least node in the right - * subtree and make it the root. - */ - root = rlist; - rlist = root->left; - root->left = NULL; - } else { - /* There is no root. */ - return (NULL); - } - vm_map_splay_merge(map, root, llist, rlist); - VM_MAP_ASSERT_CONSISTENT(map); - return (root); -} - -/* * vm_map_entry_{un,}link: * * Insert/remove entries from maps. @@ -1333,35 +1283,31 @@ /* * vm_map_lookup_entry: [ internal use only ] * - * Finds the map entry containing (or - * immediately preceding) the specified address - * in the given map; the entry is returned - * in the "entry" parameter. The boolean - * result indicates whether the address is - * actually contained in the map. + * Finds the map entry containing (or adjacent to) the specified address + * in the given map; the entry is returned in the "entry" parameter. The + * boolean result indicates whether the address is actually contained in + * the map. If the address is not contained in the map, parameter lesseq + * determines whether the entry provided is before or after the address. + * If the address is contained in the map, parameter nbr, if not NULL, is + * where the next or previous entry is saved, depending on the value of + * eflags in the found entry. 
*/ -boolean_t -vm_map_lookup_entry( - vm_map_t map, - vm_offset_t address, - vm_map_entry_t *entry) /* OUT */ +static bool +vm_map_lookup_entry(vm_map_t map, vm_offset_t addr, bool lesseq, + vm_map_entry_t *entry, vm_map_entry_t *nbr) /* OUT */ { - vm_map_entry_t cur, lbound; - boolean_t locked; + vm_map_entry_t llist, rlist, root; + bool locked, rv; /* * If the map is empty, then the map entry immediately preceding - * "address" is the map's header. + * "addr" is the map's header. */ - cur = map->root; - if (cur == NULL) { + root = map->root; + if (root == NULL) { *entry = &map->header; - return (FALSE); + return (false); } - if (address >= cur->start && cur->end > address) { - *entry = cur; - return (TRUE); - } if ((locked = vm_map_locked(map)) || sx_try_upgrade(&map->lock)) { /* @@ -1370,42 +1316,96 @@ * change the map. Thus, the map's timestamp need not change * on a temporary upgrade. */ - cur = vm_map_splay(map, address); + root = vm_map_splay_split(map, addr, 0, &llist, &rlist); + rv = root != NULL; + *entry = root; + if (root != NULL) { + if (nbr == NULL); + else if ((root->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { + vm_map_splay_findnext(root, &rlist); + *nbr = rlist; + } else { + vm_map_splay_findprev(root, &llist); + *nbr = llist; + } + } else if (llist != &map->header) { + /* + * Recover the greatest node in the left + * subtree and make it the root. + */ + *entry = lesseq ? llist : rlist; + root = llist; + llist = root->right; + root->right = NULL; + } else { + /* + * Recover the least node in the right + * subtree and make it the root. + */ + *entry = lesseq ? llist : rlist; + root = rlist; + rlist = root->left; + root->left = NULL; + } + vm_map_splay_merge(map, root, llist, rlist); + VM_MAP_ASSERT_CONSISTENT(map); if (!locked) sx_downgrade(&map->lock); - - /* - * If "address" is contained within a map entry, the new root - * is that map entry. Otherwise, the new root is a map entry - * immediately before or after "address". 
- */ - if (address < cur->start) { - *entry = &map->header; - return (FALSE); - } - *entry = cur; - return (address < cur->end); + return (rv); } /* * Since the map is only locked for read access, perform a - * standard binary search tree lookup for "address". + * standard binary search tree lookup for "addr". */ - lbound = &map->header; + llist = rlist = &map->header; do { - if (address < cur->start) { - cur = cur->left; - } else if (cur->end <= address) { - lbound = cur; - cur = cur->right; + if (addr < root->start) { + rlist = root; + root = root->left; + } else if (root->end <= addr) { + llist = root; + root = root->right; } else { - *entry = cur; - return (TRUE); + *entry = root; + if (nbr == NULL); + else if ((root->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { + /* Make nbr the successor to root. */ + if (root->right != NULL) { + rlist = root->right; + while (rlist->left != NULL) + rlist = rlist->left; + } + *nbr = rlist; + } else { + /* Make nbr the predecessor to root. */ + if (root->left != NULL) { + llist = root->left; + while (llist->right != NULL) + llist = llist->right; + } + *nbr = llist; + } + return (true); } - } while (cur != NULL); - *entry = lbound; - return (FALSE); + } while (root != NULL); + *entry = lesseq ? 
llist : rlist; + return (false); } +bool +vm_map_lookup_entry_lesseq(vm_map_t map, vm_offset_t addr, + vm_map_entry_t *entry) /* OUT */ +{ + return (vm_map_lookup_entry(map, addr, true, entry, NULL)); +} + +static bool +vm_map_lookup_entry_grtreq(vm_map_t map, vm_offset_t addr, + vm_map_entry_t *entry) /* OUT */ +{ + return (vm_map_lookup_entry(map, addr, false, entry, NULL)); +} + /* * vm_map_insert: * @@ -1422,7 +1422,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) { - vm_map_entry_t new_entry, prev_entry, temp_entry; + vm_map_entry_t new_entry, prev_entry; struct ucred *cred; vm_eflags_t protoeflags; vm_inherit_t inheritance; @@ -1447,11 +1447,9 @@ * Find the entry prior to the proposed starting address; if it's part * of an existing entry, this range is bogus. */ - if (vm_map_lookup_entry(map, start, &temp_entry)) + if (vm_map_lookup_entry_lesseq(map, start, &prev_entry)) return (KERN_NO_SPACE); - prev_entry = temp_entry; - /* * Assert that the next entry doesn't overlap the end point. */ @@ -2314,10 +2312,8 @@ VM_MAP_RANGE_CHECK(map, start, end); - if (vm_map_lookup_entry(map, start, &entry)) { + if (vm_map_lookup_entry_grtreq(map, start, &entry)) vm_map_clip_start(map, entry, start); - } else - entry = entry->next; vm_map_clip_end(map, entry, end); @@ -2472,11 +2468,8 @@ VM_MAP_RANGE_CHECK(map, start, end); - if (vm_map_lookup_entry(map, start, &entry)) { + if (vm_map_lookup_entry_grtreq(map, start, &entry)) vm_map_clip_start(map, entry, start); - } else { - entry = entry->next; - } /* * Make a first pass to check for protection violations. 
@@ -2665,11 +2658,9 @@ */ VM_MAP_RANGE_CHECK(map, start, end); - if (vm_map_lookup_entry(map, start, &entry)) { + if (vm_map_lookup_entry_grtreq(map, start, &entry)) { if (modify_map) vm_map_clip_start(map, entry, start); - } else { - entry = entry->next; } if (modify_map) { @@ -2801,7 +2792,6 @@ vm_inherit_t new_inheritance) { vm_map_entry_t entry; - vm_map_entry_t temp_entry; switch (new_inheritance) { case VM_INHERIT_NONE: @@ -2816,11 +2806,8 @@ return (KERN_SUCCESS); vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); - if (vm_map_lookup_entry(map, start, &temp_entry)) { - entry = temp_entry; + if (vm_map_lookup_entry_grtreq(map, start, &entry)) vm_map_clip_start(map, entry, start); - } else - entry = temp_entry->next; while (entry->start < end) { vm_map_clip_end(map, entry, end); if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || @@ -2853,10 +2840,8 @@ user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); - if (!vm_map_lookup_entry(map, start, &first_entry)) { - if (flags & VM_MAP_WIRE_HOLESOK) - first_entry = first_entry->next; - else { + if (!vm_map_lookup_entry_grtreq(map, start, &first_entry)) { + if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { vm_map_unlock(map); return (KERN_INVALID_ADDRESS); } @@ -2884,11 +2869,9 @@ * Specifically, the entry may have been * clipped, merged, or deleted. */ - if (!vm_map_lookup_entry(map, saved_start, + if (!vm_map_lookup_entry_grtreq(map, saved_start, &tmp_entry)) { - if (flags & VM_MAP_WIRE_HOLESOK) - tmp_entry = tmp_entry->next; - else { + if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { if (saved_start == start) { /* * First_entry has been deleted. 
@@ -2946,11 +2929,9 @@ done: need_wakeup = FALSE; if (first_entry == NULL) { - result = vm_map_lookup_entry(map, start, &first_entry); - if (!result && (flags & VM_MAP_WIRE_HOLESOK)) - first_entry = first_entry->next; - else - KASSERT(result, ("vm_map_unwire: lookup failed")); + result = vm_map_lookup_entry_grtreq(map, start, &first_entry); + KASSERT(result || (flags & VM_MAP_WIRE_HOLESOK) != 0, + ("vm_map_unwire: lookup failed")); } for (entry = first_entry; entry->start < end; entry = entry->next) { /* @@ -3092,10 +3073,8 @@ prot |= VM_PROT_WRITE; user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; VM_MAP_RANGE_CHECK(map, start, end); - if (!vm_map_lookup_entry(map, start, &first_entry)) { - if (flags & VM_MAP_WIRE_HOLESOK) - first_entry = first_entry->next; - else + if (!vm_map_lookup_entry_grtreq(map, start, &first_entry)) { + if ((flags & VM_MAP_WIRE_HOLESOK) == 0) return (KERN_INVALID_ADDRESS); } last_timestamp = map->timestamp; @@ -3121,11 +3100,9 @@ * Specifically, the entry may have been * clipped, merged, or deleted. */ - if (!vm_map_lookup_entry(map, saved_start, + if (!vm_map_lookup_entry_grtreq(map, saved_start, &tmp_entry)) { - if (flags & VM_MAP_WIRE_HOLESOK) - tmp_entry = tmp_entry->next; - else { + if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { if (saved_start == start) { /* * first_entry has been deleted. @@ -3207,7 +3184,7 @@ * may have been clipped, but NOT merged or * deleted. 
*/ - result = vm_map_lookup_entry(map, saved_start, + result = vm_map_lookup_entry_lesseq(map, saved_start, &tmp_entry); KASSERT(result, ("vm_map_wire: lookup failed")); if (entry == first_entry) @@ -3258,11 +3235,9 @@ done: need_wakeup = FALSE; if (first_entry == NULL) { - result = vm_map_lookup_entry(map, start, &first_entry); - if (!result && (flags & VM_MAP_WIRE_HOLESOK)) - first_entry = first_entry->next; - else - KASSERT(result, ("vm_map_wire: lookup failed")); + result = vm_map_lookup_entry_grtreq(map, start, &first_entry); + KASSERT(result || (flags & VM_MAP_WIRE_HOLESOK) != 0, + ("vm_map_wire: lookup failed")); } for (entry = first_entry; entry->start < end; entry = entry->next) { /* @@ -3360,10 +3335,11 @@ vm_map_lock_read(map); VM_MAP_RANGE_CHECK(map, start, end); - if (!vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry_lesseq(map, start, &entry)) { vm_map_unlock_read(map); return (KERN_INVALID_ADDRESS); - } else if (start == end) { + } + if (start == end) { start = entry->start; end = entry->end; } @@ -3400,7 +3376,7 @@ smap = current->object.sub_map; vm_map_lock_read(smap); - (void) vm_map_lookup_entry(smap, offset, &tentry); + (void) vm_map_lookup_entry_lesseq(smap, offset, &tentry); tsize = tentry->end - offset; if (tsize < size) size = tsize; @@ -3418,9 +3394,10 @@ start += size; vm_object_deallocate(object); vm_map_lock_read(map); - if (last_timestamp == map->timestamp || - !vm_map_lookup_entry(map, start, ¤t)) + if (last_timestamp == map->timestamp) current = current->next; + else + vm_map_lookup_entry_grtreq(map, start, ¤t); } vm_map_unlock_read(map); @@ -3553,7 +3530,6 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) { vm_map_entry_t entry; - vm_map_entry_t first_entry; VM_MAP_ASSERT_LOCKED(map); if (start == end) @@ -3562,12 +3538,8 @@ /* * Find the start of the region, and clip it */ - if (!vm_map_lookup_entry(map, start, &first_entry)) - entry = first_entry->next; - else { - entry = first_entry; + if 
(vm_map_lookup_entry_grtreq(map, start, &entry)) vm_map_clip_start(map, entry, start); - } /* * Step through all entries in this region @@ -3585,29 +3557,22 @@ vm_map_entry_system_wired_count(entry) != 0)) { unsigned int last_timestamp; vm_offset_t saved_start; - vm_map_entry_t tmp_entry; saved_start = entry->start; entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; last_timestamp = map->timestamp; (void) vm_map_unlock_and_wait(map, 0); vm_map_lock(map); - if (last_timestamp + 1 != map->timestamp) { - /* - * Look again for the entry because the map was - * modified while it was unlocked. - * Specifically, the entry may have been - * clipped, merged, or deleted. - */ - if (!vm_map_lookup_entry(map, saved_start, - &tmp_entry)) - entry = tmp_entry->next; - else { - entry = tmp_entry; - vm_map_clip_start(map, entry, - saved_start); - } - } + if (last_timestamp + 1 == map->timestamp) + continue; + + /* + * Look again for the entry because the map was + * modified while it was unlocked. Specifically, the + * entry may have been clipped, merged, or deleted. 
+ */ + if (vm_map_lookup_entry_grtreq(map, saved_start, &entry)) + vm_map_clip_start(map, entry, saved_start); continue; } vm_map_clip_end(map, entry, end); @@ -3682,11 +3647,9 @@ vm_prot_t protection) { vm_map_entry_t entry; - vm_map_entry_t tmp_entry; - if (!vm_map_lookup_entry(map, start, &tmp_entry)) + if (!vm_map_lookup_entry_lesseq(map, start, &entry)) return (FALSE); - entry = tmp_entry; while (start < end) { /* @@ -4122,7 +4085,7 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) { - vm_map_entry_t new_entry, prev_entry; + vm_map_entry_t new_entry; vm_offset_t bot, gap_bot, gap_top, top; vm_size_t init_ssize, sgp; int orient, rv; @@ -4150,13 +4113,13 @@ init_ssize = max_ssize - sgp; /* If addr is already mapped, no go */ - if (vm_map_lookup_entry(map, addrbos, &prev_entry)) + if (vm_map_lookup_entry_grtreq(map, addrbos, &new_entry)) return (KERN_NO_SPACE); /* * If we can't accommodate max_ssize in the current mapping, no go. */ - if (prev_entry->next->start < addrbos + max_ssize) + if (new_entry->start < addrbos + max_ssize) return (KERN_NO_SPACE); /* @@ -4183,7 +4146,6 @@ rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); if (rv != KERN_SUCCESS) return (rv); - new_entry = prev_entry->next; KASSERT(new_entry->end == top || new_entry->start == bot, ("Bad entry start/end for new stack entry")); KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || @@ -4242,19 +4204,18 @@ vmemlim = lim_cur(curthread, RLIMIT_VMEM); retry: /* If addr is not in a hole for a stack grow area, no need to grow. 
*/ - if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) + if (gap_entry == NULL && + !vm_map_lookup_entry(map, addr, true, &gap_entry, &stack_entry)) return (KERN_FAILURE); if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) return (KERN_SUCCESS); if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { - stack_entry = gap_entry->next; if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || stack_entry->start != gap_entry->end) return (KERN_FAILURE); grow_amount = round_page(stack_entry->start - addr); grow_down = true; } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { - stack_entry = gap_entry->prev; if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || stack_entry->end != gap_entry->start) return (KERN_FAILURE); @@ -4544,7 +4505,7 @@ /* * Lookup the faulting address. */ - if (!vm_map_lookup_entry(map, vaddr, out_entry)) { + if (!vm_map_lookup_entry_lesseq(map, vaddr, out_entry)) { vm_map_unlock_read(map); return (KERN_INVALID_ADDRESS); } @@ -4719,7 +4680,7 @@ /* * Lookup the faulting address. */ - if (!vm_map_lookup_entry(map, vaddr, out_entry)) + if (!vm_map_lookup_entry_lesseq(map, vaddr, out_entry)) return (KERN_INVALID_ADDRESS); entry = *out_entry; Index: vm_mmap.c =================================================================== --- vm_mmap.c +++ vm_mmap.c @@ -545,7 +545,7 @@ * an executable region. */ pkm.pm_address = (uintptr_t) NULL; - if (vm_map_lookup_entry(map, addr, &entry)) { + if (vm_map_lookup_entry_lesseq(map, addr, &entry)) { for (; entry->start < addr + size; entry = entry->next) { if (vm_map_check_protection(map, entry->start, @@ -763,7 +763,7 @@ RestartScan: timestamp = map->timestamp; - if (!vm_map_lookup_entry(map, addr, &entry)) { + if (!vm_map_lookup_entry_lesseq(map, addr, &entry)) { vm_map_unlock_read(map); return (ENOMEM); }