Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -641,7 +641,8 @@
 	 * to lock out sleepers/wakers.
 	 */
 	vm_map_lock(map);
-	if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
+	addr = vm_map_findspace(map, vm_map_min(map), size);
+	if (addr < vm_map_max(map))
 		break;
 	/* no space now; see if we can ever get space */
 	if (vm_map_max(map) - vm_map_min(map) < size) {
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -106,7 +106,6 @@
 	vm_offset_t start;		/* start address */
 	vm_offset_t end;		/* end address */
 	vm_offset_t next_read;		/* vaddr of the next sequential read */
-	vm_size_t adj_free;		/* amount of adjacent free space */
 	vm_size_t max_free;		/* max free space in subtree */
 	union vm_map_object object;	/* object I point to */
 	vm_ooffset_t offset;		/* offset into object */
@@ -402,7 +401,7 @@
 	    vm_size_t, vm_offset_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int);
 int vm_map_fixed(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_size_t,
     vm_prot_t, vm_prot_t, int);
-int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *);
+vm_offset_t vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t);
 int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
 void vm_map_init(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
 int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -132,9 +132,6 @@
 static int vm_map_zinit(void *mem, int ize, int flags);
 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
     vm_offset_t max);
-static int vm_map_alignspace(vm_map_t map, vm_object_t object,
-    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
-    vm_offset_t max_addr, vm_offset_t alignment);
 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
@@ -672,8 +669,50 @@
 #define	VM_MAP_ASSERT_LOCKED(map) \
     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
+
+static void
+_vm_map_assert_consistent(vm_map_t map)
+{
+	vm_map_entry_t entry;
+	vm_map_entry_t child;
+	vm_size_t max_left, max_right;
+
+	for (entry = map->header.next; entry != &map->header;
+	    entry = entry->next) {
+		KASSERT(entry->prev->end <= entry->start,
+		    ("map %p prev->end = %lu, start = %lu.",
+		    map, entry->prev->end, entry->start));
+		KASSERT(entry->start < entry->end,
+		    ("map %p start = %lu, end = %lu.",
+		    map, entry->start, entry->end));
+		KASSERT(entry->end <= entry->next->start,
+		    ("map %p end = %lu, next->start = %lu.",
+		    map, entry->end, entry->next->start));
+		KASSERT(entry->left == NULL ||
+		    entry->left->start < entry->start,
+		    ("map %p left->start = %lu, start = %lu.",
+		    map, entry->left->start, entry->start));
+		KASSERT(entry->right == NULL ||
+		    entry->start < entry->right->start,
+		    ("map %p start = %lu, right->start = %lu.",
+		    map, entry->start, entry->right->start));
+		child = entry->left;
+		max_left = (child != NULL) ? child->max_free :
+		    entry->start - entry->prev->end;
+		child = entry->right;
+		max_right = (child != NULL) ? child->max_free :
+		    entry->next->start - entry->end;
+		KASSERT(entry->max_free == MAX(max_left, max_right),
+		    ("map %p max = %lu, max_left = %lu, max_right = %lu.",
+		    map, entry->max_free, max_left, max_right));
+	}
+}
+
+#define	VM_MAP_ASSERT_CONSISTENT(map) \
+	_vm_map_assert_consistent(map)
 #else
 #define	VM_MAP_ASSERT_LOCKED(map)
+#define	VM_MAP_ASSERT_CONSISTENT(map)
 #endif
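[Note, not part of the patch] With adj_free removed, max_free alone summarizes free space: each entry caches the largest gap between any entry in its subtree and that entry's neighbor, falling back to the entry's own adjacent gap when a child is missing. A minimal userspace model of the invariant that the new assertion checks (the struct and names are simplified stand-ins, not kernel code):

    #include <assert.h>
    #include <stddef.h>

    struct entry {
        unsigned long start, end;   /* mapped range [start, end) */
        struct entry *prev, *next;  /* address-ordered list with sentinel */
        struct entry *left, *right; /* binary search tree children */
        unsigned long max_free;     /* largest gap in this subtree */
    };

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Recompute max_free for one entry, as vm_map_entry_set_max_free() does. */
    static void
    set_max_free(struct entry *e)
    {
        unsigned long max_left, max_right;

        max_left = (e->left != NULL) ? e->left->max_free :
            e->start - e->prev->end;        /* gap just below e */
        max_right = (e->right != NULL) ? e->right->max_free :
            e->next->start - e->end;        /* gap just above e */
        e->max_free = MAX(max_left, max_right);
    }

    int
    main(void)
    {
        /* Two entries [10,20) and [40,50) inside a [0,100) map; the
         * sentinel follows the header convention end == map minimum,
         * start == map maximum. */
        struct entry header = { 100, 0, NULL, NULL, NULL, NULL, 0 };
        struct entry a = { 10, 20 }, b = { 40, 50 };

        header.prev = &b; header.next = &a;
        a.prev = &header; a.next = &b;
        b.prev = &a; b.next = &header;
        a.left = NULL; a.right = &b;    /* a is the tree root */

        set_max_free(&b);   /* leaf: max(40 - 20, 100 - 50) = 50 */
        set_max_free(&a);   /* root: max(10 - 0, b.max_free) = 50 */
        assert(b.max_free == 50 && a.max_free == 50);
        return (0);
    }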
 
 /*
@@ -865,100 +904,117 @@
 static inline void
 vm_map_entry_set_max_free(vm_map_entry_t entry)
 {
-
-	entry->max_free = entry->adj_free;
-	if (entry->left != NULL && entry->left->max_free > entry->max_free)
-		entry->max_free = entry->left->max_free;
-	if (entry->right != NULL && entry->right->max_free > entry->max_free)
-		entry->max_free = entry->right->max_free;
+	vm_map_entry_t child;
+	vm_size_t max_left, max_right;
+
+	child = entry->left;
+	max_left = (child != NULL) ? child->max_free :
+	    entry->start - entry->prev->end;
+	child = entry->right;
+	max_right = (child != NULL) ? child->max_free :
+	    entry->next->start - entry->end;
+	entry->max_free = MAX(max_left, max_right);
 }
 
+#define	SPLAY_LEFT_STEP(root, y, rlist, test) do {			\
+	y = root->left;							\
+	if (y != NULL && (test)) {					\
+		/* Rotate right and make y root. */			\
+		root->left = y->right;					\
+		y->right = root;					\
+		vm_map_entry_set_max_free(root);			\
+		root = y;						\
+		y = root->left;						\
+	}								\
+	/* Put root on rlist. */					\
+	root->left = rlist;						\
+	rlist = root;							\
+	root = y;							\
+} while (0)
+
+#define	SPLAY_RIGHT_STEP(root, y, llist, test) do {			\
+	y = root->right;						\
+	if (y != NULL && (test)) {					\
+		/* Rotate left and make y root. */			\
+		root->right = y->left;					\
+		y->left = root;						\
+		vm_map_entry_set_max_free(root);			\
+		root = y;						\
+		y = root->right;					\
+	}								\
+	/* Put root on llist. */					\
+	root->right = llist;						\
+	llist = root;							\
+	root = y;							\
+} while (0)
+
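[Note, not part of the patch] Each step macro advances the top-down splay by one level: when the test shows the search continues past the child in the same direction, it first rotates so the grandchild's subtree rises, then pushes the old root onto a side list. The side lists collect, in reverse order, everything known to lie below (llist) or above (rlist) the search point. A toy trace of the right step on key-only nodes (no max_free bookkeeping; names illustrative):

    #include <assert.h>
    #include <stddef.h>

    struct node {
        int key;
        struct node *left, *right;
    };

    int
    main(void)
    {
        /* Chain 1 -> 2 -> 3 (each the right child), searching for key 3. */
        struct node n1 = { 1, NULL, NULL };
        struct node n2 = { 2, NULL, NULL };
        struct node n3 = { 3, NULL, NULL };
        struct node *root = &n1, *y, *llist = NULL;

        n1.right = &n2;
        n2.right = &n3;

        /* One SPLAY_RIGHT_STEP with the test true: rotate left, then
         * link the old root onto llist (reversed, via right pointers). */
        y = root->right;            /* y = n2 */
        if (y != NULL && 1) {       /* "test": search continues right of y */
            root->right = y->left;  /* n1 adopts n2's left subtree */
            y->left = root;         /* rotate left: n2 above n1 */
            root = y;
            y = root->right;        /* descend: y = n3 */
        }
        root->right = llist;        /* push n2 (with n1 below it) on llist */
        llist = root;
        root = y;

        assert(root == &n3);        /* the search arrived at 3 */
        assert(llist == &n2 && n2.left == &n1 && n2.right == NULL);
        return (0);
    }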
 /*
- * vm_map_entry_splay:
- *
- *	The Sleator and Tarjan top-down splay algorithm with the
- *	following variation.  Max_free must be computed bottom-up, so
- *	on the downward pass, maintain the left and right spines in
- *	reverse order.  Then, make a second pass up each side to fix
- *	the pointers and compute max_free.  The time bound is O(log n)
- *	amortized.
- *
- *	The new root is the vm_map_entry containing "addr", or else an
- *	adjacent entry (lower or higher) if addr is not in the tree.
- *
- *	The map must be locked, and leaves it so.
- *
- *	Returns:	the new root.
+ * Walk down the tree until we find addr or a NULL pointer where addr would go,
+ * breaking off left and right subtrees of nodes less than or greater than
+ * addr.  Treat pointers to nodes with max_free < length as NULL pointers.
+ * llist and rlist are the two sides in reverse order (bottom-up), with llist
+ * linked by the right pointer and rlist linked by the left pointer in the
+ * vm_map_entry.
  */
 static vm_map_entry_t
-vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+vm_map_splay_split(vm_offset_t addr, vm_size_t length,
+    vm_map_entry_t root, vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
 {
 	vm_map_entry_t llist, rlist;
-	vm_map_entry_t ltree, rtree;
 	vm_map_entry_t y;
 
-	/* Special case of empty tree. */
-	if (root == NULL)
-		return (root);
-
-	/*
-	 * Pass One: Splay down the tree until we find addr or a NULL
-	 * pointer where addr would go.  llist and rlist are the two
-	 * sides in reverse order (bottom-up), with llist linked by
-	 * the right pointer and rlist linked by the left pointer in
-	 * the vm_map_entry.  Wait until Pass Two to set max_free on
-	 * the two spines.
-	 */
 	llist = NULL;
 	rlist = NULL;
-	for (;;) {
-		/* root is never NULL in here. */
+	while (root != NULL && root->max_free >= length) {
 		if (addr < root->start) {
-			y = root->left;
-			if (y == NULL)
-				break;
-			if (addr < y->start && y->left != NULL) {
-				/* Rotate right and put y on rlist. */
-				root->left = y->right;
-				y->right = root;
-				vm_map_entry_set_max_free(root);
-				root = y->left;
-				y->left = rlist;
-				rlist = y;
-			} else {
-				/* Put root on rlist. */
-				root->left = rlist;
-				rlist = root;
-				root = y;
-			}
+			SPLAY_LEFT_STEP(root, y, rlist,
+			    y->max_free >= length && addr < y->start);
 		} else if (addr >= root->end) {
-			y = root->right;
-			if (y == NULL)
-				break;
-			if (addr >= y->end && y->right != NULL) {
-				/* Rotate left and put y on llist. */
-				root->right = y->left;
-				y->left = root;
-				vm_map_entry_set_max_free(root);
-				root = y->right;
-				y->right = llist;
-				llist = y;
-			} else {
-				/* Put root on llist. */
-				root->right = llist;
-				llist = root;
-				root = y;
-			}
+			SPLAY_RIGHT_STEP(root, y, llist,
+			    y->max_free >= length && addr >= y->end);
 		} else
 			break;
 	}
+	*out_llist = llist;
+	*out_rlist = rlist;
+	return (root);
+}
+
+static void
+vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
+{
+	vm_map_entry_t rlist, y;
+
+	root = root->right;
+	rlist = *iolist;
+	while (root != NULL)
+		SPLAY_LEFT_STEP(root, y, rlist, true);
+	*iolist = rlist;
+}
+
+static void
+vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
+{
+	vm_map_entry_t llist, y;
+
+	root = root->left;
+	llist = *iolist;
+	while (root != NULL)
+		SPLAY_RIGHT_STEP(root, y, llist, true);
+	*iolist = llist;
+}
+
+/*
+ * Walk back up the two spines, flip the pointers and set max_free.  The
+ * subtrees of the root go at the bottom of llist and rlist.
+ */
+static vm_map_entry_t
+vm_map_splay_merge(vm_map_entry_t root,
+    vm_map_entry_t llist, vm_map_entry_t rlist,
+    vm_map_entry_t ltree, vm_map_entry_t rtree)
+{
+	vm_map_entry_t y;
 
-	/*
-	 * Pass Two: Walk back up the two spines, flip the pointers
-	 * and set max_free.  The subtrees of the root go at the
-	 * bottom of llist and rlist.
-	 */
-	ltree = root->left;
 	while (llist != NULL) {
 		y = llist->right;
 		llist->right = ltree;
@@ -966,7 +1022,6 @@
 		ltree = llist;
 		llist = y;
 	}
-	rtree = root->right;
 	while (rlist != NULL) {
 		y = rlist->left;
 		rlist->left = rtree;
@@ -985,6 +1040,55 @@
 	return (root);
 }
 
+/*
+ * vm_map_entry_splay:
+ *
+ *	The Sleator and Tarjan top-down splay algorithm with the
+ *	following variation.  Max_free must be computed bottom-up, so
+ *	on the downward pass, maintain the left and right spines in
+ *	reverse order.  Then, make a second pass up each side to fix
+ *	the pointers and compute max_free.  The time bound is O(log n)
+ *	amortized.
+ *
+ *	The new root is the vm_map_entry containing "addr", or else an
+ *	adjacent entry (lower if possible) if addr is not in the tree.
+ *
+ *	The map must be locked, and leaves it so.
+ *
+ *	Returns:	the new root.
+ */
+static vm_map_entry_t
+vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+{
+	vm_map_entry_t llist, rlist;
+
+	root = vm_map_splay_split(addr, 0, root, &llist, &rlist);
+	if (root != NULL) {
+		/* do nothing */
+	} else if (llist != NULL) {
+		/*
+		 * Recover the greatest node in the left
+		 * subtree and make it the root.
+		 */
+		root = llist;
+		llist = root->right;
+		root->right = NULL;
+	} else if (rlist != NULL) {
+		/*
+		 * Recover the least node in the right
+		 * subtree and make it the root.
+		 */
+		root = rlist;
+		rlist = root->left;
+		root->left = NULL;
+	} else {
+		/* There is no root. */
+		return (NULL);
+	}
+	return (vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right));
+}
+
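[Note, not part of the patch] The old one-shot splay is now the composition of split and merge: split descends, shedding smaller entries onto llist and larger ones onto rlist, and merge walks the spines back up, reattaching them under the new root (recomputing max_free on the way, which is what forces the second pass). The same two-phase structure in a generic, key-only form (illustrative; assumes the sought key is present, and omits max_free maintenance):

    #include <assert.h>
    #include <stddef.h>

    struct node {
        int key;
        struct node *left, *right;
    };

    /* Split: descend toward key, stacking smaller nodes on llist (linked
     * through right pointers) and larger nodes on rlist (through left). */
    static struct node *
    splay_split(int key, struct node *root, struct node **out_l,
        struct node **out_r)
    {
        struct node *llist = NULL, *rlist = NULL, *y;

        while (root != NULL && root->key != key) {
            if (key < root->key) {
                y = root->left;
                if (y != NULL && key < y->key) {    /* rotate right */
                    root->left = y->right;
                    y->right = root;
                    root = y;
                    y = root->left;
                }
                root->left = rlist;                 /* push on rlist */
                rlist = root;
                root = y;
            } else {
                y = root->right;
                if (y != NULL && key > y->key) {    /* rotate left */
                    root->right = y->left;
                    y->left = root;
                    root = y;
                    y = root->right;
                }
                root->right = llist;                /* push on llist */
                llist = root;
                root = y;
            }
        }
        *out_l = llist;
        *out_r = rlist;
        return (root);
    }

    /* Merge: walk the spines back up, re-hanging root's subtrees below. */
    static struct node *
    splay_merge(struct node *root, struct node *llist, struct node *rlist)
    {
        struct node *ltree = root->left, *rtree = root->right, *y;

        while (llist != NULL) {
            y = llist->right;
            llist->right = ltree;
            ltree = llist;
            llist = y;
        }
        while (rlist != NULL) {
            y = rlist->left;
            rlist->left = rtree;
            rtree = rlist;
            rlist = y;
        }
        root->left = ltree;
        root->right = rtree;
        return (root);
    }

    int
    main(void)
    {
        struct node n1 = {1, NULL, NULL}, n2 = {2, NULL, NULL},
            n3 = {3, NULL, NULL};
        struct node *l, *r, *root;

        n2.left = &n1;
        n2.right = &n3;
        root = splay_split(3, &n2, &l, &r);
        assert(root == &n3);
        root = splay_merge(root, l, r);
        assert(root == &n3 && root->left == &n2 && n2.left == &n1);
        return (0);
    }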
 /*
  * vm_map_entry_{un,}link:
  *
@@ -992,67 +1096,88 @@
  */
 static void
 vm_map_entry_link(vm_map_t map,
-		  vm_map_entry_t after_where,
 		  vm_map_entry_t entry)
 {
+	vm_map_entry_t llist, rlist, root;
 
-	CTR4(KTR_VM,
-	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
-	    map->nentries, entry, after_where);
+	CTR3(KTR_VM,
+	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
+	    map->nentries, entry);
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT(after_where->end <= entry->start,
-	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
-	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
-	KASSERT(entry->end <= after_where->next->start,
-	    ("vm_map_entry_link: new end %jx next start %jx overlap",
-	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
-
 	map->nentries++;
-	entry->prev = after_where;
-	entry->next = after_where->next;
-	entry->next->prev = entry;
-	after_where->next = entry;
-
-	if (after_where != &map->header) {
-		if (after_where != map->root)
-			vm_map_entry_splay(after_where->start, map->root);
-		entry->right = after_where->right;
-		entry->left = after_where;
-		after_where->right = NULL;
-		after_where->adj_free = entry->start - after_where->end;
-		vm_map_entry_set_max_free(after_where);
-	} else {
-		entry->right = map->root;
-		entry->left = NULL;
-	}
-	entry->adj_free = entry->next->start - entry->end;
-	vm_map_entry_set_max_free(entry);
+	root = map->root;
+	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
+	KASSERT(root == NULL,
+	    ("vm_map_entry_link: link object already mapped"));
+	entry->prev = (llist == NULL) ? &map->header : llist;
+	entry->next = (rlist == NULL) ? &map->header : rlist;
+	entry->prev->next = entry->next->prev = entry;
+	root = vm_map_splay_merge(entry, llist, rlist, NULL, NULL);
 	map->root = entry;
+	VM_MAP_ASSERT_CONSISTENT(map);
 }
 
+enum unlink_merge_type {
+	UNLINK_MERGE_PREV,
+	UNLINK_MERGE_NONE,
+	UNLINK_MERGE_NEXT
+};
+
 static void
 vm_map_entry_unlink(vm_map_t map,
-		    vm_map_entry_t entry)
+		    vm_map_entry_t entry,
+		    enum unlink_merge_type op)
 {
-	vm_map_entry_t next, prev, root;
+	vm_map_entry_t llist, rlist, root, y;
 
 	VM_MAP_ASSERT_LOCKED(map);
-	if (entry != map->root)
-		vm_map_entry_splay(entry->start, map->root);
-	if (entry->left == NULL)
-		root = entry->right;
-	else {
-		root = vm_map_entry_splay(entry->start, entry->left);
-		root->right = entry->right;
-		root->adj_free = entry->next->start - root->end;
-		vm_map_entry_set_max_free(root);
+	llist = entry->prev;
+	rlist = entry->next;
+	llist->next = rlist;
+	rlist->prev = llist;
+	root = map->root;
+	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
+	KASSERT(root != NULL,
+	    ("vm_map_entry_unlink: unlink object not mapped"));
+
+	switch (op) {
+	case UNLINK_MERGE_PREV:
+		vm_map_splay_findprev(root, &llist);
+		llist->end = root->end;
+		y = root->right;
+		root = llist;
+		llist = root->right;
+		root->right = y;
+		break;
+	case UNLINK_MERGE_NEXT:
+		vm_map_splay_findnext(root, &rlist);
+		rlist->start = root->start;
+		rlist->offset = root->offset;
+		y = root->left;
+		root = rlist;
+		rlist = root->left;
+		root->left = y;
+		break;
+	case UNLINK_MERGE_NONE:
+		vm_map_splay_findprev(root, &llist);
+		vm_map_splay_findnext(root, &rlist);
+		if (llist != NULL) {
+			root = llist;
+			llist = root->right;
+			root->right = NULL;
+		} else if (rlist != NULL) {
+			root = rlist;
+			rlist = root->left;
+			root->left = NULL;
+		} else
+			root = NULL;
+		break;
 	}
+	if (root != NULL)
+		root = vm_map_splay_merge(root, llist, rlist,
+		    root->left, root->right);
 	map->root = root;
-
-	prev = entry->prev;
-	next = entry->next;
-	next->prev = prev;
-	prev->next = next;
+	VM_MAP_ASSERT_CONSISTENT(map);
 	map->nentries--;
 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
 	    map->nentries, entry);
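[Note, not part of the patch] Neighbor merging now happens inside unlink: UNLINK_MERGE_PREV lets the predecessor absorb the removed entry's range (its end moves up), UNLINK_MERGE_NEXT lets the successor absorb it (start and offset move down together, keeping the object-offset arithmetic consistent), and UNLINK_MERGE_NONE is a plain removal. The list-level effect, modeled without the tree (simplified types; the kernel's list is circular through the map header, omitted here):

    #include <assert.h>
    #include <stddef.h>

    struct entry {
        unsigned long start, end, offset;
        struct entry *prev, *next;
    };

    enum unlink_merge_type {
        UNLINK_MERGE_PREV, UNLINK_MERGE_NONE, UNLINK_MERGE_NEXT
    };

    static void
    unlink_entry(struct entry *e, enum unlink_merge_type op)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        switch (op) {
        case UNLINK_MERGE_PREV:        /* predecessor absorbs e's range */
            e->prev->end = e->end;
            break;
        case UNLINK_MERGE_NEXT:        /* successor absorbs e's range */
            e->next->start = e->start;
            e->next->offset = e->offset;
            break;
        case UNLINK_MERGE_NONE:
            break;
        }
    }

    int
    main(void)
    {
        /* [0,10) [10,20) [20,30): b is mergeable with its predecessor a. */
        struct entry a = {0, 10, 0, NULL, NULL};
        struct entry b = {10, 20, 10, &a, NULL};
        struct entry c = {20, 30, 20, &b, NULL};

        a.next = &b; b.next = &c; c.next = NULL;
        a.prev = NULL;
        unlink_entry(&b, UNLINK_MERGE_PREV);
        assert(a.next == &c && c.prev == &a);
        assert(a.end == 20);           /* a now covers [0, 20) */
        return (0);
    }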
@@ -1061,27 +1186,30 @@
 /*
  * vm_map_entry_resize_free:
  *
- *	Recompute the amount of free space following a vm_map_entry
- *	and propagate that value up the tree.  Call this function after
- *	resizing a map entry in-place, that is, without a call to
- *	vm_map_entry_link() or _unlink().
+ *	Recompute the amount of free space following a modified vm_map_entry
+ *	and propagate those values up the tree.  Call this function after
+ *	resizing a map entry in-place by changing the end value, without a
+ *	call to vm_map_entry_link() or _unlink().
  *
  *	The map must be locked, and leaves it so.
  */
 static void
 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
 {
+	vm_map_entry_t llist, rlist, root;
 
-	/*
-	 * Using splay trees without parent pointers, propagating
-	 * max_free up the tree is done by moving the entry to the
-	 * root and making the change there.
-	 */
-	if (entry != map->root)
-		map->root = vm_map_entry_splay(entry->start, map->root);
-
-	entry->adj_free = entry->next->start - entry->end;
-	vm_map_entry_set_max_free(entry);
+	VM_MAP_ASSERT_LOCKED(map);
+	root = map->root;
+	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
+	KASSERT(root != NULL,
+	    ("vm_map_entry_resize_free: resize_free object not mapped"));
+	vm_map_splay_findnext(root, &rlist);
+	root->right = NULL;
+	map->root = vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right);
+	VM_MAP_ASSERT_CONSISTENT(map);
+	CTR3(KTR_VM, "vm_map_entry_resize_free: map %p, nentries %d, entry %p",
+	    map, map->nentries, entry);
 }
 
 /*
@@ -1100,7 +1228,7 @@
 	vm_offset_t address,
 	vm_map_entry_t *entry)	/* OUT */
 {
-	vm_map_entry_t cur;
+	vm_map_entry_t cur, lbound;
 	boolean_t locked;
 
 	/*
@@ -1108,12 +1236,15 @@
 	 * "address" is the map's header.
 	 */
 	cur = map->root;
-	if (cur == NULL)
+	if (cur == NULL) {
 		*entry = &map->header;
-	else if (address >= cur->start && cur->end > address) {
+		return (FALSE);
+	}
+	if (address >= cur->start && cur->end > address) {
 		*entry = cur;
 		return (TRUE);
-	} else if ((locked = vm_map_locked(map)) ||
+	}
+	if ((locked = vm_map_locked(map)) ||
 	    sx_try_upgrade(&map->lock)) {
 		/*
 		 * Splay requires a write lock on the map.  However, it only
@@ -1122,6 +1253,7 @@
 		 * on a temporary upgrade.
 		 */
 		map->root = cur = vm_map_entry_splay(address, cur);
+		VM_MAP_ASSERT_CONSISTENT(map);
 		if (!locked)
 			sx_downgrade(&map->lock);
 
@@ -1130,35 +1262,30 @@
 		 * is that map entry.  Otherwise, the new root is a map entry
 		 * immediately before or after "address".
 		 */
-		if (address >= cur->start) {
+		if (address < cur->start) {
+			*entry = &map->header;
+			return (FALSE);
+		}
+		*entry = cur;
+		return (address < cur->end);
+	}
+	/*
+	 * Since the map is only locked for read access, perform a
+	 * standard binary search tree lookup for "address".
+	 */
+	lbound = &map->header;
+	do {
+		if (address < cur->start) {
+			cur = cur->left;
+		} else if (cur->end <= address) {
+			lbound = cur;
+			cur = cur->right;
+		} else {
 			*entry = cur;
-			if (cur->end > address)
-				return (TRUE);
-		} else
-			*entry = cur->prev;
-	} else
-		/*
-		 * Since the map is only locked for read access, perform a
-		 * standard binary search tree lookup for "address".
-		 */
-		for (;;) {
-			if (address < cur->start) {
-				if (cur->left == NULL) {
-					*entry = cur->prev;
-					break;
-				}
-				cur = cur->left;
-			} else if (cur->end > address) {
-				*entry = cur;
-				return (TRUE);
-			} else {
-				if (cur->right == NULL) {
-					*entry = cur;
-					break;
-				}
-				cur = cur->right;
-			}
+			return (TRUE);
 		}
+	} while (cur != NULL);
+	*entry = lbound;
 	return (FALSE);
 }
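[Note, not part of the patch] When only a read lock is held the map cannot be splayed, so the lookup becomes a plain descent that remembers the last entry known to lie below the address (lbound) instead of consulting prev pointers at the end. A standalone model of that descent (simplified types; header stands in for the map-header sentinel):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        unsigned long start, end;   /* [start, end) */
        struct entry *left, *right;
    };

    /*
     * Find the entry containing addr, or else the nearest entry below it.
     * Returns true iff addr is inside *out; header is returned when no
     * entry lies at or below addr.
     */
    static bool
    lookup(struct entry *root, struct entry *header, unsigned long addr,
        struct entry **out)
    {
        struct entry *cur = root, *lbound = header;

        while (cur != NULL) {
            if (addr < cur->start)
                cur = cur->left;
            else if (cur->end <= addr) {
                lbound = cur;           /* best predecessor so far */
                cur = cur->right;
            } else {
                *out = cur;             /* start <= addr < end */
                return (true);
            }
        }
        *out = lbound;
        return (false);
    }

    int
    main(void)
    {
        struct entry header = { 0 };
        struct entry a = { 10, 20, NULL, NULL }, b = { 40, 50, NULL, NULL };
        struct entry *res;

        a.right = &b;                   /* a is the root */
        assert(lookup(&a, &header, 15, &res) && res == &a);
        assert(!lookup(&a, &header, 30, &res) && res == &a);  /* in the gap */
        assert(!lookup(&a, &header, 5, &res) && res == &header);
        return (0);
    }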
@@ -1351,7 +1478,7 @@
 	/*
 	 * Insert the new entry into the list
 	 */
-	vm_map_entry_link(map, prev_entry, new_entry);
+	vm_map_entry_link(map, new_entry);
 	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
 		map->size += new_entry->end - new_entry->start;
 
@@ -1377,23 +1504,22 @@
  *	Find the first fit (lowest VM address) for "length" free bytes
  *	beginning at address >= start in the given map.
  *
- *	In a vm_map_entry, "adj_free" is the amount of free space
- *	adjacent (higher address) to this entry, and "max_free" is the
- *	maximum amount of contiguous free space in its subtree.  This
- *	allows finding a free region in one path down the tree, so
- *	O(log n) amortized with splay trees.
+ *	In a vm_map_entry, "max_free" is the maximum amount of
+ *	contiguous free space between an entry in its subtree and a
+ *	neighbor of that entry.  This allows finding a free region in
+ *	one path down the tree, so O(log n) amortized with splay
+ *	trees.
  *
 *	The map must be locked, and leaves it so.
 *
- *	Returns: 0 on success, and starting address in *addr,
- *		 1 if insufficient space.
+ *	Returns: starting address if sufficient space,
+ *		 vm_map_max(map) if insufficient space.
 */
-int
-vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
-    vm_offset_t *addr)	/* OUT */
+vm_offset_t
+vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
 {
-	vm_map_entry_t entry;
-	vm_offset_t st;
+	vm_map_entry_t llist, rlist, root, y;
+	vm_size_t left_length;
 
 	/*
 	 * Request must fit within min/max VM address and must avoid
@@ -1401,57 +1527,87 @@
 	 */
 	start = MAX(start, vm_map_min(map));
 	if (start + length > vm_map_max(map) || start + length < start)
-		return (1);
+		return (vm_map_max(map));
 
 	/* Empty tree means wide open address space. */
-	if (map->root == NULL) {
-		*addr = start;
-		return (0);
-	}
+	if (map->root == NULL)
+		return (start);
 
 	/*
 	 * After splay, if start comes before root node, then there
 	 * must be a gap from start to the root.
 	 */
-	map->root = vm_map_entry_splay(start, map->root);
-	if (start + length <= map->root->start) {
-		*addr = start;
-		return (0);
+	root = vm_map_splay_split(start, length, map->root,
+	    &llist, &rlist);
+	if (root != NULL)
+		start = root->end;
+	else if (rlist != NULL) {
+		root = rlist;
+		rlist = root->left;
+		root->left = NULL;
+	} else {
+		root = llist;
+		llist = root->right;
+		root->right = NULL;
 	}
+	map->root = vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right);
+	VM_MAP_ASSERT_CONSISTENT(map);
+	if (start + length <= root->start)
+		return (start);
 
 	/*
 	 * Root is the last node that might begin its gap before
 	 * start, and this is the last comparison where address
 	 * wrap might be a problem.
 	 */
-	st = (start > map->root->end) ? start : map->root->end;
-	if (length <= map->root->end + map->root->adj_free - st) {
-		*addr = st;
-		return (0);
-	}
+	if (root->right == NULL &&
+	    start + length <= vm_map_max(map))
+		return (start);
 
 	/* With max_free, can immediately tell if no solution. */
-	entry = map->root->right;
-	if (entry == NULL || length > entry->max_free)
-		return (1);
+	if (root->right == NULL || length > root->right->max_free)
+		return (vm_map_max(map));
 
 	/*
-	 * Search the right subtree in the order: left subtree, root,
-	 * right subtree (first fit).  The previous splay implies that
-	 * all regions in the right subtree have addresses > start.
+	 * Splay for the least large-enough gap in the right subtree.
 	 */
-	while (entry != NULL) {
-		if (entry->left != NULL && entry->left->max_free >= length)
-			entry = entry->left;
-		else if (entry->adj_free >= length) {
-			*addr = entry->end;
-			return (0);
-		} else
-			entry = entry->right;
+	llist = NULL;
+	rlist = NULL;
+	for (left_length = 0;;
+	    left_length = root->left != NULL ?
+	    root->left->max_free : root->start - llist->end) {
+		if (length <= left_length)
+			SPLAY_LEFT_STEP(root, y, rlist,
+			    length <= (y->left != NULL ?
+			    y->left->max_free : y->start - llist->end));
+		else
+			SPLAY_RIGHT_STEP(root, y, llist,
+			    length > (y->left != NULL ?
+			    y->left->max_free : y->start - root->end));
+		if (root == NULL)
+			break;
 	}
-
-	/* Can't get here, so panic if we do. */
-	panic("vm_map_findspace: max_free corrupt");
+	root = llist;
+	llist = root->right;
+	if ((y = rlist) == NULL)
+		root->right = NULL;
+	else {
+		rlist = y->left;
+		y->left = NULL;
+		root->right = y->right;
+	}
+	root = vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right);
+	if (y != NULL) {
+		y->right = root->right;
+		vm_map_entry_set_max_free(y);
+		root->right = y;
+		vm_map_entry_set_max_free(root);
+	}
+	map->root = root;
+	VM_MAP_ASSERT_CONSISTENT(map);
+	return (root->end);
 }
 
 int
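[Note, not part of the patch] The loop above splays to the lowest gap of at least length bytes, steering left whenever the left subtree (or the gap just below the current node) can satisfy the request. Its observable result is the same as a first-fit walk of the address-ordered entry list, which the max_free-guided descent performs in O(log n) amortized instead of O(n). A reference version under simplified types (illustrative only):

    #include <assert.h>
    #include <stddef.h>

    struct entry {
        unsigned long start, end;
        struct entry *next;
    };

    /*
     * First fit: the lowest addr >= start with length free bytes before
     * the next entry; returns map_max when there is no such gap.
     */
    static unsigned long
    findspace_linear(struct entry *first, unsigned long map_min,
        unsigned long map_max, unsigned long start, unsigned long length)
    {
        struct entry *e;

        if (start < map_min)
            start = map_min;
        if (start + length > map_max || start + length < start)
            return (map_max);           /* wrap or out of range */
        for (e = first; e != NULL; e = e->next) {
            if (start + length <= e->start)
                return (start);         /* gap before e is big enough */
            if (start < e->end)
                start = e->end;         /* first fit must be above e */
        }
        return (start + length <= map_max ? start : map_max);
    }

    int
    main(void)
    {
        struct entry b = { 0x4000, 0x5000, NULL };
        struct entry a = { 0x2000, 0x3000, &b };

        /* Gap [0x3000, 0x4000) is the first fit for a page at/after 0x2800. */
        assert(findspace_linear(&a, 0x1000, 0x10000, 0x2800, 0x1000) == 0x3000);
        /* No 0x2000-byte gap exists until above the last entry. */
        assert(findspace_linear(&a, 0x1000, 0x10000, 0x2800, 0x2000) == 0x5000);
        return (0);
    }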
@@ -1532,8 +1688,9 @@
 	VM_MAP_ASSERT_LOCKED(map);
 	free_addr = *addr;
-	KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
-	    free_addr == *addr, ("caller provided insufficient free space"));
+	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
+	    ("caller failed to provide space %#jx at address %p",
+	    (uintmax_t)length, (void *)free_addr));
 	for (;;) {
 		/*
 		 * At the start of every iteration, the free space at address
@@ -1559,8 +1716,10 @@
 		 * be a valid address, in which case vm_map_findspace() cannot
 		 * be relied upon to fail.
 		 */
-		if (aligned_addr < free_addr ||
-		    vm_map_findspace(map, aligned_addr, length, addr) ||
+		if (aligned_addr < free_addr)
+			return (KERN_NO_SPACE);
+		*addr = vm_map_findspace(map, aligned_addr, length);
+		if (*addr >= vm_map_max(map) ||
 		    (max_addr != 0 && *addr + length > max_addr))
 			return (KERN_NO_SPACE);
 		free_addr = *addr;
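[Note, not part of the patch] Callers now test the returned address against vm_map_max(map) instead of a status code, as in the vm_kern.c and vm_map_alignspace() changes above. A toy model of the convention and its failure sentinel (all names and bounds here are made up for illustration):

    #include <assert.h>

    /* Hypothetical stand-ins for the map bounds. */
    #define MAP_MIN 0x1000UL
    #define MAP_MAX 0x100000UL

    /* A toy findspace honoring the new convention: the lowest fit, or
     * MAP_MAX on failure.  One preexisting mapping at [0x2000, 0x3000). */
    static unsigned long
    findspace(unsigned long start, unsigned long length)
    {
        if (start < MAP_MIN)
            start = MAP_MIN;
        if (start + length > MAP_MAX || start + length < start)
            return (MAP_MAX);           /* wrap or out of range */
        if (start + length <= 0x2000)   /* fits below the mapping */
            return (start);
        if (start < 0x3000)
            start = 0x3000;             /* skip over the mapping */
        return (start + length <= MAP_MAX ? start : MAP_MAX);
    }

    int
    main(void)
    {
        unsigned long addr;

        /* Success: the returned address is below the map maximum. */
        addr = findspace(MAP_MIN, 0x800);
        assert(addr < MAP_MAX && addr == MAP_MIN);

        /* Collision: the allocation is pushed past the existing mapping. */
        addr = findspace(0x1900, 0x800);
        assert(addr == 0x3000);

        /* Failure: the sentinel map-maximum value comes back. */
        addr = findspace(MAP_MIN, MAP_MAX);
        assert(addr == MAP_MAX);
        return (0);
    }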
@@ -1672,21 +1831,25 @@
 		gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
 		    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
 		    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
-		if (vm_map_findspace(map, curr_min_addr, length +
-		    gap * pagesizes[pidx], addr) ||
+		*addr = vm_map_findspace(map, curr_min_addr,
+		    length + gap * pagesizes[pidx]);
+		if (*addr >= vm_map_max(map) ||
 		    (max_addr != 0 && *addr + length > max_addr))
 			goto again;
 		/* And randomize the start address. */
 		*addr += (arc4random() % gap) * pagesizes[pidx];
-	} else if (vm_map_findspace(map, curr_min_addr, length, addr) ||
-	    (max_addr != 0 && *addr + length > max_addr)) {
-		if (cluster) {
-			cluster = false;
-			MPASS(try == 1);
-			goto again;
+	} else {
+		*addr = vm_map_findspace(map, curr_min_addr, length);
+		if (*addr >= vm_map_max(map) ||
+		    (max_addr != 0 && *addr + length > max_addr)) {
+			if (cluster) {
+				cluster = false;
+				MPASS(try == 1);
+				goto again;
+			}
+			rv = KERN_NO_SPACE;
+			goto done;
 		}
-		rv = KERN_NO_SPACE;
-		goto done;
 	}
 
 	if (find_space != VMFS_ANY_SPACE &&
@@ -1824,18 +1987,12 @@
 		return;
 	prev = entry->prev;
 	if (vm_map_mergeable_neighbors(prev, entry)) {
-		vm_map_entry_unlink(map, prev);
-		entry->start = prev->start;
-		entry->offset = prev->offset;
-		if (entry->prev != &map->header)
-			vm_map_entry_resize_free(map, entry->prev);
+		vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
 		vm_map_merged_neighbor_dispose(map, prev);
 	}
 	next = entry->next;
 	if (vm_map_mergeable_neighbors(entry, next)) {
-		vm_map_entry_unlink(map, next);
-		entry->end = next->end;
-		vm_map_entry_resize_free(map, entry);
+		vm_map_entry_unlink(map, next, UNLINK_MERGE_PREV);
 		vm_map_merged_neighbor_dispose(map, next);
 	}
 }
@@ -1913,7 +2070,7 @@
 	if (new_entry->cred != NULL)
 		crhold(entry->cred);
 
-	vm_map_entry_link(map, entry->prev, new_entry);
+	vm_map_entry_link(map, new_entry);
 
 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 		vm_object_reference(new_entry->object.vm_object);
@@ -1995,7 +2152,7 @@
 	if (new_entry->cred != NULL)
 		crhold(entry->cred);
 
-	vm_map_entry_link(map, entry, new_entry);
+	vm_map_entry_link(map, new_entry);
 
 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 		vm_object_reference(new_entry->object.vm_object);
@@ -3131,7 +3288,7 @@
 	vm_pindex_t offidxstart, offidxend, count, size1;
 	vm_size_t size;
 
-	vm_map_entry_unlink(map, entry);
+	vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
 	object = entry->object.vm_object;
 
 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
@@ -3674,8 +3831,7 @@
 		 * Insert the entry into the new map -- we know we're
 		 * inserting at the end of the new map.
 		 */
-		vm_map_entry_link(new_map, new_map->header.prev,
-		    new_entry);
+		vm_map_entry_link(new_map, new_entry);
 		vmspace_map_entry_forked(vm1, vm2, new_entry);
 
 		/*
@@ -3702,8 +3858,7 @@
 		new_entry->wired_count = 0;
 		new_entry->object.vm_object = NULL;
 		new_entry->cred = NULL;
-		vm_map_entry_link(new_map, new_map->header.prev,
-		    new_entry);
+		vm_map_entry_link(new_map, new_entry);
 		vmspace_map_entry_forked(vm1, vm2, new_entry);
 		vm_map_copy_entry(old_map, new_map, old_entry,
 		    new_entry, fork_charge);
@@ -3726,8 +3881,7 @@
 		new_entry->max_protection = old_entry->max_protection;
 		new_entry->inheritance = VM_INHERIT_ZERO;
 
-		vm_map_entry_link(new_map, new_map->header.prev,
-		    new_entry);
+		vm_map_entry_link(new_map, new_entry);
 		vmspace_map_entry_forked(vm1, vm2, new_entry);
 
 		new_entry->cred = curthread->td_ucred;