Index: sys/security/mac/mac_process.c =================================================================== --- sys/security/mac/mac_process.c +++ sys/security/mac/mac_process.c @@ -252,7 +252,7 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred, struct vm_map *map) { - vm_map_entry_t vme; + vm_map_entry_t prev, vme; int result; vm_prot_t revokeperms; vm_object_t backing_object, object; @@ -263,8 +263,10 @@ if (!mac_mmap_revocation) return; + prev = &map->header; vm_map_lock(map); - VM_MAP_ENTRY_FOREACH(vme, map) { + for (vme = map->header.right; vme != &map->header; + prev = vme, vme = vm_map_entry_succ(prev)) { if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) { mac_proc_vm_revoke_recurse(td, cred, vme->object.sub_map); @@ -363,7 +365,7 @@ } pmap_protect(map->pmap, vme->start, vme->end, vme->protection & ~revokeperms); - vm_map_try_merge_entries(map, vme->prev, vme); + vm_map_try_merge_entries(map, prev, vme); } } vm_map_unlock(map); Index: sys/vm/vm_map.h =================================================================== --- sys/vm/vm_map.h +++ sys/vm/vm_map.h @@ -99,10 +99,8 @@ * Also included is control information for virtual copy operations. 
*/ struct vm_map_entry { - struct vm_map_entry *prev; /* previous entry */ - struct vm_map_entry *next; /* next entry */ - struct vm_map_entry *left; /* left child in binary search tree */ - struct vm_map_entry *right; /* right child in binary search tree */ + struct vm_map_entry *left; /* left child or previous entry */ + struct vm_map_entry *right; /* right child or next entry */ vm_offset_t start; /* start address */ vm_offset_t end; /* end address */ vm_offset_t next_read; /* vaddr of the next sequential read */ @@ -416,10 +414,25 @@ vm_pindex_t *, vm_prot_t *, boolean_t *); void vm_map_lookup_done (vm_map_t, vm_map_entry_t); boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); + +static inline vm_map_entry_t +vm_map_entry_succ(vm_map_entry_t entry) +{ + vm_map_entry_t after; + + after = entry->right; + if (after->left->start > entry->start) { + do + after = after->left; + while (after->left != entry); + } + return (after); +} + #define VM_MAP_ENTRY_FOREACH(it, map) \ - for ((it) = (map)->header.next; \ + for ((it) = (map)->header.right; \ (it) != &(map)->header; \ - (it) = (it)->next) + (it) = vm_map_entry_succ(it)) int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t); int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, Index: sys/vm/vm_map.c =================================================================== --- sys/vm/vm_map.c +++ sys/vm/vm_map.c @@ -574,7 +574,7 @@ entry = td->td_map_def_user; td->td_map_def_user = NULL; while (entry != NULL) { - next = entry->next; + next = entry->right; MPASS((entry->eflags & (MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC)); @@ -862,7 +862,6 @@ _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) { - map->header.next = map->header.prev = &map->header; map->header.eflags = MAP_ENTRY_HEADER; map->needs_wakeup = FALSE; map->system_map = 0; @@ -870,6 
+869,7 @@ map->header.end = min; map->header.start = max; map->flags = 0; + map->header.left = map->header.right = &map->header; map->root = NULL; map->timestamp = 0; map->busy = 0; @@ -929,6 +929,12 @@ (behavior & MAP_ENTRY_BEHAV_MASK); } +static inline vm_size_t +vm_size_max(vm_size_t a, vm_size_t b) +{ + return (a > b) ? a : b; +} + /* * vm_map_entry_max_free_{left,right}: * @@ -940,7 +946,7 @@ vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor) { - return (root->left != NULL ? + return (root->left != left_ancestor ? root->left->max_free : root->start - left_ancestor->end); } @@ -948,11 +954,34 @@ vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor) { - return (root->right != NULL ? + return (root->right != right_ancestor ? root->right->max_free : right_ancestor->start - root->end); } -#define SPLAY_LEFT_STEP(root, y, rlist, test) do { \ +/* + * vm_map_entry_{pred,succ}: + * + * Find the {predecessor, successor} of the entry by taking one step + * in the appropriate direction and backtracking as much as necessary. + */ + +/* vm_map_entry_succ is defined in vm_map.h. */ + +static inline vm_map_entry_t +vm_map_entry_pred(vm_map_entry_t entry) +{ + vm_map_entry_t prior; + + prior = entry->left; + if (prior->right->start < entry->start) { + do + prior = prior->right; + while (prior->right != entry); + } + return (prior); +} + +#define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do { \ vm_size_t max_free; \ \ /* \ @@ -964,14 +993,17 @@ max_free = root->max_free; \ KASSERT(max_free >= vm_map_entry_max_free_right(root, rlist), \ ("%s: max_free invariant fails", __func__)); \ - if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \ + if (y == llist ? max_free > 0 : max_free - 1 < y->max_free) \ max_free = vm_map_entry_max_free_right(root, rlist); \ - if (y != NULL && (test)) { \ + if (y != llist && (test)) { \ /* Rotate right and make y root. 
*/ \ - root->left = y->right; \ - y->right = root; \ + if (y->right != root) { \ + root->left = y->right; \ + y->right = root; \ + } \ if (max_free < y->max_free) \ - root->max_free = max_free = MAX(max_free, \ + root->max_free = max_free = \ + vm_size_max(max_free, \ vm_map_entry_max_free_left(root, y)); \ root = y; \ y = root->left; \ @@ -982,10 +1014,10 @@ ("%s: max_free not copied from right", __func__)); \ root->left = rlist; \ rlist = root; \ - root = y; \ + root = (y != llist) ? y : NULL; \ } while (0) -#define SPLAY_RIGHT_STEP(root, y, llist, test) do { \ +#define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do { \ vm_size_t max_free; \ \ /* \ @@ -997,14 +1029,17 @@ max_free = root->max_free; \ KASSERT(max_free >= vm_map_entry_max_free_left(root, llist), \ ("%s: max_free invariant fails", __func__)); \ - if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \ + if (y == rlist ? max_free > 0 : max_free - 1 < y->max_free) \ max_free = vm_map_entry_max_free_left(root, llist); \ - if (y != NULL && (test)) { \ + if (y != rlist && (test)) { \ /* Rotate left and make y root. */ \ - root->right = y->left; \ - y->left = root; \ + if (y->left != root) { \ + root->right = y->left; \ + y->left = root; \ + } \ if (max_free < y->max_free) \ - root->max_free = max_free = MAX(max_free, \ + root->max_free = max_free = \ + vm_size_max(max_free, \ vm_map_entry_max_free_right(root, y)); \ root = y; \ y = root->right; \ @@ -1015,18 +1050,18 @@ ("%s: max_free not copied from left", __func__)); \ root->right = llist; \ llist = root; \ - root = y; \ + root = (y != rlist) ? y : NULL; \ } while (0) /* - * Walk down the tree until we find addr or a NULL pointer where addr would go, - * breaking off left and right subtrees of nodes less than, or greater than - * addr. Treat pointers to nodes with max_free < length as NULL pointers. 
- * llist and rlist are the two sides in reverse order (bottom-up), with llist - * linked by the right pointer and rlist linked by the left pointer in the - * vm_map_entry, and both lists terminated by &map->header. This function, and - * the subsequent call to vm_map_splay_merge, rely on the start and end address - * values in &map->header. + * Walk down the tree until we find addr or a gap where addr would go, breaking + * off left and right subtrees of nodes less than, or greater than addr. Treat + * subtrees with root->max_free < length as empty trees. llist and rlist are + * the two sides in reverse order (bottom-up), with llist linked by the right + * pointer and rlist linked by the left pointer in the vm_map_entry, and both + * lists terminated by &map->header. This function, and the subsequent call to + * vm_map_splay_merge_{left,right}, rely on the start and end address values in + * &map->header. */ static vm_map_entry_t vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length, @@ -1040,10 +1075,10 @@ KASSERT(llist->end <= root->start && root->end <= rlist->start, ("%s: root not within tree bounds", __func__)); if (addr < root->start) { - SPLAY_LEFT_STEP(root, y, rlist, + SPLAY_LEFT_STEP(root, y, llist, rlist, y->max_free >= length && addr < y->start); } else if (addr >= root->end) { - SPLAY_RIGHT_STEP(root, y, llist, + SPLAY_RIGHT_STEP(root, y, llist, rlist, y->max_free >= length && addr >= y->end); } else break; @@ -1056,25 +1091,34 @@ static void vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist) { - vm_map_entry_t rlist, y; + vm_map_entry_t llist, rlist, y; + rlist = *iolist; + llist = root; root = root->right; - rlist = *iolist; - while (root != NULL) - SPLAY_LEFT_STEP(root, y, rlist, true); - *iolist = rlist; + if (root != rlist) { + do + SPLAY_LEFT_STEP(root, y, llist, rlist, true); + while (root != NULL); + *iolist = rlist; + } } static void vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist) { - 
vm_map_entry_t llist, y; + vm_map_entry_t llist, rlist, y; + + llist = *iolist; + rlist = root; root = root->left; - llist = *iolist; - while (root != NULL) - SPLAY_RIGHT_STEP(root, y, llist, true); - *iolist = llist; + if (root != llist) { + do + SPLAY_RIGHT_STEP(root, y, llist, rlist, true); + while (root != NULL); + *iolist = llist; + } } static inline void @@ -1091,47 +1135,120 @@ * Walk back up the two spines, flip the pointers and set max_free. The * subtrees of the root go at the bottom of llist and rlist. */ -static void -vm_map_splay_merge(vm_map_t map, vm_map_entry_t root, - vm_map_entry_t llist, vm_map_entry_t rlist) +static vm_size_t +vm_map_splay_merge_left_walk(vm_map_t map, vm_map_entry_t root, + vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist) { - vm_map_entry_t prev; - vm_size_t max_free_left, max_free_right; + do { + /* + * The max_free values of the children of llist are in + * llist->max_free and max_free. Update with the + * max value. + */ + llist->max_free = max_free = + vm_size_max(llist->max_free, max_free); + vm_map_entry_swap(&llist->right, &tail); + vm_map_entry_swap(&tail, &llist); + } while (llist != &map->header); + root->left = tail; + return (max_free); +} - max_free_left = vm_map_entry_max_free_left(root, llist); +/* + * When llist is known to be the predecessor of root. + */ +static inline vm_size_t +vm_map_splay_merge_pred(vm_map_t map, vm_map_entry_t root, + vm_map_entry_t llist) +{ + vm_size_t max_free; + + max_free = root->start - llist->end; + if (llist != &map->header) + max_free = vm_map_splay_merge_left_walk(map, root, + root, max_free, llist); + else { + root->left = llist; + llist->right = root; + } + return (max_free); +} + +/* + * When llist may or may not be the predecessor of root. 
+ */ +static inline vm_size_t +vm_map_splay_merge_left(vm_map_t map, vm_map_entry_t root, + vm_map_entry_t llist) +{ + vm_size_t max_free; + + max_free = vm_map_entry_max_free_left(root, llist); if (llist != &map->header) { - prev = root->left; - do { - /* - * The max_free values of the children of llist are in - * llist->max_free and max_free_left. Update with the - * max value. - */ - llist->max_free = max_free_left = - MAX(llist->max_free, max_free_left); - vm_map_entry_swap(&llist->right, &prev); - vm_map_entry_swap(&prev, &llist); - } while (llist != &map->header); - root->left = prev; + max_free = vm_map_splay_merge_left_walk(map, root, + root->left == llist ? root : root->left, + max_free, llist); + } else if (root->left == llist) + llist->right = root; + return (max_free); +} + +static vm_size_t +vm_map_splay_merge_right_walk(vm_map_t map, vm_map_entry_t root, + vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist) +{ + do { + /* + * The max_free values of the children of rlist are in + * rlist->max_free and max_free. Update with the + * max value. + */ + rlist->max_free = max_free = + vm_size_max(rlist->max_free, max_free); + vm_map_entry_swap(&rlist->left, &tail); + vm_map_entry_swap(&tail, &rlist); + } while (rlist != &map->header); + root->right = tail; + return (max_free); +} + +/* + * When rlist is known to be the successor of root. + */ +static inline vm_size_t +vm_map_splay_merge_succ(vm_map_t map, vm_map_entry_t root, + vm_map_entry_t rlist) +{ + vm_size_t max_free; + + max_free = rlist->start - root->end; + if (rlist != &map->header) { + max_free = vm_map_splay_merge_right_walk(map, root, + root, max_free, rlist); + } else { + root->right = rlist; + rlist->left = root; } - max_free_right = vm_map_entry_max_free_right(root, rlist); + return (max_free); +} + +/* + * When rlist may or may not be the successor of root. 
+ */ +static inline vm_size_t +vm_map_splay_merge_right(vm_map_t map, vm_map_entry_t root, + vm_map_entry_t rlist) +{ + vm_size_t max_free; + + max_free = vm_map_entry_max_free_right(root, rlist); if (rlist != &map->header) { - prev = root->right; - do { - /* - * The max_free values of the children of rlist are in - * rlist->max_free and max_free_right. Update with the - * max value. - */ - rlist->max_free = max_free_right = - MAX(rlist->max_free, max_free_right); - vm_map_entry_swap(&rlist->left, &prev); - vm_map_entry_swap(&prev, &rlist); - } while (rlist != &map->header); - root->right = prev; - } - root->max_free = MAX(max_free_left, max_free_right); - map->root = root; + max_free = vm_map_splay_merge_right_walk(map, root, + root->right == rlist ? root : root->right, + max_free, rlist); + } else if (root->right == rlist) + rlist->left = root; + return (max_free); } /* @@ -1144,6 +1261,14 @@ * the pointers and compute max_free. The time bound is O(log n) * amortized. * + * The tree is threaded, which means that there are no null pointers. + * When a node has no left child, its left pointer points to its + * predecessor, which is the last ancestor on the search path from the root + * where the search branched right. Likewise, when a node has no right + * child, its right pointer points to its successor. The map header node + * is the predecessor of the first map entry, and the successor of the + * last. + * * The new root is the vm_map_entry containing "addr", or else an * adjacent entry (lower if possible) if addr is not in the tree. 
* @@ -1155,10 +1280,12 @@ vm_map_splay(vm_map_t map, vm_offset_t addr) { vm_map_entry_t llist, rlist, root; + vm_size_t max_free_left, max_free_right; root = vm_map_splay_split(map, addr, 0, &llist, &rlist); if (root != NULL) { - /* do nothing */ + max_free_left = vm_map_splay_merge_left(map, root, llist); + max_free_right = vm_map_splay_merge_right(map, root, rlist); } else if (llist != &map->header) { /* * Recover the greatest node in the left @@ -1166,7 +1293,8 @@ */ root = llist; llist = root->right; - root->right = NULL; + max_free_left = vm_map_splay_merge_left(map, root, llist); + max_free_right = vm_map_splay_merge_succ(map, root, rlist); } else if (rlist != &map->header) { /* * Recover the least node in the right @@ -1174,12 +1302,14 @@ */ root = rlist; rlist = root->left; - root->left = NULL; + max_free_left = vm_map_splay_merge_pred(map, root, llist); + max_free_right = vm_map_splay_merge_right(map, root, rlist); } else { /* There is no root. */ return (NULL); } - vm_map_splay_merge(map, root, llist, rlist); + root->max_free = vm_size_max(max_free_left, max_free_right); + map->root = root; VM_MAP_ASSERT_CONSISTENT(map); return (root); } @@ -1202,11 +1332,11 @@ root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); KASSERT(root == NULL, ("vm_map_entry_link: link object already mapped")); - entry->prev = llist; - entry->next = rlist; - llist->next = rlist->prev = entry; - entry->left = entry->right = NULL; - vm_map_splay_merge(map, entry, llist, rlist); + root = entry; + root->max_free = vm_size_max( + vm_map_splay_merge_pred(map, root, llist), + vm_map_splay_merge_succ(map, root, rlist)); + map->root = root; VM_MAP_ASSERT_CONSISTENT(map); } @@ -1219,44 +1349,37 @@ vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, enum unlink_merge_type op) { - vm_map_entry_t llist, rlist, root, y; + vm_map_entry_t llist, rlist, root; + vm_size_t max_free_left, max_free_right; VM_MAP_ASSERT_LOCKED(map); root = vm_map_splay_split(map, entry->start, 0, 
&llist, &rlist); KASSERT(root != NULL, ("vm_map_entry_unlink: unlink object not mapped")); + vm_map_splay_findprev(root, &llist); vm_map_splay_findnext(root, &rlist); - switch (op) { - case UNLINK_MERGE_NEXT: - rlist->start = root->start; - rlist->offset = root->offset; - y = root->left; + if (op == UNLINK_MERGE_NEXT) { + rlist->start = root->start; + rlist->offset = root->offset; + } + if (llist != &map->header) { + root = llist; + llist = root->right; + max_free_left = vm_map_splay_merge_left(map, root, llist); + max_free_right = vm_map_splay_merge_succ(map, root, rlist); + } else if (rlist != &map->header) { root = rlist; rlist = root->left; - root->left = y; - break; - case UNLINK_MERGE_NONE: - vm_map_splay_findprev(root, &llist); - if (llist != &map->header) { - root = llist; - llist = root->right; - root->right = NULL; - } else if (rlist != &map->header) { - root = rlist; - rlist = root->left; - root->left = NULL; - } else - root = NULL; - break; + max_free_left = vm_map_splay_merge_pred(map, root, llist); + max_free_right = vm_map_splay_merge_right(map, root, rlist); + } else { + map->header.left = map->header.right = &map->header; + root = NULL; } - y = entry->next; - y->prev = entry->prev; - y->prev->next = y; if (root != NULL) - vm_map_splay_merge(map, root, llist, rlist); - else - map->root = NULL; + root->max_free = vm_size_max(max_free_left, max_free_right); + map->root = root; VM_MAP_ASSERT_CONSISTENT(map); map->nentries--; CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, @@ -1281,9 +1404,11 @@ KASSERT(root != NULL, ("%s: resize object not mapped", __func__)); vm_map_splay_findnext(root, &rlist); - root->right = NULL; entry->end += grow_amount; - vm_map_splay_merge(map, root, llist, rlist); + root->max_free = vm_size_max( + vm_map_splay_merge_left(map, root, llist), + vm_map_splay_merge_succ(map, root, rlist)); + map->root = root; VM_MAP_ASSERT_CONSISTENT(map); CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p", __func__, map, 
map->nentries, entry); @@ -1305,7 +1430,7 @@ vm_offset_t address, vm_map_entry_t *entry) /* OUT */ { - vm_map_entry_t cur, lbound; + vm_map_entry_t cur, lbound, ubound; boolean_t locked; /* @@ -1349,18 +1474,23 @@ * Since the map is only locked for read access, perform a * standard binary search tree lookup for "address". */ - lbound = &map->header; - do { + lbound = ubound = &map->header; + for (;;) { if (address < cur->start) { + ubound = cur; cur = cur->left; + if (cur == lbound) + break; } else if (cur->end <= address) { lbound = cur; cur = cur->right; + if (cur == ubound) + break; } else { *entry = cur; return (TRUE); } - } while (cur != NULL); + } *entry = lbound; return (FALSE); } @@ -1381,7 +1511,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) { - vm_map_entry_t new_entry, prev_entry; + vm_map_entry_t new_entry, prev_entry, next_entry; struct ucred *cred; vm_eflags_t protoeflags; vm_inherit_t inheritance; @@ -1412,7 +1542,8 @@ /* * Assert that the next entry doesn't overlap the end point. */ - if (prev_entry->next->start < end) + next_entry = vm_map_entry_succ(prev_entry); + if (next_entry->start < end) return (KERN_NO_SPACE); if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL || @@ -1505,7 +1636,7 @@ map->size += end - prev_entry->end; vm_map_entry_resize(map, prev_entry, end - prev_entry->end); - vm_map_try_merge_entries(map, prev_entry, prev_entry->next); + vm_map_try_merge_entries(map, prev_entry, next_entry); return (KERN_SUCCESS); } @@ -1566,7 +1697,7 @@ * other cases, which are less common. 
*/ vm_map_try_merge_entries(map, prev_entry, new_entry); - vm_map_try_merge_entries(map, new_entry, new_entry->next); + vm_map_try_merge_entries(map, new_entry, next_entry); if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), @@ -1597,7 +1728,7 @@ vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length) { vm_map_entry_t llist, rlist, root, y; - vm_size_t left_length; + vm_size_t left_length, max_free_left, max_free_right; vm_offset_t gap_end; /* @@ -1623,24 +1754,31 @@ gap_end = rlist->start; if (root != NULL) { start = root->end; - if (root->right != NULL) + if (root->right != rlist) gap_end = start; + max_free_left = vm_map_splay_merge_left(map, root, llist); + max_free_right = vm_map_splay_merge_right(map, root, rlist); } else if (rlist != &map->header) { root = rlist; rlist = root->left; - root->left = NULL; + max_free_left = vm_map_splay_merge_pred(map, root, llist); + max_free_right = vm_map_splay_merge_right(map, root, rlist); } else { root = llist; llist = root->right; - root->right = NULL; + root->right = rlist; + rlist->left = root; + max_free_left = vm_map_splay_merge_left(map, root, llist); + max_free_right = rlist->start - root->end; } - vm_map_splay_merge(map, root, llist, rlist); + root->max_free = vm_size_max(max_free_left, max_free_right); + map->root = root; VM_MAP_ASSERT_CONSISTENT(map); if (length <= gap_end - start) return (start); /* With max_free, can immediately tell if no solution. 
*/ - if (root->right == NULL || length > root->right->max_free) + if (root->right == &map->header || length > root->right->max_free) return (vm_map_max(map) - length + 1); /* @@ -1650,28 +1788,33 @@ for (left_length = 0;; left_length = vm_map_entry_max_free_left(root, llist)) { if (length <= left_length) - SPLAY_LEFT_STEP(root, y, rlist, + SPLAY_LEFT_STEP(root, y, llist, rlist, length <= vm_map_entry_max_free_left(y, llist)); else - SPLAY_RIGHT_STEP(root, y, llist, + SPLAY_RIGHT_STEP(root, y, llist, rlist, length > vm_map_entry_max_free_left(y, root)); if (root == NULL) break; } root = llist; llist = root->right; - root->right = NULL; - if (rlist != &map->header) { + root->max_free = vm_map_splay_merge_left(map, root, llist); + if (rlist == &map->header) { + root->right = rlist; + root->max_free = vm_size_max(root->max_free, + rlist->start - root->end); + } else { y = rlist; rlist = y->left; - y->left = NULL; - vm_map_splay_merge(map, y, &map->header, rlist); - y->max_free = MAX( - vm_map_entry_max_free_left(y, root), - vm_map_entry_max_free_right(y, &map->header)); + y->left = root; + y->max_free = vm_size_max( + y->start - root->end, + vm_map_splay_merge_right(map, y, rlist)); root->right = y; + root->max_free = vm_size_max(root->max_free, + y->max_free); } - vm_map_splay_merge(map, root, llist, &map->header); + map->root = root; VM_MAP_ASSERT_CONSISTENT(map); return (root->end); } @@ -2264,7 +2407,7 @@ if (vm_map_lookup_entry(map, start, &entry)) { vm_map_clip_start(map, entry, start); } else - entry = entry->next; + entry = vm_map_entry_succ(entry); vm_map_clip_end(map, entry, end); @@ -2421,12 +2564,13 @@ VM_MAP_RANGE_CHECK(map, start, end); if (!vm_map_lookup_entry(map, start, &entry)) - entry = entry->next; + entry = vm_map_entry_succ(entry); /* * Make a first pass to check for protection violations. 
*/ - for (current = entry; current->start < end; current = current->next) { + for (current = entry; current->start < end; + current = vm_map_entry_succ(current)) { if ((current->eflags & MAP_ENTRY_GUARD) != 0) continue; if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { @@ -2464,7 +2608,8 @@ */ rv = KERN_SUCCESS; vm_map_clip_start(map, entry, start); - for (current = entry; current->start < end; current = current->next) { + for (current = entry; current->start < end; + current = vm_map_entry_succ(current)) { vm_map_clip_end(map, current, end); @@ -2521,9 +2666,11 @@ * Otherwise, just simplify entries, since some may have been modified. * [Note that clipping is not necessary the second time.] */ - for (current = entry; current->start < end; - vm_map_try_merge_entries(map, current->prev, current), - current = current->next) { + for (current = entry, entry = vm_map_entry_pred(current); + current->start < end; + (vm_map_try_merge_entries(map, entry, current), + entry = current, + current = vm_map_entry_succ(current))) { if (rv != KERN_SUCCESS || (current->eflags & MAP_ENTRY_GUARD) != 0) continue; @@ -2561,7 +2708,7 @@ #undef MASK } } - vm_map_try_merge_entries(map, current->prev, current); + vm_map_try_merge_entries(map, entry, current); vm_map_unlock(map); return (rv); } @@ -2621,10 +2768,14 @@ VM_MAP_RANGE_CHECK(map, start, end); if (vm_map_lookup_entry(map, start, &entry)) { - if (modify_map) + if (modify_map) { vm_map_clip_start(map, entry, start); + current = entry; + entry = vm_map_entry_pred(current); + } else + current = entry; } else { - entry = entry->next; + current = vm_map_entry_succ(entry); } if (modify_map) { @@ -2634,8 +2785,8 @@ * We clip the vm_map_entry so that behavioral changes are * limited to the specified address range. 
*/ - for (current = entry; current->start < end; - current = current->next) { + for (; current->start < end; + entry = current, current = vm_map_entry_succ(current)) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) continue; @@ -2666,9 +2817,9 @@ default: break; } - vm_map_try_merge_entries(map, current->prev, current); + vm_map_try_merge_entries(map, entry, current); } - vm_map_try_merge_entries(map, current->prev, current); + vm_map_try_merge_entries(map, entry, current); vm_map_unlock(map); } else { vm_pindex_t pstart, pend; @@ -2680,8 +2831,8 @@ * Since we don't clip the vm_map_entry, we have to clip * the vm_object pindex and count. */ - for (current = entry; current->start < end; - current = current->next) { + for (; current->start < end; + current = vm_map_entry_succ(current)) { vm_offset_t useEnd, useStart; if (current->eflags & MAP_ENTRY_IS_SUB_MAP) @@ -2787,17 +2938,19 @@ if (vm_map_lookup_entry(map, start, &temp_entry)) { entry = temp_entry; vm_map_clip_start(map, entry, start); + temp_entry = vm_map_entry_pred(entry); } else - entry = temp_entry->next; + entry = vm_map_entry_succ(temp_entry); while (entry->start < end) { vm_map_clip_end(map, entry, end); if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || new_inheritance != VM_INHERIT_ZERO) entry->inheritance = new_inheritance; - vm_map_try_merge_entries(map, entry->prev, entry); - entry = entry->next; + vm_map_try_merge_entries(map, temp_entry, entry); + temp_entry = entry; + entry = vm_map_entry_succ(entry); } - vm_map_try_merge_entries(map, entry->prev, entry); + vm_map_try_merge_entries(map, temp_entry, entry); vm_map_unlock(map); return (KERN_SUCCESS); } @@ -2846,7 +2999,7 @@ *io_end = start; return (NULL); } - entry = entry->next; + entry = vm_map_entry_succ(entry); } return (entry); } @@ -2860,7 +3013,7 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) { - vm_map_entry_t entry, first_entry; + vm_map_entry_t entry, first_entry, next_entry; int rv; bool first_iteration, holes_ok, 
need_wakeup, user_unwire; @@ -2872,7 +3025,7 @@ VM_MAP_RANGE_CHECK(map, start, end); if (!vm_map_lookup_entry(map, start, &first_entry)) { if (holes_ok) - first_entry = first_entry->next; + first_entry = vm_map_entry_succ(first_entry); else { vm_map_unlock(map); return (KERN_INVALID_ADDRESS); @@ -2911,12 +3064,13 @@ ("owned map entry %p", entry)); entry->eflags |= MAP_ENTRY_IN_TRANSITION; entry->wiring_thread = curthread; + next_entry = vm_map_entry_succ(entry); /* * Check the map for holes in the specified region. * If holes_ok, skip this check. */ if (!holes_ok && - (entry->end < end && entry->next->start > entry->end)) { + (entry->end < end && next_entry->start > entry->end)) { end = entry->end; rv = KERN_INVALID_ADDRESS; break; @@ -2930,15 +3084,19 @@ rv = KERN_INVALID_ARGUMENT; break; } - entry = entry->next; + entry = next_entry; } need_wakeup = false; if (first_entry == NULL && !vm_map_lookup_entry(map, start, &first_entry)) { KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); - first_entry = first_entry->next; + entry = vm_map_entry_succ(first_entry); + } else { + entry = first_entry; + first_entry = vm_map_entry_pred(entry); } - for (entry = first_entry; entry->start < end; entry = entry->next) { + for (; entry->start < end; + first_entry = entry, entry = vm_map_entry_succ(entry)) { /* * If holes_ok was specified, an empty * space in the unwired region could have been mapped @@ -2974,9 +3132,9 @@ entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; need_wakeup = true; } - vm_map_try_merge_entries(map, entry->prev, entry); + vm_map_try_merge_entries(map, first_entry, entry); } - vm_map_try_merge_entries(map, entry->prev, entry); + vm_map_try_merge_entries(map, first_entry, entry); vm_map_unlock(map); if (need_wakeup) vm_map_wakeup(map); @@ -3082,7 +3240,7 @@ VM_MAP_RANGE_CHECK(map, start, end); if (!vm_map_lookup_entry(map, start, &first_entry)) { if (holes_ok) - first_entry = first_entry->next; + first_entry = vm_map_entry_succ(first_entry); else return 
(KERN_INVALID_ADDRESS); } @@ -3186,7 +3344,7 @@ faddr < entry->end) vm_map_wire_entry_failure(map, entry, faddr); - entry = entry->next; + entry = vm_map_entry_succ(entry); } } if (rv != KERN_SUCCESS) { @@ -3204,13 +3362,14 @@ * Check the map for holes in the specified region. * If holes_ok was specified, skip this check. */ + tmp_entry = vm_map_entry_succ(entry); if (!holes_ok && - entry->end < end && entry->next->start > entry->end) { + entry->end < end && tmp_entry->start > entry->end) { end = entry->end; rv = KERN_INVALID_ADDRESS; goto done; } - entry = entry->next; + entry = tmp_entry; } rv = KERN_SUCCESS; done: @@ -3218,9 +3377,13 @@ if (first_entry == NULL && !vm_map_lookup_entry(map, start, &first_entry)) { KASSERT(holes_ok, ("vm_map_wire: lookup failed")); - first_entry = first_entry->next; + entry = vm_map_entry_succ(first_entry); + } else { + entry = first_entry; + first_entry = vm_map_entry_pred(entry); } - for (entry = first_entry; entry->start < end; entry = entry->next) { + for (; entry->start < end; + first_entry = entry, entry = vm_map_entry_succ(entry)) { /* * If holes_ok was specified, an empty * space in the unwired region could have been mapped @@ -3273,9 +3436,9 @@ entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; need_wakeup = true; } - vm_map_try_merge_entries(map, entry->prev, entry); + vm_map_try_merge_entries(map, first_entry, entry); } - vm_map_try_merge_entries(map, entry->prev, entry); + vm_map_try_merge_entries(map, first_entry, entry); if (need_wakeup) vm_map_wakeup(map); return (rv); @@ -3305,8 +3468,7 @@ boolean_t syncio, boolean_t invalidate) { - vm_map_entry_t current; - vm_map_entry_t entry; + vm_map_entry_t current, entry, next; vm_size_t size; vm_object_t object; vm_ooffset_t offset; @@ -3325,13 +3487,14 @@ /* * Make a first pass to check for user-wired memory and holes. 
*/ - for (current = entry; current->start < end; current = current->next) { + for (current = entry; current->start < end; current = next) { if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { vm_map_unlock_read(map); return (KERN_INVALID_ARGUMENT); } + next = vm_map_entry_succ(current); if (end > current->end && - current->end != current->next->start) { + current->end != next->start) { vm_map_unlock_read(map); return (KERN_INVALID_ADDRESS); } @@ -3375,7 +3538,7 @@ vm_map_lock_read(map); if (last_timestamp == map->timestamp || !vm_map_lookup_entry(map, start, ¤t)) - current = current->next; + current = vm_map_entry_succ(current); } vm_map_unlock_read(map); @@ -3493,7 +3656,7 @@ if (map->system_map) vm_map_entry_deallocate(entry, TRUE); else { - entry->next = curthread->td_map_def_user; + entry->right = curthread->td_map_def_user; curthread->td_map_def_user = entry; } } @@ -3518,7 +3681,7 @@ * Find the start of the region, and clip it */ if (!vm_map_lookup_entry(map, start, &first_entry)) - entry = first_entry->next; + entry = vm_map_entry_succ(first_entry); else { entry = first_entry; vm_map_clip_start(map, entry, start); @@ -3556,7 +3719,7 @@ */ if (!vm_map_lookup_entry(map, saved_start, &tmp_entry)) - entry = tmp_entry->next; + entry = vm_map_entry_succ(tmp_entry); else { entry = tmp_entry; vm_map_clip_start(map, entry, @@ -3567,7 +3730,7 @@ } vm_map_clip_end(map, entry, end); - next = entry->next; + next = vm_map_entry_succ(entry); /* * Unwire before removing addresses from the pmap; otherwise, @@ -3656,7 +3819,7 @@ return (FALSE); /* go to next entry */ start = entry->end; - entry = entry->next; + entry = vm_map_entry_succ(entry); } return (TRUE); } @@ -3765,7 +3928,7 @@ fake_entry->object.vm_object = src_object; fake_entry->start = src_entry->start; fake_entry->end = src_entry->end; - fake_entry->next = curthread->td_map_def_user; + fake_entry->right = curthread->td_map_def_user; curthread->td_map_def_user = fake_entry; } @@ -3873,7 +4036,7 @@ 
new_map->anon_loc = old_map->anon_loc; - old_entry = old_map->header.next; + old_entry = old_map->header.right; while (old_entry != &old_map->header) { if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) @@ -4025,7 +4188,7 @@ break; } - old_entry = old_entry->next; + old_entry = vm_map_entry_succ(old_entry); } /* * Use inlined vm_map_unlock() to postpone handling the deferred @@ -4112,7 +4275,7 @@ /* * If we can't accommodate max_ssize in the current mapping, no go. */ - if (prev_entry->next->start < addrbos + max_ssize) + if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) return (KERN_NO_SPACE); /* @@ -4139,7 +4302,7 @@ rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); if (rv != KERN_SUCCESS) return (rv); - new_entry = prev_entry->next; + new_entry = vm_map_entry_succ(prev_entry); KASSERT(new_entry->end == top || new_entry->start == bot, ("Bad entry start/end for new stack entry")); KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || @@ -4161,9 +4324,9 @@ * stack_guard_page for vm_map_growstack(). 
*/ if (orient == MAP_STACK_GROWS_DOWN) - new_entry->prev->next_read = sgp; + vm_map_entry_pred(new_entry)->next_read = sgp; else - new_entry->next->next_read = sgp; + vm_map_entry_succ(new_entry)->next_read = sgp; } else { (void)vm_map_delete(map, bot, top); } @@ -4217,14 +4380,14 @@ if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) return (KERN_SUCCESS); if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { - stack_entry = gap_entry->next; + stack_entry = vm_map_entry_succ(gap_entry); if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || stack_entry->start != gap_entry->end) return (KERN_FAILURE); grow_amount = round_page(stack_entry->start - addr); grow_down = true; } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { - stack_entry = gap_entry->prev; + stack_entry = vm_map_entry_pred(gap_entry); if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || stack_entry->end != gap_entry->start) return (KERN_FAILURE); @@ -4789,6 +4952,7 @@ _vm_map_assert_consistent(vm_map_t map) { vm_map_entry_t entry, prev; + vm_map_entry_t cur, lbound, ubound; vm_size_t max_left, max_right; if (!enable_vmmap_check) @@ -4802,20 +4966,39 @@ KASSERT(entry->start < entry->end, ("map %p start = %jx, end = %jx", map, (uintmax_t)entry->start, (uintmax_t)entry->end)); - KASSERT(entry->end <= entry->next->start, - ("map %p end = %jx, next->start = %jx", map, - (uintmax_t)entry->end, (uintmax_t)entry->next->start)); - KASSERT(entry->left == NULL || + KASSERT(entry->left == &map->header || entry->left->start < entry->start, ("map %p left->start = %jx, start = %jx", map, (uintmax_t)entry->left->start, (uintmax_t)entry->start)); - KASSERT(entry->right == NULL || + KASSERT(entry->right == &map->header || entry->start < entry->right->start, ("map %p start = %jx, right->start = %jx", map, (uintmax_t)entry->start, (uintmax_t)entry->right->start)); - max_left = vm_map_entry_max_free_left(entry, entry->prev); - max_right = vm_map_entry_max_free_right(entry, entry->next); - 
KASSERT(entry->max_free == MAX(max_left, max_right), + cur = map->root; + lbound = ubound = &map->header; + for (;;) { + if (entry->start < cur->start) { + ubound = cur; + cur = cur->left; + KASSERT(cur != lbound, + ("map %p cannot find %jx", + map, entry->start)); + } else if (cur->end <= entry->start) { + lbound = cur; + cur = cur->right; + KASSERT(cur != ubound, + ("map %p cannot find %jx", + map, entry->start)); + } else { + KASSERT(cur == entry, + ("map %p cannot find %jx", + map, entry->start)); + break; + } + } + max_left = vm_map_entry_max_free_left(entry, lbound); + max_right = vm_map_entry_max_free_right(entry, ubound); + KASSERT(entry->max_free == vm_size_max(max_left, max_right), ("map %p max = %jx, max_left = %jx, max_right = %jx", map, (uintmax_t)entry->max_free, (uintmax_t)max_left, (uintmax_t)max_right)); Index: sys/vm/vm_mmap.c =================================================================== --- sys/vm/vm_mmap.c +++ sys/vm/vm_mmap.c @@ -581,7 +581,7 @@ pkm.pm_address = (uintptr_t) NULL; if (vm_map_lookup_entry(map, addr, &entry)) { for (; entry->start < addr + size; - entry = entry->next) { + entry = vm_map_entry_succ(entry)) { if (vm_map_check_protection(map, entry->start, entry->end, VM_PROT_EXECUTE) == TRUE) { pkm.pm_address = (uintptr_t) addr; @@ -822,12 +822,15 @@ * up the pages elsewhere. */ lastvecindex = -1; - for (current = entry; current->start < end; current = current->next) { + while (entry->start < end) { /* * check for contiguity */ - if (current->end < end && current->next->start > current->end) { + current = entry; + entry = vm_map_entry_succ(current); + if (current->end < end && + entry->start > current->end) { vm_map_unlock_read(map); return (ENOMEM); }