Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -173,15 +173,19 @@
  *	A map is a set of map entries.  These map entries are
  *	organized both as a binary search tree and as a doubly-linked
  *	list.  Both structures are ordered based upon the start and
- *	end addresses contained within each map entry.  Sleator and
- *	Tarjan's top-down splay algorithm is employed to control
- *	height imbalance in the binary search tree.
+ *	end addresses contained within each map entry.  The list
+ *	header has max start value and min end value to act as
+ *	sentinels for sequential search of the doubly-linked list.
+ *	Sleator and Tarjan's top-down splay algorithm is employed to
+ *	control height imbalance in the binary search tree.
  *
  *	List of locks
  *	(c)	const until freed
  */
 struct vm_map {
 	struct vm_map_entry header;	/* List of entries */
+#define	min_offset	header.end	/* (c) */
+#define	max_offset	header.start	/* (c) */
 	struct sx lock;			/* Lock for map data */
 	struct mtx system_mtx;
 	int nentries;			/* Number of entries */
@@ -192,8 +196,6 @@
 	vm_flags_t flags;		/* flags for this vm_map */
 	vm_map_entry_t root;		/* Root of a binary search tree */
 	pmap_t pmap;			/* (c) Physical map */
-#define	min_offset	header.start	/* (c) */
-#define	max_offset	header.end	/* (c) */
 	int busy;
 };
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -1003,12 +1003,10 @@
 	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p",
 	    map, map->nentries, entry, after_where);
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT(after_where == &map->header ||
-	    after_where->end <= entry->start,
+	KASSERT(after_where->end <= entry->start,
 	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
 	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
-	KASSERT(after_where->next == &map->header ||
-	    entry->end <= after_where->next->start,
+	KASSERT(entry->end <= after_where->next->start,
 	    ("vm_map_entry_link: new end %jx next start %jx overlap",
 	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
@@ -1030,8 +1028,7 @@
 		entry->right = map->root;
 		entry->left = NULL;
 	}
-	entry->adj_free = (entry->next == &map->header ? map->max_offset :
-	    entry->next->start) - entry->end;
+	entry->adj_free = entry->next->start - entry->end;
 	vm_map_entry_set_max_free(entry);
 	map->root = entry;
 }
@@ -1050,8 +1047,7 @@
 	else {
 		root = vm_map_entry_splay(entry->start, entry->left);
 		root->right = entry->right;
-		root->adj_free = (entry->next == &map->header ? map->max_offset :
-		    entry->next->start) - root->end;
+		root->adj_free = entry->next->start - root->end;
 		vm_map_entry_set_max_free(root);
 	}
 	map->root = root;
@@ -1087,8 +1083,7 @@
 	if (entry != map->root)
 		map->root = vm_map_entry_splay(entry->start, map->root);
-	entry->adj_free = (entry->next == &map->header ? map->max_offset :
-	    entry->next->start) - entry->end;
+	entry->adj_free = entry->next->start - entry->end;
 	vm_map_entry_set_max_free(entry);
 }
@@ -1218,7 +1213,7 @@
 	/*
 	 * Assert that the next entry doesn't overlap the end point.
 	 */
-	if (prev_entry->next != &map->header && prev_entry->next->start < end)
+	if (prev_entry->next->start < end)
 		return (KERN_NO_SPACE);
 	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -2090,8 +2085,7 @@
 	/*
 	 * Make a first pass to check for protection violations.
 	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;
 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
@@ -2109,8 +2103,7 @@
 	 * now will do cow due to allowed write (e.g. debugger sets
 	 * breakpoint on text segment)
 	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
 		vm_map_clip_end(map, current, end);
@@ -2164,8 +2157,7 @@
 	 * Go back and fix up protections. [Note that clipping is not
 	 * necessary the second time.]
	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;
@@ -2274,10 +2266,8 @@
 	 * We clip the vm_map_entry so that behavioral changes are
 	 * limited to the specified address range.
	 */
-	for (current = entry;
-	    (current != &map->header) && (current->start < end);
-	    current = current->next
-	) {
+	for (current = entry; current->start < end;
+	    current = current->next) {
 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;
@@ -2321,10 +2311,8 @@
 	 * Since we don't clip the vm_map_entry, we have to clip
 	 * the vm_object pindex and count.
	 */
-	for (current = entry;
-	    (current != &map->header) && (current->start < end);
-	    current = current->next
-	) {
+	for (current = entry; current->start < end;
+	    current = current->next) {
 		vm_offset_t useEnd, useStart;
 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
@@ -2420,7 +2408,7 @@
 		vm_map_clip_start(map, entry, start);
 	} else
 		entry = temp_entry->next;
-	while ((entry != &map->header) && (entry->start < end)) {
+	while (entry->start < end) {
 		vm_map_clip_end(map, entry, end);
 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
 		    new_inheritance != VM_INHERIT_ZERO)
@@ -2462,7 +2450,7 @@
 	}
 	last_timestamp = map->timestamp;
 	entry = first_entry;
-	while (entry != &map->header && entry->start < end) {
+	while (entry->start < end) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
@@ -2525,8 +2513,7 @@
 		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
-		    (entry->end < end && (entry->next == &map->header ||
-		    entry->next->start > entry->end))) {
+		    (entry->end < end && entry->next->start > entry->end)) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
@@ -2552,8 +2539,7 @@
 		else
			KASSERT(result, ("vm_map_unwire: lookup failed"));
	}
-	for (entry = first_entry; entry != &map->header && entry->start < end;
-	    entry = entry->next) {
+	for (entry = first_entry; entry->start < end; entry = entry->next) {
		/*
		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
		 * space in the unwired region could have been mapped
@@ -2667,7 +2653,7 @@
 	}
 	last_timestamp = map->timestamp;
 	entry = first_entry;
-	while (entry != &map->header && entry->start < end) {
+	while (entry->start < end) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
@@ -2804,8 +2790,7 @@
	 */
	next_entry:
		if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
-		    entry->end < end && (entry->next == &map->header ||
-		    entry->next->start > entry->end)) {
+		    entry->end < end && entry->next->start > entry->end) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
@@ -2822,8 +2807,7 @@
 		else
			KASSERT(result, ("vm_map_wire: lookup failed"));
	}
-	for (entry = first_entry; entry != &map->header && entry->start < end;
-	    entry = entry->next) {
+	for (entry = first_entry; entry->start < end; entry = entry->next) {
		/*
		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
		 * space in the unwired region could have been mapped
@@ -2927,15 +2911,13 @@
 	/*
	 * Make a first pass to check for user-wired memory and holes.
	 */
-	for (current = entry; current != &map->header && current->start < end;
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
-		    (current->next == &map->header ||
-		    current->end != current->next->start)) {
+		    current->end != current->next->start) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
@@ -2949,7 +2931,7 @@
 	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
-	for (current = entry; current != &map->header && current->start < end;) {
+	for (current = entry; current->start < end;) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
@@ -3126,7 +3108,7 @@
 	/*
	 * Step through all entries in this region
	 */
-	while ((entry != &map->header) && (entry->start < end)) {
+	while (entry->start < end) {
		vm_map_entry_t next;
		/*
@@ -3234,8 +3216,6 @@
 	entry = tmp_entry;
 	while (start < end) {
-		if (entry == &map->header)
-			return (FALSE);
		/*
		 * No holes allowed!
		 */
@@ -3699,8 +3679,7 @@
 	/*
	 * If we can't accommodate max_ssize in the current mapping, no go.
	 */
-	if ((prev_entry->next != &map->header) &&
-	    (prev_entry->next->start < addrbos + max_ssize))
+	if (prev_entry->next->start < addrbos + max_ssize)
		return (KERN_NO_SPACE);
	/*
Index: sys/vm/vm_mmap.c
===================================================================
--- sys/vm/vm_mmap.c
+++ sys/vm/vm_mmap.c
@@ -543,8 +543,7 @@
	 */
	pkm.pm_address = (uintptr_t) NULL;
	if (vm_map_lookup_entry(map, addr, &entry)) {
-		for (;
-		    entry != &map->header && entry->start < addr + size;
+		for (; entry->start < addr + size;
		    entry = entry->next) {
			if (vm_map_check_protection(map, entry->start,
			    entry->end, VM_PROT_EXECUTE) == TRUE) {
@@ -770,16 +769,12 @@
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
-	for (current = entry;
-	    (current != &map->header) && (current->start < end);
-	    current = current->next) {
+	for (current = entry; current->start < end; current = current->next) {
		/*
		 * check for contiguity
		 */
-		if (current->end < end &&
-		    (entry->next == &map->header ||
-		    current->next->start > current->end)) {
+		if (current->end < end && current->next->start > current->end) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}
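
The patch above works because of an invariant rather than explicit header checks:
with min_offset aliased to header.end and max_offset aliased to header.start, the
list header compares like an entry whose start is the map's maximum address and
whose end is its minimum address.  Every "current->start < end" loop therefore
stops when the walk wraps around to the header, and "entry->next->start -
entry->end" yields the gap up to max_offset for the last entry.  The following is
a minimal, self-contained sketch of that sentinel idea in isolation; the
range_map/range_entry types and function names are hypothetical stand-ins for
illustration, not the kernel's vm_map code.

/*
 * Sketch of a sentinel list header for address ranges (hypothetical
 * types, not the FreeBSD vm_map structures).  The header's end field
 * holds the map's minimum address and its start field holds the
 * maximum address, so it orders after every real entry in a forward
 * scan of the circular doubly-linked list.
 */
#include <stdint.h>

struct range_entry {
	struct range_entry *prev, *next;
	uint64_t start, end;		/* half-open [start, end) */
};

struct range_map {
	struct range_entry header;	/* list head doubles as sentinel */
};

void
range_map_init(struct range_map *map, uint64_t min, uint64_t max)
{
	map->header.prev = map->header.next = &map->header;
	map->header.end = min;		/* plays the role of min_offset */
	map->header.start = max;	/* plays the role of max_offset */
}

/*
 * Count entries overlapping [start, end).  No "cur != &map->header"
 * test is needed: once the walk wraps around to the header,
 * cur->start is the maximum address, which is never less than a
 * valid end, so the loop condition fails and the scan terminates.
 */
int
range_map_count_overlaps(struct range_map *map, uint64_t start, uint64_t end)
{
	struct range_entry *cur;
	int n;

	n = 0;
	for (cur = map->header.next; cur->start < end; cur = cur->next)
		if (cur->end > start)
			n++;
	return (n);
}

/*
 * Free space between an entry and its successor.  For the last real
 * entry the successor is the header, whose start is the maximum
 * address, so the result is the gap up to the top of the map; this
 * mirrors the simplified adj_free computation in the patch.
 */
uint64_t
range_adj_free(const struct range_entry *entry)
{
	return (entry->next->start - entry->end);
}

The same invariant is what lets the KASSERTs in vm_map_entry_link() drop their
header comparisons: when after_where is the header, after_where->end is the
map's minimum address and so never exceeds entry->start, and when
after_where->next is the header, its start is the maximum address and so never
precedes entry->end.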