Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -1034,32 +1034,51 @@
 	map->root = entry;
 }
 
+enum unlink_merge_type {
+	UNLINK_MERGE_PREV,
+	UNLINK_MERGE_NONE,
+	UNLINK_MERGE_NEXT
+};
+
 static void
-vm_map_entry_unlink(vm_map_t map,
-		    vm_map_entry_t entry)
+vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
+    enum unlink_merge_type op)
 {
-	vm_map_entry_t next, prev, root;
+	vm_map_entry_t next, prev;
 
 	VM_MAP_ASSERT_LOCKED(map);
 	if (entry != map->root)
 		vm_map_entry_splay(entry->start, map->root);
-	if (entry->left == NULL)
-		root = entry->right;
-	else {
-		root = vm_map_entry_splay(entry->start, entry->left);
-		root->right = entry->right;
-		root->adj_free = entry->next->start - root->end;
-		vm_map_entry_set_max_free(root);
-	}
-	map->root = root;
-
 	prev = entry->prev;
 	next = entry->next;
 	next->prev = prev;
 	prev->next = next;
+	if (prev == &map->header)
+		map->root = entry->right;
+	else {
+		map->root = vm_map_entry_splay(prev->start, entry->left);
+		prev->right = entry->right;
+		switch (op) {
+		case UNLINK_MERGE_PREV:
+			prev->end = entry->end;
+			prev->adj_free = entry->adj_free;
+			prev->max_free = entry->max_free;
+			break;
+		case UNLINK_MERGE_NONE:
+			prev->adj_free = next->start - prev->end;
+			prev->max_free = MAX(entry->max_free, prev->adj_free);
+			break;
+		case UNLINK_MERGE_NEXT:
+			next->start = entry->start;
+			next->offset = entry->offset;
+			prev->max_free = entry->max_free;
+			break;
+		}
+	}
 	map->nentries--;
-	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
-	    map->nentries, entry);
+	CTR4(KTR_VM, "vm_map_entry_unlink: "
+	    "map %p, nentries %d, entry %p, merge %d",
+	    map, map->nentries, entry, op);
 }
 
 /*
@@ -1647,13 +1666,11 @@
 static bool
 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
 {
-	vm_size_t prevsize;
 
-	prevsize = prev->end - prev->start;
 	return (prev->end == entry->start &&
 	    prev->object.vm_object == entry->object.vm_object &&
 	    (prev->object.vm_object == NULL ||
-	    prev->offset + prevsize == entry->offset) &&
+	    prev->offset + (prev->end - prev->start) == entry->offset) &&
 	    prev->eflags == entry->eflags &&
 	    prev->protection == entry->protection &&
 	    prev->max_protection == entry->max_protection &&
@@ -1663,22 +1680,21 @@
 }
 
 static void
-vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
+vm_map_entry_merge(vm_map_t map, vm_map_entry_t entry,
+    enum unlink_merge_type op)
 {
 
+	vm_map_entry_unlink(map, entry, op);
+
 	/*
-	 * If the backing object is a vnode object,
-	 * vm_object_deallocate() calls vrele().
-	 * However, vrele() does not lock the vnode
-	 * because the vnode has additional
-	 * references.  Thus, the map lock can be kept
-	 * without causing a lock-order reversal with
-	 * the vnode lock.
+	 * If the backing object is a vnode object, vm_object_deallocate()
+	 * calls vrele().  However, vrele() does not lock the vnode because
+	 * the vnode has additional references.  Thus, the map lock can be
+	 * kept without causing a lock-order reversal with the vnode lock.
 	 *
-	 * Since we count the number of virtual page
-	 * mappings in object->un_pager.vnp.writemappings,
-	 * the writemappings value should not be adjusted
-	 * when the entry is disposed of.
+	 * Since we count the number of virtual page mappings in
+	 * object->un_pager.vnp.writemappings, the writemappings value
+	 * should not be adjusted when the entry is disposed of.
 	 */
 	if (entry->object.vm_object != NULL)
 		vm_object_deallocate(entry->object.vm_object);
@@ -1702,29 +1718,16 @@
 void
 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 {
-	vm_map_entry_t next, prev;
 
 	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
 	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
 		return;
-	prev = entry->prev;
-	if (vm_map_mergeable_neighbors(prev, entry)) {
-		vm_map_entry_unlink(map, prev);
-		entry->start = prev->start;
-		entry->offset = prev->offset;
-		if (entry->prev != &map->header)
-			vm_map_entry_resize_free(map, entry->prev);
-		vm_map_merged_neighbor_dispose(map, prev);
-	}
+	if (vm_map_mergeable_neighbors(entry->prev, entry))
+		vm_map_entry_merge(map, entry->prev, UNLINK_MERGE_NEXT);
 
-	next = entry->next;
-	if (vm_map_mergeable_neighbors(entry, next)) {
-		vm_map_entry_unlink(map, next);
-		entry->end = next->end;
-		vm_map_entry_resize_free(map, entry);
-		vm_map_merged_neighbor_dispose(map, next);
-	}
+	if (vm_map_mergeable_neighbors(entry, entry->next))
+		vm_map_entry_merge(map, entry->next, UNLINK_MERGE_PREV);
 }
 
 /*
  *	vm_map_clip_start:	[ internal use only ]
@@ -3006,7 +3009,7 @@
 	vm_pindex_t offidxstart, offidxend, count, size1;
 	vm_size_t size;
 
-	vm_map_entry_unlink(map, entry);
+	vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
 	object = entry->object.vm_object;
 
 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
@@ -3916,12 +3919,13 @@
 	    (vm_size_t)(stack_entry->end - stack_entry->start),
 	    (vm_size_t)grow_amount, cred != NULL)) {
 		if (gap_entry->start + grow_amount == gap_entry->end)
-			vm_map_entry_delete(map, gap_entry);
-		else
+			vm_map_entry_merge(map, gap_entry,
+			    UNLINK_MERGE_PREV);
+		else {
 			gap_entry->start += grow_amount;
-		stack_entry->end += grow_amount;
+			stack_entry->end += grow_amount;
+		}
 		map->size += grow_amount;
-		vm_map_entry_resize_free(map, stack_entry);
 		rv = KERN_SUCCESS;
 	} else
 		rv = KERN_FAILURE;