Index: vm_map.c
===================================================================
--- vm_map.c
+++ vm_map.c
@@ -2059,30 +2059,76 @@
 }
 
 /*
- * vm_map_clip_start:	[ internal use only ]
+ * vm_map_entry_back:
  *
- *	Asserts that the given entry begins at or after
- *	the specified address; if necessary,
- *	it splits the entry into two.
+ *	Allocate an object to back a map entry.
  */
-#define vm_map_clip_start(map, entry, startaddr) \
-{ \
-	if (startaddr > entry->start) \
-		_vm_map_clip_start(map, entry, startaddr); \
+static inline void
+vm_map_entry_back(vm_map_entry_t entry)
+{
+	vm_object_t object;
+
+	KASSERT(entry->object.vm_object == NULL,
+	    ("map entry %p has backing object", entry));
+	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
+	    ("map entry %p is a submap", entry));
+	object = vm_object_allocate(OBJT_DEFAULT,
+	    atop(entry->end - entry->start));
+	entry->object.vm_object = object;
+	entry->offset = 0;
+	if (entry->cred != NULL) {
+		object->cred = entry->cred;
+		object->charge = entry->end - entry->start;
+		entry->cred = NULL;
+	}
 }
 
 /*
- * This routine is called only when it is known that
- * the entry must be split.
+ * vm_map_entry_charge_object
+ *
+ *	If there is no object backing this entry, create one.  Otherwise, if
+ *	the entry has cred, give it to the backing object.
  */
+static inline void
+vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
+{
+
+	VM_MAP_ASSERT_LOCKED(map);
+	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
+	    ("map entry %p is a submap", entry));
+	if (entry->object.vm_object == NULL && !map->system_map &&
+	    (entry->eflags & MAP_ENTRY_GUARD) == 0)
+		vm_map_entry_back(entry);
+	else if (entry->object.vm_object != NULL &&
+	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
+	    entry->cred != NULL) {
+		VM_OBJECT_WLOCK(entry->object.vm_object);
+		KASSERT(entry->object.vm_object->cred == NULL,
+		    ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
+		entry->object.vm_object->cred = entry->cred;
+		entry->object.vm_object->charge = entry->end - entry->start;
+		VM_OBJECT_WUNLOCK(entry->object.vm_object);
+		entry->cred = NULL;
+	}
+}
+
+/*
+ * vm_map_clip_start:
+ *
+ *	Asserts that the given entry begins at or after
+ *	the specified address; if necessary,
+ *	it splits the entry into two.
+ */
 static void
-_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
+vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 {
 	vm_map_entry_t new_entry;
 
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT(entry->end > start && entry->start < start,
-	    ("_vm_map_clip_start: invalid clip of entry %p", entry));
+	KASSERT(entry->end > start,
+	    ("%s: invalid clip of entry %p", __func__, entry));
+	if (start <= entry->start)
+		return;
 
 	/*
 	 * Split off the front portion -- note that we must insert the new
@@ -2090,38 +2136,7 @@
 	 * starting address.
 	 */
 	vm_map_simplify_entry(map, entry);
-
-	/*
-	 * If there is no object backing this entry, we might as well create
-	 * one now.  If we defer it, an object can get created after the map
-	 * is clipped, and individual objects will be created for the split-up
-	 * map.  This is a bit of a hack, but is also about the best place to
-	 * put this improvement.
-	 */
-	if (entry->object.vm_object == NULL && !map->system_map &&
-	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
-		vm_object_t object;
-		object = vm_object_allocate(OBJT_DEFAULT,
-		    atop(entry->end - entry->start));
-		entry->object.vm_object = object;
-		entry->offset = 0;
-		if (entry->cred != NULL) {
-			object->cred = entry->cred;
-			object->charge = entry->end - entry->start;
-			entry->cred = NULL;
-		}
-	} else if (entry->object.vm_object != NULL &&
-	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
-	    entry->cred != NULL) {
-		VM_OBJECT_WLOCK(entry->object.vm_object);
-		KASSERT(entry->object.vm_object->cred == NULL,
-		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
-		entry->object.vm_object->cred = entry->cred;
-		entry->object.vm_object->charge = entry->end - entry->start;
-		VM_OBJECT_WUNLOCK(entry->object.vm_object);
-		entry->cred = NULL;
-	}
-
+	vm_map_entry_charge_object(map, entry);
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 
@@ -2147,65 +2162,42 @@
 }
 
 /*
- * vm_map_clip_end:	[ internal use only ]
  *
- *	Asserts that the given entry ends at or before
- *	the specified address; if necessary,
- *	it splits the entry into two.
  */
-#define vm_map_clip_end(map, entry, endaddr) \
-{ \
-	if ((endaddr) < (entry->end)) \
-		_vm_map_clip_end((map), (entry), (endaddr)); \
+static inline vm_map_entry_t
+vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start, bool modify)
+{
+	vm_map_entry_t entry;
+
+	if (!vm_map_lookup_entry(map, start, &entry))
+		entry = entry->next;
+	else if (modify)
+		vm_map_clip_start(map, entry, start);
+	return (entry);
 }
 
 /*
- * This routine is called only when it is known that
- * the entry must be split.
+ * vm_map_clip_end:
+ *
+ *	Asserts that the given entry ends at or before
+ *	the specified address; if necessary,
+ *	it splits the entry into two.
  */
 static void
-_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
+vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
 	vm_map_entry_t new_entry;
 
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT(entry->start < end && entry->end > end,
-	    ("_vm_map_clip_end: invalid clip of entry %p", entry));
+	if (end >= entry->end)
+		return;
+	KASSERT(entry->start < end,
+	    ("%s: invalid clip of entry %p", __func__, entry));
 
 	/*
-	 * If there is no object backing this entry, we might as well create
-	 * one now.  If we defer it, an object can get created after the map
-	 * is clipped, and individual objects will be created for the split-up
-	 * map.  This is a bit of a hack, but is also about the best place to
-	 * put this improvement.
-	 */
-	if (entry->object.vm_object == NULL && !map->system_map &&
-	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
-		vm_object_t object;
-		object = vm_object_allocate(OBJT_DEFAULT,
-		    atop(entry->end - entry->start));
-		entry->object.vm_object = object;
-		entry->offset = 0;
-		if (entry->cred != NULL) {
-			object->cred = entry->cred;
-			object->charge = entry->end - entry->start;
-			entry->cred = NULL;
-		}
-	} else if (entry->object.vm_object != NULL &&
-	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
-	    entry->cred != NULL) {
-		VM_OBJECT_WLOCK(entry->object.vm_object);
-		KASSERT(entry->object.vm_object->cred == NULL,
-		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
-		entry->object.vm_object->cred = entry->cred;
-		entry->object.vm_object->charge = entry->end - entry->start;
-		VM_OBJECT_WUNLOCK(entry->object.vm_object);
-		entry->cred = NULL;
-	}
-
-	/*
 	 * Create a new entry and insert it AFTER the specified entry
 	 */
+	vm_map_entry_charge_object(map, entry);
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 
@@ -2260,11 +2252,7 @@
 
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		vm_map_clip_start(map, entry, start);
-	} else
-		entry = entry->next;
-
+	entry = vm_map_lookup_clip_start(map, start, true);
 	vm_map_clip_end(map, entry, end);
 
 	if ((entry->start == start) && (entry->end == end) &&
@@ -2418,11 +2406,7 @@
 
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		vm_map_clip_start(map, entry, start);
-	} else {
-		entry = entry->next;
-	}
+	entry = vm_map_lookup_clip_start(map, start, true);
 
 	/*
 	 * Make a first pass to check for protection violations.
@@ -2611,12 +2595,7 @@
 	 */
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		if (modify_map)
-			vm_map_clip_start(map, entry, start);
-	} else {
-		entry = entry->next;
-	}
+	entry = vm_map_lookup_clip_start(map, start, modify_map);
 
 	if (modify_map) {
 		/*
@@ -2747,7 +2726,6 @@
     vm_inherit_t new_inheritance)
 {
 	vm_map_entry_t entry;
-	vm_map_entry_t temp_entry;
 
 	switch (new_inheritance) {
 	case VM_INHERIT_NONE:
@@ -2762,11 +2740,7 @@
 		return (KERN_SUCCESS);
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
-	if (vm_map_lookup_entry(map, start, &temp_entry)) {
-		entry = temp_entry;
-		vm_map_clip_start(map, entry, start);
-	} else
-		entry = temp_entry->next;
+	entry = vm_map_lookup_clip_start(map, start, true);
 	while (entry->start < end) {
 		vm_map_clip_end(map, entry, end);
 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
@@ -2856,7 +2830,7 @@
 			last_timestamp = map->timestamp;
 			continue;
 		}
-		vm_map_clip_start(map, entry, start);
+		entry = vm_map_lookup_clip_start(map, start, true);
 		vm_map_clip_end(map, entry, end);
 		/*
 		 * Mark the entry in case the map lock is released.  (See
@@ -3092,7 +3066,7 @@
 			last_timestamp = map->timestamp;
 			continue;
 		}
-		vm_map_clip_start(map, entry, start);
+		entry = vm_map_lookup_clip_start(map, start, true);
 		vm_map_clip_end(map, entry, end);
 		/*
 		 * Mark the entry in case the map lock is released.  (See
@@ -3499,7 +3473,6 @@
 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 {
 	vm_map_entry_t entry;
-	vm_map_entry_t first_entry;
 
 	VM_MAP_ASSERT_LOCKED(map);
 	if (start == end)
@@ -3508,12 +3481,7 @@
 	/*
 	 * Find the start of the region, and clip it
 	 */
-	if (!vm_map_lookup_entry(map, start, &first_entry))
-		entry = first_entry->next;
-	else {
-		entry = first_entry;
-		vm_map_clip_start(map, entry, start);
-	}
+	entry = vm_map_lookup_clip_start(map, start, true);
 
 	/*
 	 * Step through all entries in this region
@@ -3531,7 +3499,6 @@
 		    vm_map_entry_system_wired_count(entry) != 0)) {
 			unsigned int last_timestamp;
 			vm_offset_t saved_start;
-			vm_map_entry_t tmp_entry;
 
 			saved_start = entry->start;
 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
@@ -3545,14 +3512,8 @@
 				 * Specifically, the entry may have been
 				 * clipped, merged, or deleted.
 				 */
-				if (!vm_map_lookup_entry(map, saved_start,
-				    &tmp_entry))
-					entry = tmp_entry->next;
-				else {
-					entry = tmp_entry;
-					vm_map_clip_start(map, entry,
-					    saved_start);
-				}
+				entry = vm_map_lookup_clip_start(map,
+				    saved_start, true);
 			}
 			continue;
 		}
@@ -3885,16 +3846,8 @@
 			 */
 			object = old_entry->object.vm_object;
 			if (object == NULL) {
-				object = vm_object_allocate(OBJT_DEFAULT,
-				    atop(old_entry->end - old_entry->start));
-				old_entry->object.vm_object = object;
-				old_entry->offset = 0;
-				if (old_entry->cred != NULL) {
-					object->cred = old_entry->cred;
-					object->charge = old_entry->end -
-					    old_entry->start;
-					old_entry->cred = NULL;
-				}
+				vm_map_entry_back(old_entry);
+				object = old_entry->object.vm_object;
 			}
 
 			/*