Index: vm_map.c
===================================================================
--- vm_map.c
+++ vm_map.c
@@ -2059,16 +2059,74 @@
 }
 
 /*
+ *	vm_map_entry_attach_backing_object:
+ *
+ *	Allocate an object to back a map entry.
+ */
+static inline void
+vm_map_entry_attach_backing_object(vm_map_entry_t entry)
+{
+	vm_object_t object;
+
+	KASSERT(entry->object.vm_object == NULL,
+	    ("map entry %p has backing object", entry));
+	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
+	    ("map entry %p is a submap", entry));
+	object = vm_object_allocate(OBJT_DEFAULT,
+	    atop(entry->end - entry->start));
+	entry->object.vm_object = object;
+	entry->offset = 0;
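+	/* Move the entry's swap accounting, if any, to the new object. */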
+	if (entry->cred != NULL) {
+		object->cred = entry->cred;
+		object->charge = entry->end - entry->start;
+		entry->cred = NULL;
+	}
+}
+
+/*
+ *	vm_map_entry_find_backing_object:
+ *
+ *	If there is no object backing this entry, we might as well create one
+ *	now.  If we defer it, an object can get created after the map is
+ *	clipped, and individual objects will be created for the split-up map.
+ *	This is a bit of a hack, but is also about the best place to put this
+ *	improvement.
+ *
+ *	If the entry already has a backing object and carries a deferred
+ *	swap reservation, transfer that reservation to the object instead.
+ */
+static inline void
+vm_map_entry_find_backing_object(vm_map_t map, vm_map_entry_t entry)
+{
+
+	VM_MAP_ASSERT_LOCKED(map);
+	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
+	    ("map entry %p is a submap", entry));
+	if (entry->object.vm_object == NULL && !map->system_map &&
+	    (entry->eflags & MAP_ENTRY_GUARD) == 0)
+		vm_map_entry_attach_backing_object(entry);
+	else if (entry->object.vm_object != NULL &&
+	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
+	    entry->cred != NULL) {
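+		/* Transfer the entry's swap reservation to the object. */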
+		VM_OBJECT_WLOCK(entry->object.vm_object);
+		KASSERT(entry->object.vm_object->cred == NULL,
+		    ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
+		entry->object.vm_object->cred = entry->cred;
+		entry->object.vm_object->charge = entry->end - entry->start;
+		VM_OBJECT_WUNLOCK(entry->object.vm_object);
+		entry->cred = NULL;
+	}
+}
+
+/*
  *	vm_map_clip_start:	[ internal use only ]
  *
  *	Asserts that the given entry begins at or after
  *	the specified address; if necessary,
  *	it splits the entry into two.
  */
-#define vm_map_clip_start(map, entry, startaddr) \
-{ \
-	if (startaddr > entry->start) \
-		_vm_map_clip_start(map, entry, startaddr); \
+static inline void
+vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
+{
+	if (start > entry->start)
+		_vm_map_clip_start(map, entry, start);
 }
 
 /*
@@ -2090,38 +2148,7 @@
 	 * starting address.
 	 */
 	vm_map_simplify_entry(map, entry);
-
-	/*
-	 * If there is no object backing this entry, we might as well create
-	 * one now.  If we defer it, an object can get created after the map
-	 * is clipped, and individual objects will be created for the split-up
-	 * map.  This is a bit of a hack, but is also about the best place to
-	 * put this improvement.
-	 */
-	if (entry->object.vm_object == NULL && !map->system_map &&
-	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
-		vm_object_t object;
-		object = vm_object_allocate(OBJT_DEFAULT,
-				atop(entry->end - entry->start));
-		entry->object.vm_object = object;
-		entry->offset = 0;
-		if (entry->cred != NULL) {
-			object->cred = entry->cred;
-			object->charge = entry->end - entry->start;
-			entry->cred = NULL;
-		}
-	} else if (entry->object.vm_object != NULL &&
-		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
-		   entry->cred != NULL) {
-		VM_OBJECT_WLOCK(entry->object.vm_object);
-		KASSERT(entry->object.vm_object->cred == NULL,
-		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
-		entry->object.vm_object->cred = entry->cred;
-		entry->object.vm_object->charge = entry->end - entry->start;
-		VM_OBJECT_WUNLOCK(entry->object.vm_object);
-		entry->cred = NULL;
-	}
-
+	vm_map_entry_find_backing_object(map, entry);
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 
@@ -2147,16 +2174,33 @@
 }
 
 /*
+ *	vm_map_lookup_clip_start:
+ *
+ *	Find the entry containing 'start', or the next entry if no entry
+ *	contains it.  If 'modify' is true, clip the returned entry at
+ *	'start'.
+ */
+static inline vm_map_entry_t
+vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start, bool modify)
+{
+	vm_map_entry_t entry;
+
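+	/*
+	 * A failed lookup leaves 'entry' pointing at the entry preceding
+	 * 'start', so step to its successor.
+	 */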
+	if (!vm_map_lookup_entry(map, start, &entry))
+		entry = entry->next;
+	else if (modify)
+		vm_map_clip_start(map, entry, start);
+	return (entry);
+}
+
+/*
  *	vm_map_clip_end:	[ internal use only ]
  *
  *	Asserts that the given entry ends at or before
  *	the specified address; if necessary,
  *	it splits the entry into two.
  */
-#define vm_map_clip_end(map, entry, endaddr) \
-{ \
-	if ((endaddr) < (entry->end)) \
-		_vm_map_clip_end((map), (entry), (endaddr)); \
+static inline void
+vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
+{
+	if (end < entry->end)
+		_vm_map_clip_end(map, entry, end);
 }
 
 /*
@@ -2173,39 +2217,9 @@
 	    ("_vm_map_clip_end: invalid clip of entry %p", entry));
 
+	vm_map_entry_find_backing_object(map, entry);
+
 	/*
-	 * If there is no object backing this entry, we might as well create
-	 * one now.  If we defer it, an object can get created after the map
-	 * is clipped, and individual objects will be created for the split-up
-	 * map.  This is a bit of a hack, but is also about the best place to
-	 * put this improvement.
-	 */
-	if (entry->object.vm_object == NULL && !map->system_map &&
-	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
-		vm_object_t object;
-		object = vm_object_allocate(OBJT_DEFAULT,
-				atop(entry->end - entry->start));
-		entry->object.vm_object = object;
-		entry->offset = 0;
-		if (entry->cred != NULL) {
-			object->cred = entry->cred;
-			object->charge = entry->end - entry->start;
-			entry->cred = NULL;
-		}
-	} else if (entry->object.vm_object != NULL &&
-		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
-		   entry->cred != NULL) {
-		VM_OBJECT_WLOCK(entry->object.vm_object);
-		KASSERT(entry->object.vm_object->cred == NULL,
-		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
-		entry->object.vm_object->cred = entry->cred;
-		entry->object.vm_object->charge = entry->end - entry->start;
-		VM_OBJECT_WUNLOCK(entry->object.vm_object);
-		entry->cred = NULL;
-	}
-
-	/*
 	 * Create a new entry and insert it AFTER the specified entry
 	 */
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 
@@ -2260,11 +2274,7 @@
 
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		vm_map_clip_start(map, entry, start);
-	} else
-		entry = entry->next;
-
+	entry = vm_map_lookup_clip_start(map, start, true);
 	vm_map_clip_end(map, entry, end);
 
 	if ((entry->start == start) && (entry->end == end) &&
@@ -2418,11 +2428,7 @@
 
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		vm_map_clip_start(map, entry, start);
-	} else {
-		entry = entry->next;
-	}
+	entry = vm_map_lookup_clip_start(map, start, true);
 
 	/*
 	 * Make a first pass to check for protection violations.
@@ -2611,12 +2617,7 @@
 	 */
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		if (modify_map)
-			vm_map_clip_start(map, entry, start);
-	} else {
-		entry = entry->next;
-	}
+	entry = vm_map_lookup_clip_start(map, start, modify_map);
 
 	if (modify_map) {
 		/*
@@ -2747,7 +2748,6 @@
 	       vm_inherit_t new_inheritance)
 {
 	vm_map_entry_t entry;
-	vm_map_entry_t temp_entry;
 
 	switch (new_inheritance) {
 	case VM_INHERIT_NONE:
@@ -2762,11 +2762,7 @@
 		return (KERN_SUCCESS);
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
-	if (vm_map_lookup_entry(map, start, &temp_entry)) {
-		entry = temp_entry;
-		vm_map_clip_start(map, entry, start);
-	} else
-		entry = temp_entry->next;
+	entry = vm_map_lookup_clip_start(map, start, true);
 	while (entry->start < end) {
 		vm_map_clip_end(map, entry, end);
 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
@@ -3499,7 +3495,6 @@
 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 {
 	vm_map_entry_t entry;
-	vm_map_entry_t first_entry;
 
 	VM_MAP_ASSERT_LOCKED(map);
 	if (start == end)
@@ -3508,12 +3503,7 @@
 	/*
 	 * Find the start of the region, and clip it
 	 */
-	if (!vm_map_lookup_entry(map, start, &first_entry))
-		entry = first_entry->next;
-	else {
-		entry = first_entry;
-		vm_map_clip_start(map, entry, start);
-	}
+	entry = vm_map_lookup_clip_start(map, start, true);
 
 	/*
 	 * Step through all entries in this region
@@ -3531,7 +3521,6 @@
 		    vm_map_entry_system_wired_count(entry) != 0)) {
 			unsigned int last_timestamp;
 			vm_offset_t saved_start;
-			vm_map_entry_t tmp_entry;
 
 			saved_start = entry->start;
 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
@@ -3545,14 +3534,8 @@
 				 * Specifically, the entry may have been
 				 * clipped, merged, or deleted.
 				 */
-				if (!vm_map_lookup_entry(map, saved_start,
-							 &tmp_entry))
-					entry = tmp_entry->next;
-				else {
-					entry = tmp_entry;
-					vm_map_clip_start(map, entry,
-							  saved_start);
-				}
+				entry = vm_map_lookup_clip_start(map,
+				    saved_start, true);
 			}
 			continue;
 		}
@@ -3885,16 +3868,8 @@
 			 */
 			object = old_entry->object.vm_object;
 			if (object == NULL) {
-				object = vm_object_allocate(OBJT_DEFAULT,
-					atop(old_entry->end - old_entry->start));
-				old_entry->object.vm_object = object;
-				old_entry->offset = 0;
-				if (old_entry->cred != NULL) {
-					object->cred = old_entry->cred;
-					object->charge = old_entry->end -
-					    old_entry->start;
-					old_entry->cred = NULL;
-				}
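+				/*
+				 * The call also moves any swap charge from
+				 * the entry to the new object.
+				 */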
+				vm_map_entry_attach_backing_object(old_entry);
+				object = old_entry->object.vm_object;
 			}
 
 			/*