Index: vm_map.c
===================================================================
--- vm_map.c
+++ vm_map.c
@@ -2453,6 +2453,7 @@
 	vm_map_entry_t current, entry, in_tran;
 	vm_object_t obj;
 	struct ucred *cred;
+	vm_ooffset_t to_reserve;
 	vm_prot_t old_prot;
 
 	if (start == end)
@@ -2460,6 +2461,7 @@
 
 again:
 	in_tran = NULL;
+	to_reserve = 0;
 	vm_map_lock(map);
 
 	/*
@@ -2472,14 +2474,12 @@
 
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
-		vm_map_clip_start(map, entry, start);
-	} else {
+	if (!vm_map_lookup_entry(map, start, &entry))
 		entry = entry->next;
-	}
 
 	/*
-	 * Make a first pass to check for protection violations.
+	 * Make a first pass to check for protection violations and calculate
+	 * needed swap space.
 	 */
 	for (current = entry; current->start < end; current = current->next) {
 		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
@@ -2492,8 +2492,27 @@
 			vm_map_unlock(map);
 			return (KERN_PROTECTION_FAILURE);
 		}
-		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
-			in_tran = entry;
+		if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
+			in_tran = current;
+			continue;
+		}
+		if (set_max || ENTRY_CHARGED(current) || in_tran != NULL ||
+		    ((new_prot & ~current->protection) & VM_PROT_WRITE) == 0) {
+			continue;
+		}
+		obj = current->object.vm_object;
+		if (obj == NULL ||
+		    (current->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
+			/*
+			 * Entries are not clipped yet; count only the
+			 * overlap with [start, end).
+			 */
+			to_reserve += ulmin(end, current->end) -
+			    ulmax(start, current->start);
+		} else if (obj->type == OBJT_DEFAULT ||
+		    obj->type == OBJT_SWAP) {
+			to_reserve += ptoa(obj->size);
+		}
 	}
 
 	/*
@@ -2509,72 +2528,60 @@
 		vm_map_unlock_and_wait(map, 0);
 		goto again;
 	}
+	if (!swap_reserve(to_reserve)) {
+		vm_map_unlock(map);
+		return (KERN_RESOURCE_SHORTAGE);
+	}
 
 	/*
-	 * Do an accounting pass for private read-only mappings that
+	 * Look for private read-only mappings that
 	 * now will do cow due to allowed write (e.g. debugger sets
-	 * breakpoint on text segment)
+	 * breakpoint on text segment).  Fix up protections.
 	 */
+	if (entry->start < start)
+		vm_map_clip_start(map, entry, start);
 	for (current = entry; current->start < end; current = current->next) {
-
 		vm_map_clip_end(map, current, end);
-
-		if (set_max ||
-		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
-		    ENTRY_CHARGED(current) ||
-		    (current->eflags & MAP_ENTRY_GUARD) != 0) {
+		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;
-		}
+		old_prot = current->protection;
+		do {
+			if (set_max ||
+			    ((new_prot & ~old_prot) & VM_PROT_WRITE) == 0 ||
+			    ENTRY_CHARGED(current))
+				break;
+			cred = curthread->td_ucred;
+			obj = current->object.vm_object;
 
-		cred = curthread->td_ucred;
-		obj = current->object.vm_object;
+			if (obj == NULL ||
+			    (current->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
+				to_reserve -= current->end - current->start;
+				crhold(cred);
+				current->cred = cred;
+				break;
+			}
+			VM_OBJECT_WLOCK(obj);
+			if (obj->type != OBJT_DEFAULT &&
+			    obj->type != OBJT_SWAP) {
+				VM_OBJECT_WUNLOCK(obj);
+				break;
+			}
 
-		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
-			if (!swap_reserve(current->end - current->start)) {
-				vm_map_unlock(map);
-				return (KERN_RESOURCE_SHORTAGE);
-			}
+			/*
+			 * Charge for the whole object allocation now, since
+			 * we cannot distinguish between non-charged and
+			 * charged clipped mapping of the same object later.
+			 */
+			KASSERT(obj->charge == 0,
+			    ("vm_map_protect: object %p overcharged (entry %p)",
+			    obj, current));
+			to_reserve -= ptoa(obj->size);
 			crhold(cred);
-			current->cred = cred;
-			continue;
-		}
-
-		VM_OBJECT_WLOCK(obj);
-		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
+			obj->cred = cred;
+			obj->charge = ptoa(obj->size);
 			VM_OBJECT_WUNLOCK(obj);
-			continue;
-		}
+		} while (false);
 
-		/*
-		 * Charge for the whole object allocation now, since
-		 * we cannot distinguish between non-charged and
-		 * charged clipped mapping of the same object later.
-		 */
-		KASSERT(obj->charge == 0,
-		    ("vm_map_protect: object %p overcharged (entry %p)",
-		    obj, current));
-		if (!swap_reserve(ptoa(obj->size))) {
-			VM_OBJECT_WUNLOCK(obj);
-			vm_map_unlock(map);
-			return (KERN_RESOURCE_SHORTAGE);
-		}
-
-		crhold(cred);
-		obj->cred = cred;
-		obj->charge = ptoa(obj->size);
-		VM_OBJECT_WUNLOCK(obj);
-	}
-
-	/*
-	 * Go back and fix up protections. [Note that clipping is not
-	 * necessary the second time.]
-	 */
-	for (current = entry; current->start < end; current = current->next) {
-		if ((current->eflags & MAP_ENTRY_GUARD) != 0)
-			continue;
-
-		old_prot = current->protection;
-
 		if (set_max)
 			current->protection =
 			    (current->max_protection = new_prot) &
@@ -2607,6 +2614,8 @@
 		}
 		vm_map_simplify_entry(map, current);
 	}
+	KASSERT(to_reserve == 0,
+	    ("vm_map_protect: wrong amount reserved"));
 	vm_map_unlock(map);
 	return (KERN_SUCCESS);
 }