Page Menu · Home · FreeBSD

D22348.id64556.diff
No One · Temporary

D22348.id64556.diff

Index: sys/security/mac/mac_process.c
===================================================================
--- sys/security/mac/mac_process.c
+++ sys/security/mac/mac_process.c
@@ -252,7 +252,7 @@
mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
struct vm_map *map)
{
- vm_map_entry_t vme;
+ vm_map_entry_t prev, vme;
int result;
vm_prot_t revokeperms;
vm_object_t backing_object, object;
@@ -263,8 +263,10 @@
if (!mac_mmap_revocation)
return;
+ prev = &map->header;
vm_map_lock(map);
- VM_MAP_ENTRY_FOREACH(vme, map) {
+ for (vme = vm_map_entry_first(map); vme != &map->header;
+ prev = vme, vme = vm_map_entry_succ(prev)) {
if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
mac_proc_vm_revoke_recurse(td, cred,
vme->object.sub_map);
@@ -363,8 +365,7 @@
}
pmap_protect(map->pmap, vme->start, vme->end,
vme->protection & ~revokeperms);
- vm_map_try_merge_entries(map, vm_map_entry_pred(vme),
- vme);
+ vm_map_try_merge_entries(map, prev, vme);
}
}
vm_map_unlock(map);
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -421,21 +421,21 @@
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
static inline vm_map_entry_t
-vm_map_entry_succ(vm_map_entry_t entry)
+vm_map_entry_first(vm_map_t map)
{
- return (entry->next);
+ return (map->header.next);
}
static inline vm_map_entry_t
-vm_map_entry_pred(vm_map_entry_t entry)
+vm_map_entry_succ(vm_map_entry_t entry)
{
- return (entry->prev);
+ return (entry->next);
}
-#define VM_MAP_ENTRY_FOREACH(it, map) \
- for ((it) = (map)->header.next; \
+#define VM_MAP_ENTRY_FOREACH(it, map) \
+ for ((it) = vm_map_entry_first(map); \
(it) != &(map)->header; \
(it) = vm_map_entry_succ(it))
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -977,6 +977,22 @@
root->right->max_free : right_ancestor->start - root->end);
}
+/*
+ * vm_map_entry_{pred,succ}:
+ *
+ * Find the {predecessor, successor} of the entry by taking one step
+ * in the appropriate direction and backtracking as much as necessary.
+ */
+
+/* vm_map_entry_succ is defined in vm_map.h. */
+
+static inline vm_map_entry_t
+vm_map_entry_pred(vm_map_entry_t entry)
+{
+
+ return (entry->prev);
+}
+
#define SPLAY_LEFT_STEP(root, y, rlist, test) do { \
vm_size_t max_free; \
\
@@ -1411,7 +1427,7 @@
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
- vm_map_entry_t new_entry, prev_entry;
+ vm_map_entry_t new_entry, next_entry, prev_entry;
struct ucred *cred;
vm_eflags_t protoeflags;
vm_inherit_t inheritance;
@@ -1442,7 +1458,8 @@
/*
* Assert that the next entry doesn't overlap the end point.
*/
- if (vm_map_entry_succ(prev_entry)->start < end)
+ next_entry = vm_map_entry_succ(prev_entry);
+ if (next_entry->start < end)
return (KERN_NO_SPACE);
if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -1535,8 +1552,7 @@
map->size += end - prev_entry->end;
vm_map_entry_resize(map, prev_entry,
end - prev_entry->end);
- vm_map_try_merge_entries(map, prev_entry,
- vm_map_entry_succ(prev_entry));
+ vm_map_try_merge_entries(map, prev_entry, next_entry);
return (KERN_SUCCESS);
}
@@ -1597,7 +1613,7 @@
* other cases, which are less common.
*/
vm_map_try_merge_entries(map, prev_entry, new_entry);
- vm_map_try_merge_entries(map, new_entry, vm_map_entry_succ(new_entry));
+ vm_map_try_merge_entries(map, new_entry, next_entry);
if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -2075,14 +2091,15 @@
* The map must be locked.
*/
void
-vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry)
+vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
+ vm_map_entry_t entry)
{
VM_MAP_ASSERT_LOCKED(map);
if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
- vm_map_mergeable_neighbors(prev, entry)) {
- vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
- vm_map_merged_neighbor_dispose(map, prev);
+ vm_map_mergeable_neighbors(prev_entry, entry)) {
+ vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
+ vm_map_merged_neighbor_dispose(map, prev_entry);
}
}
@@ -2428,7 +2445,7 @@
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_prot_t new_prot, boolean_t set_max)
{
- vm_map_entry_t current, entry, in_tran;
+ vm_map_entry_t entry, first_entry, in_tran, prev_entry;
vm_object_t obj;
struct ucred *cred;
vm_prot_t old_prot;
@@ -2451,26 +2468,26 @@
VM_MAP_RANGE_CHECK(map, start, end);
- if (!vm_map_lookup_entry(map, start, &entry))
- entry = vm_map_entry_succ(entry);
+ if (!vm_map_lookup_entry(map, start, &first_entry))
+ first_entry = vm_map_entry_succ(first_entry);
/*
* Make a first pass to check for protection violations.
*/
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
- if ((current->eflags & MAP_ENTRY_GUARD) != 0)
+ for (entry = first_entry; entry->start < end;
+ entry = vm_map_entry_succ(entry)) {
+ if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
continue;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
+ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
}
- if ((new_prot & current->max_protection) != new_prot) {
+ if ((new_prot & entry->max_protection) != new_prot) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);
}
- if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
- in_tran = current;
+ if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
+ in_tran = entry;
}
/*
@@ -2494,30 +2511,30 @@
* some may now be mergeable.
*/
rv = KERN_SUCCESS;
- vm_map_clip_start(map, entry, start);
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
+ vm_map_clip_start(map, first_entry, start);
+ for (entry = first_entry; entry->start < end;
+ entry = vm_map_entry_succ(entry)) {
- vm_map_clip_end(map, current, end);
+ vm_map_clip_end(map, entry, end);
if (set_max ||
- ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
- ENTRY_CHARGED(current) ||
- (current->eflags & MAP_ENTRY_GUARD) != 0) {
+ ((new_prot & ~(entry->protection)) & VM_PROT_WRITE) == 0 ||
+ ENTRY_CHARGED(entry) ||
+ (entry->eflags & MAP_ENTRY_GUARD) != 0) {
continue;
}
cred = curthread->td_ucred;
- obj = current->object.vm_object;
+ obj = entry->object.vm_object;
- if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
- if (!swap_reserve(current->end - current->start)) {
+ if (obj == NULL || (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ if (!swap_reserve(entry->end - entry->start)) {
rv = KERN_RESOURCE_SHORTAGE;
- end = current->end;
+ end = entry->end;
break;
}
crhold(cred);
- current->cred = cred;
+ entry->cred = cred;
continue;
}
@@ -2534,11 +2551,11 @@
*/
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged (entry %p)",
- obj, current));
+ obj, entry));
if (!swap_reserve(ptoa(obj->size))) {
VM_OBJECT_WUNLOCK(obj);
rv = KERN_RESOURCE_SHORTAGE;
- end = current->end;
+ end = entry->end;
break;
}
@@ -2553,21 +2570,23 @@
* Otherwise, just simplify entries, since some may have been modified.
* [Note that clipping is not necessary the second time.]
*/
- for (current = entry; current->start < end;
- vm_map_try_merge_entries(map, vm_map_entry_pred(current), current),
- current = vm_map_entry_succ(current)) {
+ for (entry = first_entry, prev_entry = vm_map_entry_pred(entry);
+ entry->start < end;
+ vm_map_try_merge_entries(map, prev_entry, entry),
+ prev_entry = entry,
+ entry = vm_map_entry_succ(entry)) {
if (rv != KERN_SUCCESS ||
- (current->eflags & MAP_ENTRY_GUARD) != 0)
+ (entry->eflags & MAP_ENTRY_GUARD) != 0)
continue;
- old_prot = current->protection;
+ old_prot = entry->protection;
if (set_max)
- current->protection =
- (current->max_protection = new_prot) &
+ entry->protection =
+ (entry->max_protection = new_prot) &
old_prot;
else
- current->protection = new_prot;
+ entry->protection = new_prot;
/*
* For user wired map entries, the normal lazy evaluation of
@@ -2575,25 +2594,25 @@
* undesirable. Instead, immediately copy any pages that are
* copy-on-write and enable write access in the physical map.
*/
- if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
- (current->protection & VM_PROT_WRITE) != 0 &&
+ if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
+ (entry->protection & VM_PROT_WRITE) != 0 &&
(old_prot & VM_PROT_WRITE) == 0)
- vm_fault_copy_entry(map, map, current, current, NULL);
+ vm_fault_copy_entry(map, map, entry, entry, NULL);
/*
* When restricting access, update the physical map. Worry
* about copy-on-write here.
*/
- if ((old_prot & ~current->protection) != 0) {
+ if ((old_prot & ~entry->protection) != 0) {
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
VM_PROT_ALL)
- pmap_protect(map->pmap, current->start,
- current->end,
- current->protection & MASK(current));
+ pmap_protect(map->pmap, entry->start,
+ entry->end,
+ entry->protection & MASK(entry));
#undef MASK
}
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(current), current);
+ vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
return (rv);
}
@@ -2613,7 +2632,7 @@
vm_offset_t end,
int behav)
{
- vm_map_entry_t current, entry;
+ vm_map_entry_t entry, prev_entry;
bool modify_map;
/*
@@ -2655,7 +2674,9 @@
if (vm_map_lookup_entry(map, start, &entry)) {
if (modify_map)
vm_map_clip_start(map, entry, start);
+ prev_entry = vm_map_entry_pred(entry);
} else {
+ prev_entry = entry;
entry = vm_map_entry_succ(entry);
}
@@ -2666,43 +2687,41 @@
* We clip the vm_map_entry so that behavioral changes are
* limited to the specified address range.
*/
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+ for (; entry->start < end;
+ prev_entry = entry, entry = vm_map_entry_succ(entry)) {
+ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
continue;
- vm_map_clip_end(map, current, end);
+ vm_map_clip_end(map, entry, end);
switch (behav) {
case MADV_NORMAL:
- vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
+ vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_NORMAL);
break;
case MADV_SEQUENTIAL:
- vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
+ vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
break;
case MADV_RANDOM:
- vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
+ vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_RANDOM);
break;
case MADV_NOSYNC:
- current->eflags |= MAP_ENTRY_NOSYNC;
+ entry->eflags |= MAP_ENTRY_NOSYNC;
break;
case MADV_AUTOSYNC:
- current->eflags &= ~MAP_ENTRY_NOSYNC;
+ entry->eflags &= ~MAP_ENTRY_NOSYNC;
break;
case MADV_NOCORE:
- current->eflags |= MAP_ENTRY_NOCOREDUMP;
+ entry->eflags |= MAP_ENTRY_NOCOREDUMP;
break;
case MADV_CORE:
- current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
+ entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
break;
default:
break;
}
- vm_map_try_merge_entries(map,
- vm_map_entry_pred(current), current);
+ vm_map_try_merge_entries(map, prev_entry, entry);
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(current),
- current);
+ vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
} else {
vm_pindex_t pstart, pend;
@@ -2714,11 +2733,11 @@
* Since we don't clip the vm_map_entry, we have to clip
* the vm_object pindex and count.
*/
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
+ for (; entry->start < end;
+ entry = vm_map_entry_succ(entry)) {
vm_offset_t useEnd, useStart;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
continue;
/*
@@ -2729,21 +2748,21 @@
* backing object can change.
*/
if (behav == MADV_FREE &&
- current->object.vm_object != NULL &&
- current->object.vm_object->backing_object != NULL)
+ entry->object.vm_object != NULL &&
+ entry->object.vm_object->backing_object != NULL)
continue;
- pstart = OFF_TO_IDX(current->offset);
- pend = pstart + atop(current->end - current->start);
- useStart = current->start;
- useEnd = current->end;
+ pstart = OFF_TO_IDX(entry->offset);
+ pend = pstart + atop(entry->end - entry->start);
+ useStart = entry->start;
+ useEnd = entry->end;
- if (current->start < start) {
- pstart += atop(start - current->start);
+ if (entry->start < start) {
+ pstart += atop(start - entry->start);
useStart = start;
}
- if (current->end > end) {
- pend -= atop(current->end - end);
+ if (entry->end > end) {
+ pend -= atop(entry->end - end);
useEnd = end;
}
@@ -2764,7 +2783,7 @@
pmap_advise(map->pmap, useStart, useEnd,
behav);
- vm_object_madvise(current->object.vm_object, pstart,
+ vm_object_madvise(entry->object.vm_object, pstart,
pend, behav);
/*
@@ -2773,11 +2792,11 @@
* paging structures are already populated.
*/
if (behav == MADV_WILLNEED &&
- current->wired_count == 0) {
+ entry->wired_count == 0) {
vm_map_pmap_enter(map,
useStart,
- current->protection,
- current->object.vm_object,
+ entry->protection,
+ entry->object.vm_object,
pstart,
ptoa(pend - pstart),
MAP_PREFAULT_MADVISE
@@ -2802,8 +2821,7 @@
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_inherit_t new_inheritance)
{
- vm_map_entry_t entry;
- vm_map_entry_t temp_entry;
+ vm_map_entry_t entry, prev_entry;
switch (new_inheritance) {
case VM_INHERIT_NONE:
@@ -2818,20 +2836,21 @@
return (KERN_SUCCESS);
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
- if (vm_map_lookup_entry(map, start, &temp_entry)) {
- entry = temp_entry;
+ if (vm_map_lookup_entry(map, start, &prev_entry)) {
+ entry = prev_entry;
vm_map_clip_start(map, entry, start);
+ prev_entry = vm_map_entry_pred(entry);
} else
- entry = vm_map_entry_succ(temp_entry);
- while (entry->start < end) {
+ entry = vm_map_entry_succ(prev_entry);
+ for (; entry->start < end;
+ prev_entry = entry, entry = vm_map_entry_succ(entry)) {
vm_map_clip_end(map, entry, end);
if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
new_inheritance != VM_INHERIT_ZERO)
entry->inheritance = new_inheritance;
- vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
- entry = vm_map_entry_succ(entry);
+ vm_map_try_merge_entries(map, prev_entry, entry);
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+ vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
return (KERN_SUCCESS);
}
@@ -2894,9 +2913,9 @@
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
int flags)
{
- vm_map_entry_t entry, first_entry;
+ vm_map_entry_t entry, first_entry, next_entry, prev_entry;
int rv;
- bool first_iteration, holes_ok, need_wakeup, user_unwire;
+ bool holes_ok, need_wakeup, user_unwire;
if (start == end)
return (KERN_SUCCESS);
@@ -2912,28 +2931,26 @@
return (KERN_INVALID_ADDRESS);
}
}
- first_iteration = true;
- entry = first_entry;
rv = KERN_SUCCESS;
- while (entry->start < end) {
+ for (entry = first_entry; entry->start < end; entry = next_entry) {
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
/*
* We have not yet clipped the entry.
*/
- entry = vm_map_entry_in_transition(map, start, &end,
- holes_ok, entry);
- if (entry == NULL) {
- if (first_iteration) {
+ next_entry = vm_map_entry_in_transition(map, start,
+ &end, holes_ok, entry);
+ if (next_entry == NULL) {
+ if (entry == first_entry) {
vm_map_unlock(map);
return (KERN_INVALID_ADDRESS);
}
rv = KERN_INVALID_ADDRESS;
break;
}
- first_entry = first_iteration ? entry : NULL;
+ first_entry = (entry == first_entry) ?
+ next_entry : NULL;
continue;
}
- first_iteration = false;
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
/*
@@ -2945,13 +2962,13 @@
("owned map entry %p", entry));
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
entry->wiring_thread = curthread;
+ next_entry = vm_map_entry_succ(entry);
/*
* Check the map for holes in the specified region.
* If holes_ok, skip this check.
*/
if (!holes_ok &&
- (entry->end < end &&
- vm_map_entry_succ(entry)->start > entry->end)) {
+ (entry->end < end && next_entry->start > entry->end)) {
end = entry->end;
rv = KERN_INVALID_ADDRESS;
break;
@@ -2965,16 +2982,19 @@
rv = KERN_INVALID_ARGUMENT;
break;
}
- entry = vm_map_entry_succ(entry);
}
need_wakeup = false;
if (first_entry == NULL &&
!vm_map_lookup_entry(map, start, &first_entry)) {
KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
- first_entry = vm_map_entry_succ(first_entry);
+ prev_entry = first_entry;
+ entry = vm_map_entry_succ(prev_entry);
+ } else {
+ entry = first_entry;
+ prev_entry = vm_map_entry_pred(entry);
}
- for (entry = first_entry; entry->start < end;
- entry = vm_map_entry_succ(entry)) {
+ for (; entry->start < end;
+ prev_entry = entry, entry = vm_map_entry_succ(entry)) {
/*
* If holes_ok was specified, an empty
* space in the unwired region could have been mapped
@@ -3010,9 +3030,9 @@
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = true;
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+ vm_map_try_merge_entries(map, prev_entry, entry);
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+ vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
if (need_wakeup)
vm_map_wakeup(map);
@@ -3098,12 +3118,12 @@
int
vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
{
- vm_map_entry_t entry, first_entry, tmp_entry;
+ vm_map_entry_t entry, first_entry, prev_entry, next_entry;
vm_offset_t faddr, saved_end, saved_start;
u_long npages;
u_int last_timestamp;
int rv;
- bool first_iteration, holes_ok, need_wakeup, user_wire;
+ bool holes_ok, need_wakeup, user_wire;
vm_prot_t prot;
VM_MAP_ASSERT_LOCKED(map);
@@ -3122,25 +3142,23 @@
else
return (KERN_INVALID_ADDRESS);
}
- first_iteration = true;
- entry = first_entry;
- while (entry->start < end) {
+ for (entry = first_entry; entry->start < end; entry = next_entry) {
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
/*
* We have not yet clipped the entry.
*/
- entry = vm_map_entry_in_transition(map, start, &end,
- holes_ok, entry);
- if (entry == NULL) {
- if (first_iteration)
+ next_entry = vm_map_entry_in_transition(map, start,
+ &end, holes_ok, entry);
+ if (next_entry == NULL) {
+ if (entry == first_entry)
return (KERN_INVALID_ADDRESS);
rv = KERN_INVALID_ADDRESS;
goto done;
}
- first_entry = first_iteration ? entry : NULL;
+ first_entry = (entry == first_entry) ?
+ next_entry : NULL;
continue;
}
- first_iteration = false;
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
/*
@@ -3203,15 +3221,13 @@
* deleted.
*/
if (!vm_map_lookup_entry(map, saved_start,
- &tmp_entry))
+ &next_entry))
KASSERT(false,
("vm_map_wire: lookup failed"));
- if (entry == first_entry)
- first_entry = tmp_entry;
- else
- first_entry = NULL;
- entry = tmp_entry;
- while (entry->end < saved_end) {
+ first_entry = (entry == first_entry) ?
+ next_entry : NULL;
+ for (entry = next_entry; entry->end < saved_end;
+ entry = vm_map_entry_succ(entry)) {
/*
* In case of failure, handle entries
* that were not fully wired here;
@@ -3222,7 +3238,6 @@
faddr < entry->end)
vm_map_wire_entry_failure(map,
entry, faddr);
- entry = vm_map_entry_succ(entry);
}
}
if (rv != KERN_SUCCESS) {
@@ -3240,14 +3255,13 @@
* Check the map for holes in the specified region.
* If holes_ok was specified, skip this check.
*/
+ next_entry = vm_map_entry_succ(entry);
if (!holes_ok &&
- entry->end < end &&
- vm_map_entry_succ(entry)->start > entry->end) {
+ entry->end < end && next_entry->start > entry->end) {
end = entry->end;
rv = KERN_INVALID_ADDRESS;
goto done;
}
- entry = vm_map_entry_succ(entry);
}
rv = KERN_SUCCESS;
done:
@@ -3255,10 +3269,14 @@
if (first_entry == NULL &&
!vm_map_lookup_entry(map, start, &first_entry)) {
KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
- first_entry = vm_map_entry_succ(first_entry);
+ prev_entry = first_entry;
+ entry = vm_map_entry_succ(first_entry);
+ } else {
+ prev_entry = vm_map_entry_pred(first_entry);
+ entry = first_entry;
}
- for (entry = first_entry; entry->start < end;
- entry = vm_map_entry_succ(entry)) {
+ for (; entry->start < end;
+ prev_entry = entry, entry = vm_map_entry_succ(entry)) {
/*
* If holes_ok was specified, an empty
* space in the unwired region could have been mapped
@@ -3311,9 +3329,9 @@
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = true;
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+ vm_map_try_merge_entries(map, prev_entry, entry);
}
- vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+ vm_map_try_merge_entries(map, prev_entry, entry);
if (need_wakeup)
vm_map_wakeup(map);
return (rv);
@@ -3343,8 +3361,7 @@
boolean_t syncio,
boolean_t invalidate)
{
- vm_map_entry_t current;
- vm_map_entry_t entry;
+ vm_map_entry_t entry, first_entry, next_entry;
vm_size_t size;
vm_object_t object;
vm_ooffset_t offset;
@@ -3353,24 +3370,24 @@
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
- if (!vm_map_lookup_entry(map, start, &entry)) {
+ if (!vm_map_lookup_entry(map, start, &first_entry)) {
vm_map_unlock_read(map);
return (KERN_INVALID_ADDRESS);
} else if (start == end) {
- start = entry->start;
- end = entry->end;
+ start = first_entry->start;
+ end = first_entry->end;
}
/*
* Make a first pass to check for user-wired memory and holes.
*/
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
- if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
+ for (entry = first_entry; entry->start < end; entry = next_entry) {
+ if (invalidate && (entry->eflags & MAP_ENTRY_USER_WIRED)) {
vm_map_unlock_read(map);
return (KERN_INVALID_ARGUMENT);
}
- if (end > current->end &&
- current->end != vm_map_entry_succ(current)->start) {
+ next_entry = vm_map_entry_succ(entry);
+ if (end > entry->end &&
+ entry->end != next_entry->start) {
vm_map_unlock_read(map);
return (KERN_INVALID_ADDRESS);
}
@@ -3384,15 +3401,15 @@
* Make a second pass, cleaning/uncaching pages from the indicated
* objects as we go.
*/
- for (current = entry; current->start < end;) {
- offset = current->offset + (start - current->start);
- size = (end <= current->end ? end : current->end) - start;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
+ for (entry = first_entry; entry->start < end;) {
+ offset = entry->offset + (start - entry->start);
+ size = (end <= entry->end ? end : entry->end) - start;
+ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
vm_map_t smap;
vm_map_entry_t tentry;
vm_size_t tsize;
- smap = current->object.sub_map;
+ smap = entry->object.sub_map;
vm_map_lock_read(smap);
(void) vm_map_lookup_entry(smap, offset, &tentry);
tsize = tentry->end - offset;
@@ -3402,7 +3419,7 @@
offset = tentry->offset + (offset - tentry->start);
vm_map_unlock_read(smap);
} else {
- object = current->object.vm_object;
+ object = entry->object.vm_object;
}
vm_object_reference(object);
last_timestamp = map->timestamp;
@@ -3413,8 +3430,8 @@
vm_object_deallocate(object);
vm_map_lock_read(map);
if (last_timestamp == map->timestamp ||
- !vm_map_lookup_entry(map, start, &current))
- current = vm_map_entry_succ(current);
+ !vm_map_lookup_entry(map, start, &entry))
+ entry = vm_map_entry_succ(entry);
}
vm_map_unlock_read(map);
@@ -3913,9 +3930,7 @@
new_map->anon_loc = old_map->anon_loc;
- old_entry = old_map->header.next;
-
- while (old_entry != &old_map->header) {
+ VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
panic("vm_map_fork: encountered a submap");
@@ -3930,7 +3945,8 @@
case VM_INHERIT_SHARE:
/*
- * Clone the entry, creating the shared object if necessary.
+ * Clone the entry, creating the shared object if
+ * necessary.
*/
object = old_entry->object.vm_object;
if (object == NULL) {
@@ -4065,7 +4081,6 @@
break;
}
- old_entry = vm_map_entry_succ(old_entry);
}
/*
* Use inlined vm_map_unlock() to postpone handling the deferred

File Metadata

Mime Type
text/plain
Expires
Sat, Jan 18, 8:07 PM (22 m, 18 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
15888980
Default Alt Text
D22348.id64556.diff (25 KB)

Event Timeline