Index: sys/security/mac/mac_process.c
===================================================================
--- sys/security/mac/mac_process.c
+++ sys/security/mac/mac_process.c
@@ -252,7 +252,7 @@
 mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
     struct vm_map *map)
 {
-    vm_map_entry_t vme;
+    vm_map_entry_t prev, vme;
     int result;
     vm_prot_t revokeperms;
     vm_object_t backing_object, object;
@@ -263,8 +263,10 @@
     if (!mac_mmap_revocation)
         return;
 
+    prev = &map->header;
     vm_map_lock(map);
-    VM_MAP_ENTRY_FOREACH(vme, map) {
+    for (vme = vm_map_entry_first(map); vme != &map->header;
+        prev = vme, vme = vm_map_entry_succ(prev)) {
         if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
             mac_proc_vm_revoke_recurse(td, cred,
                 vme->object.sub_map);
@@ -363,8 +365,7 @@
             }
             pmap_protect(map->pmap, vme->start, vme->end,
                 vme->protection & ~revokeperms);
-            vm_map_try_merge_entries(map, vm_map_entry_pred(vme),
-                vme);
+            vm_map_try_merge_entries(map, prev, vme);
         }
     }
     vm_map_unlock(map);
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -421,21 +421,21 @@
 boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
 
 static inline vm_map_entry_t
-vm_map_entry_succ(vm_map_entry_t entry)
+vm_map_entry_first(vm_map_t map)
 {
 
-    return (entry->next);
+    return (map->header.next);
 }
 
 static inline vm_map_entry_t
-vm_map_entry_pred(vm_map_entry_t entry)
+vm_map_entry_succ(vm_map_entry_t entry)
 {
 
-    return (entry->prev);
+    return (entry->next);
 }
 
-#define VM_MAP_ENTRY_FOREACH(it, map)        \
-    for ((it) = (map)->header.next;          \
+#define VM_MAP_ENTRY_FOREACH(it, map)        \
+    for ((it) = vm_map_entry_first(map);     \
         (it) != &(map)->header;              \
         (it) = vm_map_entry_succ(it))
 
 int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -978,6 +978,21 @@
         root->right->max_free : right_ancestor->start - root->end);
 }
 
+/*
+ * vm_map_entry_{pred,succ}:
+ *
+ * Find the {predecessor, successor} of the entry by taking one step
+ * in the appropriate direction and backtracking as much as necessary.
+ */
+static inline vm_map_entry_t
+vm_map_entry_pred(vm_map_entry_t entry)
+{
+
+    return (entry->prev);
+}
+
+/* vm_map_entry_succ is defined in vm_map.h. */
+
 #define SPLAY_LEFT_STEP(root, y, rlist, test) do {    \
     vm_size_t max_free;                               \
                                                       \
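
Note on the shape of the loops above: vm_map.h now exposes vm_map_entry_first() and vm_map_entry_succ(), vm_map_entry_pred() moves out of the header, and callers such as mac_proc_vm_revoke_recurse() carry the predecessor in a local variable instead of reading it back through a pred() accessor. Below is a minimal, self-contained sketch of that pattern; the node/list types and helper names are hypothetical stand-ins, not the kernel's vm_map structures.

    #include <stdio.h>

    struct node {
        struct node *next, *prev;
        int value;
    };

    struct list {
        struct node header;    /* sentinel; header.next is the first node */
    };

    static struct node *
    list_first(struct list *l)
    {
        return (l->header.next);
    }

    static struct node *
    node_succ(struct node *n)
    {
        return (n->next);
    }

    #define LIST_FOREACH_NODE(it, l)        \
        for ((it) = list_first(l);          \
            (it) != &(l)->header;           \
            (it) = node_succ(it))

    int
    main(void)
    {
        struct list l;
        struct node a = { .value = 1 }, b = { .value = 2 };
        struct node *n, *prev;

        /* Link header <-> a <-> b circularly. */
        l.header.next = &a; a.prev = &l.header;
        a.next = &b; b.prev = &a;
        b.next = &l.header; l.header.prev = &b;

        /* Traversal that tracks the predecessor, as in the patched loop. */
        for (prev = &l.header, n = list_first(&l); n != &l.header;
            prev = n, n = node_succ(prev))
            printf("node %d follows %s\n", n->value,
                prev == &l.header ? "header" : "a node");

        LIST_FOREACH_NODE(n, &l)
            printf("value %d\n", n->value);
        return (0);
    }

Carrying prev costs one assignment per step and leaves the traversal independent of the back link.
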
@@ -1412,7 +1427,7 @@
 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
 {
-    vm_map_entry_t new_entry, prev_entry;
+    vm_map_entry_t new_entry, next_entry, prev_entry;
     struct ucred *cred;
     vm_eflags_t protoeflags;
     vm_inherit_t inheritance;
@@ -1443,7 +1458,8 @@
     /*
      * Assert that the next entry doesn't overlap the end point.
      */
-    if (vm_map_entry_succ(prev_entry)->start < end)
+    next_entry = vm_map_entry_succ(prev_entry);
+    if (next_entry->start < end)
         return (KERN_NO_SPACE);
 
     if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -1538,8 +1554,7 @@
             map->size += end - prev_entry->end;
             vm_map_entry_resize(map, prev_entry,
                 end - prev_entry->end);
-            vm_map_try_merge_entries(map, prev_entry,
-                vm_map_entry_succ(prev_entry));
+            vm_map_try_merge_entries(map, prev_entry, next_entry);
             return (KERN_SUCCESS);
         }
 
@@ -1600,7 +1615,7 @@
      * other cases, which are less common.
      */
     vm_map_try_merge_entries(map, prev_entry, new_entry);
-    vm_map_try_merge_entries(map, new_entry, vm_map_entry_succ(new_entry));
+    vm_map_try_merge_entries(map, new_entry, next_entry);
 
     if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
         vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -2078,14 +2093,15 @@
  *    The map must be locked.
  */
 void
-vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry)
+vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
+    vm_map_entry_t entry)
 {
 
     VM_MAP_ASSERT_LOCKED(map);
     if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
-        vm_map_mergeable_neighbors(prev, entry)) {
-        vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
-        vm_map_merged_neighbor_dispose(map, prev);
+        vm_map_mergeable_neighbors(prev_entry, entry)) {
+        vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
+        vm_map_merged_neighbor_dispose(map, prev_entry);
     }
 }
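
The vm_map_try_merge_entries() change above also renames its first entry parameter to prev_entry, underlining the contract: the caller passes the predecessor it already tracks, and on a successful merge the predecessor is the node that gets unlinked (UNLINK_MERGE_NEXT) while entry survives. A toy sketch of that contract, under the assumption that "mergeable" simply means the two ranges abut; the types and names are illustrative, not kernel code.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct range {
        struct range *next, *prev;
        unsigned long start, end;    /* [start, end) */
    };

    static bool
    ranges_mergeable(struct range *prev, struct range *entry)
    {
        return (prev->end == entry->start);
    }

    /* On merge, unlink prev and let entry absorb its span. */
    static void
    try_merge_ranges(struct range *prev, struct range *entry)
    {
        if (prev->prev != NULL && ranges_mergeable(prev, entry)) {
            entry->start = prev->start;
            prev->prev->next = entry;
            entry->prev = prev->prev;
            /* A real implementation would free 'prev' here. */
        }
    }

    int
    main(void)
    {
        struct range head = { .start = 0, .end = 0 };
        struct range a = { .start = 10, .end = 20 };
        struct range b = { .start = 20, .end = 30 };

        head.next = &a; a.prev = &head;
        a.next = &b; b.prev = &a;
        b.next = NULL;

        try_merge_ranges(&a, &b);    /* caller already knows a precedes b */
        assert(head.next == &b && b.start == 10 && b.prev == &head);
        return (0);
    }

Because entry survives the merge, a loop that calls this once per iteration can safely continue with prev = entry afterwards, never touching the disposed node.
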
@@ -2430,7 +2446,7 @@
 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
     vm_prot_t new_prot, boolean_t set_max)
 {
-    vm_map_entry_t current, entry, in_tran;
+    vm_map_entry_t entry, first_entry, in_tran, prev_entry;
     vm_object_t obj;
     struct ucred *cred;
     vm_prot_t old_prot;
@@ -2453,26 +2469,26 @@
     VM_MAP_RANGE_CHECK(map, start, end);
 
-    if (!vm_map_lookup_entry(map, start, &entry))
-        entry = vm_map_entry_succ(entry);
+    if (!vm_map_lookup_entry(map, start, &first_entry))
+        first_entry = vm_map_entry_succ(first_entry);
 
     /*
      * Make a first pass to check for protection violations.
      */
-    for (current = entry; current->start < end;
-        current = vm_map_entry_succ(current)) {
-        if ((current->eflags & MAP_ENTRY_GUARD) != 0)
+    for (entry = first_entry; entry->start < end;
+        entry = vm_map_entry_succ(entry)) {
+        if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
             continue;
-        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
+        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
             vm_map_unlock(map);
             return (KERN_INVALID_ARGUMENT);
         }
-        if ((new_prot & current->max_protection) != new_prot) {
+        if ((new_prot & entry->max_protection) != new_prot) {
             vm_map_unlock(map);
             return (KERN_PROTECTION_FAILURE);
         }
-        if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
-            in_tran = current;
+        if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
+            in_tran = entry;
     }
 
     /*
@@ -2496,30 +2512,30 @@
      * some may now be mergeable.
      */
     rv = KERN_SUCCESS;
-    vm_map_clip_start(map, entry, start);
-    for (current = entry; current->start < end;
-        current = vm_map_entry_succ(current)) {
+    vm_map_clip_start(map, first_entry, start);
+    for (entry = first_entry; entry->start < end;
+        entry = vm_map_entry_succ(entry)) {
+        vm_map_clip_end(map, entry, end);
 
-        vm_map_clip_end(map, current, end);
-
         if (set_max ||
-            ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
-            ENTRY_CHARGED(current) ||
-            (current->eflags & MAP_ENTRY_GUARD) != 0) {
+            ((new_prot & ~(entry->protection)) & VM_PROT_WRITE) == 0 ||
+            ENTRY_CHARGED(entry) ||
+            (entry->eflags & MAP_ENTRY_GUARD) != 0) {
             continue;
         }
 
         cred = curthread->td_ucred;
-        obj = current->object.vm_object;
+        obj = entry->object.vm_object;
 
-        if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
-            if (!swap_reserve(current->end - current->start)) {
+        if (obj == NULL ||
+            (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
+            if (!swap_reserve(entry->end - entry->start)) {
                 rv = KERN_RESOURCE_SHORTAGE;
-                end = current->end;
+                end = entry->end;
                 break;
             }
             crhold(cred);
-            current->cred = cred;
+            entry->cred = cred;
             continue;
         }
 
@@ -2536,11 +2552,11 @@
          */
         KASSERT(obj->charge == 0,
             ("vm_map_protect: object %p overcharged (entry %p)",
-            obj, current));
+            obj, entry));
         if (!swap_reserve(ptoa(obj->size))) {
             VM_OBJECT_WUNLOCK(obj);
             rv = KERN_RESOURCE_SHORTAGE;
-            end = current->end;
+            end = entry->end;
             break;
         }
 
@@ -2555,21 +2571,22 @@
      * Otherwise, just simplify entries, since some may have been modified.
      * [Note that clipping is not necessary the second time.]
      */
-    for (current = entry; current->start < end;
-        vm_map_try_merge_entries(map, vm_map_entry_pred(current), current),
-        current = vm_map_entry_succ(current)) {
+    for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
+        entry->start < end;
+        vm_map_try_merge_entries(map, prev_entry, entry),
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         if (rv != KERN_SUCCESS ||
-            (current->eflags & MAP_ENTRY_GUARD) != 0)
+            (entry->eflags & MAP_ENTRY_GUARD) != 0)
             continue;
 
-        old_prot = current->protection;
+        old_prot = entry->protection;
 
         if (set_max)
-            current->protection =
-                (current->max_protection = new_prot) &
+            entry->protection =
+                (entry->max_protection = new_prot) &
                 old_prot;
         else
-            current->protection = new_prot;
+            entry->protection = new_prot;
 
         /*
         * For user wired map entries, the normal lazy evaluation of
@@ -2577,25 +2594,25 @@
          * undesirable.  Instead, immediately copy any pages that are
          * copy-on-write and enable write access in the physical map.
          */
-        if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
-            (current->protection & VM_PROT_WRITE) != 0 &&
+        if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
+            (entry->protection & VM_PROT_WRITE) != 0 &&
             (old_prot & VM_PROT_WRITE) == 0)
-            vm_fault_copy_entry(map, map, current, current, NULL);
+            vm_fault_copy_entry(map, map, entry, entry, NULL);
 
         /*
          * When restricting access, update the physical map.  Worry
         * about copy-on-write here.
         */
-        if ((old_prot & ~current->protection) != 0) {
+        if ((old_prot & ~entry->protection) != 0) {
 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                                                          VM_PROT_ALL)
-            pmap_protect(map->pmap, current->start,
-                current->end,
-                current->protection & MASK(current));
+            pmap_protect(map->pmap, entry->start,
+                entry->end,
+                entry->protection & MASK(entry));
#undef MASK
         }
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(current), current);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     vm_map_unlock(map);
     return (rv);
 }
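
In the vm_map_protect() hunks above, when access is being restricted ((old_prot & ~entry->protection) != 0), the MASK() macro keeps VM_PROT_WRITE out of the physical map for copy-on-write entries, so the first write still faults and triggers the copy. A compilable toy of that filter; the VM_PROT_* values mirror vm/vm_prot.h but should be treated as assumptions here.

    #include <stdio.h>

    #define VM_PROT_READ     0x1
    #define VM_PROT_WRITE    0x2
    #define VM_PROT_EXECUTE  0x4
    #define VM_PROT_ALL      (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)

    /* Protection actually handed to the physical map. */
    static unsigned
    pmap_prot(unsigned prot, int is_cow)
    {
        return (prot & (is_cow ? ~VM_PROT_WRITE : VM_PROT_ALL));
    }

    int
    main(void)
    {
        /* An rw- entry: the hardware map gets r-- while it is still COW. */
        printf("cow: %#x, private: %#x\n",
            pmap_prot(VM_PROT_READ | VM_PROT_WRITE, 1),
            pmap_prot(VM_PROT_READ | VM_PROT_WRITE, 0));
        return (0);
    }
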
@@ -2615,7 +2632,7 @@
     vm_offset_t end, int behav)
 {
-    vm_map_entry_t current, entry;
+    vm_map_entry_t entry, prev_entry;
     bool modify_map;
 
     /*
@@ -2657,7 +2674,9 @@
     if (vm_map_lookup_entry(map, start, &entry)) {
         if (modify_map)
             vm_map_clip_start(map, entry, start);
+        prev_entry = vm_map_entry_pred(entry);
     } else {
+        prev_entry = entry;
         entry = vm_map_entry_succ(entry);
     }
 
@@ -2668,43 +2687,41 @@
      * We clip the vm_map_entry so that behavioral changes are
      * limited to the specified address range.
      */
-    for (current = entry; current->start < end;
-        current = vm_map_entry_succ(current)) {
-        if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
+        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
             continue;
 
-        vm_map_clip_end(map, current, end);
+        vm_map_clip_end(map, entry, end);
 
         switch (behav) {
         case MADV_NORMAL:
-            vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
+            vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_NORMAL);
             break;
         case MADV_SEQUENTIAL:
-            vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
+            vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
             break;
         case MADV_RANDOM:
-            vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
+            vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_RANDOM);
             break;
         case MADV_NOSYNC:
-            current->eflags |= MAP_ENTRY_NOSYNC;
+            entry->eflags |= MAP_ENTRY_NOSYNC;
             break;
         case MADV_AUTOSYNC:
-            current->eflags &= ~MAP_ENTRY_NOSYNC;
+            entry->eflags &= ~MAP_ENTRY_NOSYNC;
             break;
         case MADV_NOCORE:
-            current->eflags |= MAP_ENTRY_NOCOREDUMP;
+            entry->eflags |= MAP_ENTRY_NOCOREDUMP;
             break;
         case MADV_CORE:
-            current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
+            entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
             break;
         default:
             break;
         }
-        vm_map_try_merge_entries(map,
-            vm_map_entry_pred(current), current);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(current),
-        current);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     vm_map_unlock(map);
 } else {
     vm_pindex_t pstart, pend;
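
vm_map_madvise() above is split into two branches: advice that modifies the map entry itself is applied under the map write lock with clipping and merging, while the remaining advice is forwarded to the backing object under a read lock. A sketch of that classification follows; the MADV_* numbering is hard-coded to keep the example standalone and is an assumption mirroring sys/mman.h.

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed values, mirroring FreeBSD's <sys/mman.h>. */
    #define MADV_NORMAL      0
    #define MADV_RANDOM      1
    #define MADV_SEQUENTIAL  2
    #define MADV_WILLNEED    3
    #define MADV_DONTNEED    4
    #define MADV_FREE        5
    #define MADV_NOSYNC      6
    #define MADV_AUTOSYNC    7
    #define MADV_NOCORE      8
    #define MADV_CORE        9

    /* True for advice that changes the map entry, as in the hunks above. */
    static bool
    advice_modifies_map(int behav)
    {
        switch (behav) {
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_NOSYNC:
        case MADV_AUTOSYNC:
        case MADV_NOCORE:
        case MADV_CORE:
            return (true);
        default:    /* WILLNEED, DONTNEED, FREE: applied to the object */
            return (false);
        }
    }

    int
    main(void)
    {
        printf("MADV_RANDOM -> map: %d\n", advice_modifies_map(MADV_RANDOM));
        printf("MADV_FREE   -> map: %d\n", advice_modifies_map(MADV_FREE));
        return (0);
    }
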
@@ -2716,11 +2733,11 @@
          * Since we don't clip the vm_map_entry, we have to clip
          * the vm_object pindex and count.
          */
-        for (current = entry; current->start < end;
-            current = vm_map_entry_succ(current)) {
+        for (; entry->start < end;
+            entry = vm_map_entry_succ(entry)) {
             vm_offset_t useEnd, useStart;
 
-            if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+            if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
                 continue;
 
             /*
@@ -2731,21 +2748,21 @@
              * backing object can change.
              */
             if (behav == MADV_FREE &&
-                current->object.vm_object != NULL &&
-                current->object.vm_object->backing_object != NULL)
+                entry->object.vm_object != NULL &&
+                entry->object.vm_object->backing_object != NULL)
                 continue;
 
-            pstart = OFF_TO_IDX(current->offset);
-            pend = pstart + atop(current->end - current->start);
-            useStart = current->start;
-            useEnd = current->end;
+            pstart = OFF_TO_IDX(entry->offset);
+            pend = pstart + atop(entry->end - entry->start);
+            useStart = entry->start;
+            useEnd = entry->end;
 
-            if (current->start < start) {
-                pstart += atop(start - current->start);
+            if (entry->start < start) {
+                pstart += atop(start - entry->start);
                 useStart = start;
             }
-            if (current->end > end) {
-                pend -= atop(current->end - end);
+            if (entry->end > end) {
+                pend -= atop(entry->end - end);
                 useEnd = end;
             }
 
@@ -2766,7 +2783,7 @@
             pmap_advise(map->pmap, useStart, useEnd,
                 behav);
 
-            vm_object_madvise(current->object.vm_object, pstart,
+            vm_object_madvise(entry->object.vm_object, pstart,
                 pend, behav);
 
             /*
@@ -2775,11 +2792,11 @@
              * paging structures are already populated.
              */
             if (behav == MADV_WILLNEED &&
-                current->wired_count == 0) {
+                entry->wired_count == 0) {
                 vm_map_pmap_enter(map,
                     useStart,
-                    current->protection,
-                    current->object.vm_object,
+                    entry->protection,
+                    entry->object.vm_object,
                     pstart,
                     ptoa(pend - pstart),
                     MAP_PREFAULT_MADVISE
@@ -2804,8 +2821,7 @@
 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
     vm_inherit_t new_inheritance)
 {
-    vm_map_entry_t entry;
-    vm_map_entry_t temp_entry;
+    vm_map_entry_t entry, prev_entry;
 
     switch (new_inheritance) {
     case VM_INHERIT_NONE:
@@ -2820,20 +2836,21 @@
         return (KERN_SUCCESS);
     vm_map_lock(map);
     VM_MAP_RANGE_CHECK(map, start, end);
-    if (vm_map_lookup_entry(map, start, &temp_entry)) {
-        entry = temp_entry;
+    if (vm_map_lookup_entry(map, start, &prev_entry)) {
+        entry = prev_entry;
         vm_map_clip_start(map, entry, start);
+        prev_entry = vm_map_entry_pred(entry);
     } else
-        entry = vm_map_entry_succ(temp_entry);
-    while (entry->start < end) {
+        entry = vm_map_entry_succ(prev_entry);
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         vm_map_clip_end(map, entry, end);
         if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
             new_inheritance != VM_INHERIT_ZERO)
             entry->inheritance = new_inheritance;
-        vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
-        entry = vm_map_entry_succ(entry);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     vm_map_unlock(map);
     return (KERN_SUCCESS);
 }
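
The object-level madvise path above cannot clip entries, so it clips the page-index range instead. Here is the same arithmetic as a self-contained worked example; PAGE_SHIFT, the entry geometry, and the request range are all assumed values.

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define atop(x)       ((unsigned long)(x) >> PAGE_SHIFT)
    #define OFF_TO_IDX(x) atop(x)

    int
    main(void)
    {
        /* Hypothetical entry: maps [0x10000, 0x30000) at object offset 0x4000. */
        unsigned long e_start = 0x10000, e_end = 0x30000, e_off = 0x4000;
        /* madvise request covering only [0x18000, 0x28000). */
        unsigned long start = 0x18000, end = 0x28000;
        unsigned long pstart, pend, useStart, useEnd;

        pstart = OFF_TO_IDX(e_off);
        pend = pstart + atop(e_end - e_start);
        useStart = e_start;
        useEnd = e_end;
        if (e_start < start) {
            pstart += atop(start - e_start);    /* skip leading pages */
            useStart = start;
        }
        if (e_end > end) {
            pend -= atop(e_end - end);          /* drop trailing pages */
            useEnd = end;
        }
        printf("object pages [%lu, %lu), addresses [%#lx, %#lx)\n",
            pstart, pend, useStart, useEnd);
        return (0);
    }

With 4 KiB pages this prints object pages [12, 28) for addresses [0x18000, 0x28000): exactly the sixteen pages the request covers.
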
@@ -2896,9 +2913,9 @@
 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
-    vm_map_entry_t entry, first_entry;
+    vm_map_entry_t entry, first_entry, next_entry, prev_entry;
     int rv;
-    bool first_iteration, holes_ok, need_wakeup, user_unwire;
+    bool holes_ok, need_wakeup, user_unwire;
 
     if (start == end)
         return (KERN_SUCCESS);
@@ -2914,28 +2931,26 @@
             return (KERN_INVALID_ADDRESS);
         }
     }
-    first_iteration = true;
-    entry = first_entry;
     rv = KERN_SUCCESS;
-    while (entry->start < end) {
+    for (entry = first_entry; entry->start < end; entry = next_entry) {
         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
             /*
              * We have not yet clipped the entry.
              */
-            entry = vm_map_entry_in_transition(map, start, &end,
-                holes_ok, entry);
-            if (entry == NULL) {
-                if (first_iteration) {
+            next_entry = vm_map_entry_in_transition(map, start,
+                &end, holes_ok, entry);
+            if (next_entry == NULL) {
+                if (entry == first_entry) {
                     vm_map_unlock(map);
                     return (KERN_INVALID_ADDRESS);
                 }
                 rv = KERN_INVALID_ADDRESS;
                 break;
             }
-            first_entry = first_iteration ? entry : NULL;
+            first_entry = (entry == first_entry) ?
+                next_entry : NULL;
             continue;
         }
-        first_iteration = false;
         vm_map_clip_start(map, entry, start);
         vm_map_clip_end(map, entry, end);
         /*
@@ -2947,13 +2962,13 @@
             ("owned map entry %p", entry));
         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
         entry->wiring_thread = curthread;
+        next_entry = vm_map_entry_succ(entry);
         /*
         * Check the map for holes in the specified region.
         * If holes_ok, skip this check.
         */
         if (!holes_ok &&
-            (entry->end < end &&
-            vm_map_entry_succ(entry)->start > entry->end)) {
+            entry->end < end && next_entry->start > entry->end) {
             end = entry->end;
             rv = KERN_INVALID_ADDRESS;
             break;
@@ -2967,16 +2982,19 @@
             rv = KERN_INVALID_ARGUMENT;
             break;
         }
-        entry = vm_map_entry_succ(entry);
     }
     need_wakeup = false;
     if (first_entry == NULL &&
         !vm_map_lookup_entry(map, start, &first_entry)) {
         KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
-        first_entry = vm_map_entry_succ(first_entry);
+        prev_entry = first_entry;
+        entry = vm_map_entry_succ(first_entry);
+    } else {
+        prev_entry = vm_map_entry_pred(first_entry);
+        entry = first_entry;
     }
-    for (entry = first_entry; entry->start < end;
-        entry = vm_map_entry_succ(entry)) {
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         /*
          * If holes_ok was specified, an empty
         * space in the unwired region could have been mapped
@@ -3012,9 +3030,9 @@
             entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
             need_wakeup = true;
         }
-        vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     vm_map_unlock(map);
     if (need_wakeup)
         vm_map_wakeup(map);
@@ -3100,12 +3118,12 @@
 int
 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
-    vm_map_entry_t entry, first_entry, tmp_entry;
+    vm_map_entry_t entry, first_entry, next_entry, prev_entry;
     vm_offset_t faddr, saved_end, saved_start;
     u_long npages;
     u_int last_timestamp;
     int rv;
-    bool first_iteration, holes_ok, need_wakeup, user_wire;
+    bool holes_ok, need_wakeup, user_wire;
     vm_prot_t prot;
 
     VM_MAP_ASSERT_LOCKED(map);
@@ -3124,25 +3142,23 @@
         else
             return (KERN_INVALID_ADDRESS);
     }
-    first_iteration = true;
-    entry = first_entry;
-    while (entry->start < end) {
+    for (entry = first_entry; entry->start < end; entry = next_entry) {
         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
             /*
              * We have not yet clipped the entry.
             */
-            entry = vm_map_entry_in_transition(map, start, &end,
-                holes_ok, entry);
-            if (entry == NULL) {
-                if (first_iteration)
+            next_entry = vm_map_entry_in_transition(map, start,
+                &end, holes_ok, entry);
+            if (next_entry == NULL) {
+                if (entry == first_entry)
                     return (KERN_INVALID_ADDRESS);
                 rv = KERN_INVALID_ADDRESS;
                 goto done;
             }
-            first_entry = first_iteration ? entry : NULL;
+            first_entry = (entry == first_entry) ?
+                next_entry : NULL;
             continue;
         }
-        first_iteration = false;
         vm_map_clip_start(map, entry, start);
         vm_map_clip_end(map, entry, end);
         /*
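
The unwire/wire loops above drop the separate first_iteration flag: whether this is the first trip is now inferred from entry == first_entry, and first_entry is set to NULL once the walk moves past a point that may be clipped or freed while the map lock is dropped, forcing a later re-lookup. A toy sketch of that control flow; the types and the skip_in_transition() helper are stand-ins, not the kernel's.

    #include <stdbool.h>
    #include <stdio.h>

    struct ent {
        struct ent *next;
        bool in_transition;
        int id;
    };

    /* Stand-in for vm_map_entry_in_transition(): wait, then step forward. */
    static struct ent *
    skip_in_transition(struct ent *e)
    {
        return (e->next);
    }

    int
    main(void)
    {
        struct ent c = { NULL, false, 3 };
        struct ent b = { &c, true, 2 };
        struct ent a = { &b, false, 1 };
        struct ent *first_entry = &a, *entry, *next_entry;

        for (entry = first_entry; entry != NULL; entry = next_entry) {
            if (entry->in_transition) {
                next_entry = skip_in_transition(entry);
                /* Only the very first entry is known-stable here. */
                first_entry = (entry == first_entry) ?
                    next_entry : NULL;
                continue;
            }
            next_entry = entry->next;
            printf("visit %d (first? %d)\n", entry->id,
                entry == first_entry);
        }
        return (0);
    }
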
@@ -3205,15 +3221,13 @@
              * deleted.
              */
             if (!vm_map_lookup_entry(map, saved_start,
-                &tmp_entry))
+                &next_entry))
                 KASSERT(false,
                     ("vm_map_wire: lookup failed"));
-            if (entry == first_entry)
-                first_entry = tmp_entry;
-            else
-                first_entry = NULL;
-            entry = tmp_entry;
-            while (entry->end < saved_end) {
+            first_entry = (entry == first_entry) ?
+                next_entry : NULL;
+            for (entry = next_entry; entry->end < saved_end;
+                entry = vm_map_entry_succ(entry)) {
                 /*
                  * In case of failure, handle entries
                  * that were not fully wired here;
@@ -3224,7 +3238,6 @@
                     faddr < entry->end)
                     vm_map_wire_entry_failure(map,
                         entry, faddr);
-                entry = vm_map_entry_succ(entry);
             }
         }
         if (rv != KERN_SUCCESS) {
@@ -3242,14 +3255,13 @@
          * Check the map for holes in the specified region.
          * If holes_ok was specified, skip this check.
          */
+        next_entry = vm_map_entry_succ(entry);
         if (!holes_ok &&
-            entry->end < end &&
-            vm_map_entry_succ(entry)->start > entry->end) {
+            entry->end < end && next_entry->start > entry->end) {
             end = entry->end;
             rv = KERN_INVALID_ADDRESS;
             goto done;
         }
-        entry = vm_map_entry_succ(entry);
     }
     rv = KERN_SUCCESS;
 done:
@@ -3257,10 +3269,14 @@
     if (first_entry == NULL &&
         !vm_map_lookup_entry(map, start, &first_entry)) {
         KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
-        first_entry = vm_map_entry_succ(first_entry);
+        prev_entry = first_entry;
+        entry = vm_map_entry_succ(first_entry);
+    } else {
+        prev_entry = vm_map_entry_pred(first_entry);
+        entry = first_entry;
     }
-    for (entry = first_entry; entry->start < end;
-        entry = vm_map_entry_succ(entry)) {
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         /*
          * If holes_ok was specified, an empty
         * space in the unwired region could have been mapped
@@ -3313,9 +3329,9 @@
             entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
             need_wakeup = true;
         }
-        vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     if (need_wakeup)
         vm_map_wakeup(map);
     return (rv);
@@ -3345,8 +3361,7 @@
     boolean_t syncio, boolean_t invalidate)
 {
-    vm_map_entry_t current;
-    vm_map_entry_t entry;
+    vm_map_entry_t entry, first_entry, next_entry;
     vm_size_t size;
     vm_object_t object;
     vm_ooffset_t offset;
@@ -3355,24 +3370,24 @@
 
     vm_map_lock_read(map);
     VM_MAP_RANGE_CHECK(map, start, end);
-    if (!vm_map_lookup_entry(map, start, &entry)) {
+    if (!vm_map_lookup_entry(map, start, &first_entry)) {
         vm_map_unlock_read(map);
         return (KERN_INVALID_ADDRESS);
     } else if (start == end) {
-        start = entry->start;
-        end = entry->end;
+        start = first_entry->start;
+        end = first_entry->end;
     }
 
     /*
      * Make a first pass to check for user-wired memory and holes.
      */
-    for (current = entry; current->start < end;
-        current = vm_map_entry_succ(current)) {
-        if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
+    for (entry = first_entry; entry->start < end; entry = next_entry) {
+        if (invalidate && (entry->eflags & MAP_ENTRY_USER_WIRED)) {
             vm_map_unlock_read(map);
             return (KERN_INVALID_ARGUMENT);
         }
-        if (end > current->end &&
-            current->end != vm_map_entry_succ(current)->start) {
+        next_entry = vm_map_entry_succ(entry);
+        if (end > entry->end &&
+            entry->end != next_entry->start) {
             vm_map_unlock_read(map);
             return (KERN_INVALID_ADDRESS);
         }
     }
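
The wiring retry path above re-looks the entry up by address (saved_start) after faulting may have dropped and retaken the map lock, rather than trusting a saved pointer; vm_map_sync() below does the same with map->timestamp. A minimal sketch of that revalidation idiom, using a generation counter in the role of the timestamp; everything here is a toy stand-in.

    #include <stdbool.h>
    #include <stdio.h>

    struct toymap {
        unsigned timestamp;    /* bumped on any structural change */
    };

    /* Stand-in for a lookup-by-address; always succeeds in this toy. */
    static bool
    lookup(struct toymap *m, unsigned long addr, int *out)
    {
        (void)m;
        *out = (int)(addr >> 12);    /* pretend: page index as "entry" */
        return (true);
    }

    int
    main(void)
    {
        struct toymap m = { .timestamp = 7 };
        unsigned last_timestamp;
        int entry = 5;

        last_timestamp = m.timestamp;
        /* ... unlock, do slow work, relock ... */
        m.timestamp++;    /* someone changed the map meanwhile */

        if (last_timestamp != m.timestamp) {
            if (!lookup(&m, 0x18000, &entry))
                return (1);
            printf("revalidated entry: %d\n", entry);
        }
        return (0);
    }
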
@@ -3386,15 +3401,15 @@
      * Make a second pass, cleaning/uncaching pages from the indicated
      * objects as we go.
      */
-    for (current = entry; current->start < end;) {
-        offset = current->offset + (start - current->start);
-        size = (end <= current->end ? end : current->end) - start;
-        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
+    for (entry = first_entry; entry->start < end;) {
+        offset = entry->offset + (start - entry->start);
+        size = (end <= entry->end ? end : entry->end) - start;
+        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
             vm_map_t smap;
             vm_map_entry_t tentry;
             vm_size_t tsize;
 
-            smap = current->object.sub_map;
+            smap = entry->object.sub_map;
             vm_map_lock_read(smap);
             (void) vm_map_lookup_entry(smap, offset, &tentry);
             tsize = tentry->end - offset;
@@ -3404,7 +3419,7 @@
             offset = tentry->offset + (offset - tentry->start);
             vm_map_unlock_read(smap);
         } else {
-            object = current->object.vm_object;
+            object = entry->object.vm_object;
         }
         vm_object_reference(object);
         last_timestamp = map->timestamp;
@@ -3415,8 +3430,8 @@
         vm_object_deallocate(object);
         vm_map_lock_read(map);
         if (last_timestamp == map->timestamp ||
-            !vm_map_lookup_entry(map, start, &current))
-            current = vm_map_entry_succ(current);
+            !vm_map_lookup_entry(map, start, &entry))
+            entry = vm_map_entry_succ(entry);
     }
 
     vm_map_unlock_read(map);
@@ -3914,10 +3929,8 @@
 
     new_map->anon_loc = old_map->anon_loc;
 
-    old_entry = old_map->header.next;
-
-    while (old_entry != &old_map->header) {
-        if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
+    VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
+        if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
             panic("vm_map_fork: encountered a submap");
 
         inh = old_entry->inheritance;
@@ -3931,7 +3944,8 @@
 
         case VM_INHERIT_SHARE:
             /*
-             * Clone the entry, creating the shared object if necessary.
+             * Clone the entry, creating the shared object if
+             * necessary.
             */
             object = old_entry->object.vm_object;
             if (object == NULL) {
@@ -4066,7 +4080,6 @@
             break;
         }
-        old_entry = vm_map_entry_succ(old_entry);
     }
     /*
     * Use inlined vm_map_unlock() to postpone handling the deferred
@@ -4569,7 +4582,7 @@
     /*
     * Handle submaps.
     */
-    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
+    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
         vm_map_t old_map = map;
 
         *var_map = map = entry->object.sub_map;
@@ -4741,7 +4754,7 @@
     /*
     * Fail if the entry refers to a submap.
     */
-    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
+    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
         return (KERN_FAILURE);
 
     /*
@@ -4905,7 +4918,7 @@
         if (entry->wired_count != 0)
             db_printf(", wired");
     }
-    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
+    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
         db_printf(", share=%p, offset=0x%jx\n",
             (void *)entry->object.sub_map,
             (uintmax_t)entry->offset);
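
Taken together, the hunks in this patch replace every read of an entry's prev pointer with either a tracked predecessor or a re-lookup, which appears to be groundwork for representing the entry chain without back links at all (only vm_map_entry_pred() itself still touches entry->prev). The sketch below shows coalesce-during-walk working on a purely singly-linked list, where a pred() accessor could not exist; it merges forward, disposing the successor, since that is what forward-only links allow, whereas the kernel's UNLINK_MERGE_NEXT instead keeps the successor. Toy types, not kernel code.

    #include <stdio.h>

    struct seg {
        struct seg *next;
        unsigned long start, end;    /* [start, end) */
    };

    int
    main(void)
    {
        struct seg c = { NULL, 30, 40 };
        struct seg b = { &c, 20, 30 };
        struct seg a = { &b, 10, 20 };
        struct seg *e, *head = &a;

        /* Coalesce adjacent segments using forward links only. */
        for (e = head; e != NULL && e->next != NULL; ) {
            if (e->end == e->next->start) {
                e->end = e->next->end;
                e->next = e->next->next;    /* unlink merged node */
            } else
                e = e->next;
        }
        for (e = head; e != NULL; e = e->next)
            printf("[%lu, %lu)\n", e->start, e->end);
        return (0);
    }
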