sys/vm/vm_map.c
[… 1,540 lines not shown …] | if (prev_entry->inheritance == inheritance && | ||||
prev_entry->wired_count == 0) { | prev_entry->wired_count == 0) { | ||||
KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == | KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == | ||||
0, ("prev_entry %p has incoherent wiring", | 0, ("prev_entry %p has incoherent wiring", | ||||
prev_entry)); | prev_entry)); | ||||
if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) | if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) | ||||
map->size += end - prev_entry->end; | map->size += end - prev_entry->end; | ||||
vm_map_entry_resize(map, prev_entry, | vm_map_entry_resize(map, prev_entry, | ||||
end - prev_entry->end); | end - prev_entry->end); | ||||
vm_map_simplify_entry(map, prev_entry); | vm_map_try_merge_entries(map, prev_entry, prev_entry->next); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
/* | /* | ||||
* If we can extend the object but cannot extend the | * If we can extend the object but cannot extend the | ||||
* map entry, we have to create a new map entry. We | * map entry, we have to create a new map entry. We | ||||
* must bump the ref count on the extended object to | * must bump the ref count on the extended object to | ||||
* account for it. object may be NULL. | * account for it. object may be NULL. | ||||
[… 43 lines not shown …] | if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) | ||||
map->size += new_entry->end - new_entry->start; | map->size += new_entry->end - new_entry->start; | ||||
/* | /* | ||||
* Try to coalesce the new entry with both the previous and next | * Try to coalesce the new entry with both the previous and next | ||||
* entries in the list. Previously, we only attempted to coalesce | * entries in the list. Previously, we only attempted to coalesce | ||||
* with the previous entry when object is NULL. Here, we handle the | * with the previous entry when object is NULL. Here, we handle the | ||||
* other cases, which are less common. | * other cases, which are less common. | ||||
*/ | */ | ||||
vm_map_simplify_entry(map, new_entry); | vm_map_try_merge_entries(map, prev_entry, new_entry); | ||||
vm_map_try_merge_entries(map, new_entry, new_entry->next); | |||||
if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { | if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { | ||||
vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), | vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), | ||||
end - start, cow & MAP_PREFAULT_PARTIAL); | end - start, cow & MAP_PREFAULT_PARTIAL); | ||||
} | } | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
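
The coalescing above succeeds only when vm_map_mergeable_neighbors() approves the pair, and that function lies outside the hunks shown here. The sketch below is an assumption about the kind of checks it performs (adjacent ranges, the same backing object at contiguous offsets, and identical attributes), not a copy of the FreeBSD implementation:

/* Sketch only; vm_map_mergeable_neighbors() itself is not in this diff. */
static bool
mergeable_neighbors_sketch(vm_map_entry_t prev, vm_map_entry_t entry)
{
        return (prev->end == entry->start &&
            prev->object.vm_object == entry->object.vm_object &&
            (prev->object.vm_object == NULL ||
            prev->offset + (prev->end - prev->start) == entry->offset) &&
            prev->eflags == entry->eflags &&
            prev->protection == entry->protection &&
            prev->max_protection == entry->max_protection &&
            prev->inheritance == entry->inheritance &&
            prev->wired_count == entry->wired_count &&
            prev->cred == entry->cred);
}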
[… 452 lines not shown …] | vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) | ||||
if (entry->object.vm_object != NULL) | if (entry->object.vm_object != NULL) | ||||
vm_object_deallocate(entry->object.vm_object); | vm_object_deallocate(entry->object.vm_object); | ||||
if (entry->cred != NULL) | if (entry->cred != NULL) | ||||
crfree(entry->cred); | crfree(entry->cred); | ||||
vm_map_entry_dispose(map, entry); | vm_map_entry_dispose(map, entry); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_simplify_entry: | * vm_map_try_merge_entries: | ||||
* | * | ||||
* Simplify the given map entry by merging with either neighbor. This | * Compare the given map entry to its predecessor, and merge its | ||||
* routine also has the ability to merge with both neighbors. | * predecessor into it if possible. The entry remains valid, and may be | ||||
* extended. The predecessor may be deleted. | |||||
* | * | ||||
alc: The clause beginning with "and" seems wrong. Specifically, it has the "direction" of the merge backwards. | |||||
* The map must be locked. | * The map must be locked. | ||||
* | |||||
* This routine guarantees that the passed entry remains valid (though | |||||
* possibly extended). When merging, this routine may delete one or | |||||
* both neighbors. | |||||
*/ | */ | ||||
void | void | ||||
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) | vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry) | ||||
[Not Done] markj: I'd suggest a name like vm_map_try_merge_entries() instead. | |||||
{ | { | ||||
vm_map_entry_t next, prev; | |||||
if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0) | VM_MAP_ASSERT_LOCKED(map); | ||||
[Not Done] kib: Assert that the map is exclusively locked. | |||||
return; | if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && | ||||
prev = entry->prev; | vm_map_mergeable_neighbors(prev, entry)) { | ||||
if (vm_map_mergeable_neighbors(prev, entry)) { | |||||
vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT); | vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT); | ||||
vm_map_merged_neighbor_dispose(map, prev); | vm_map_merged_neighbor_dispose(map, prev); | ||||
} | } | ||||
next = entry->next; | |||||
if (vm_map_mergeable_neighbors(entry, next)) { | |||||
vm_map_entry_unlink(map, next, UNLINK_MERGE_PREV); | |||||
vm_map_merged_neighbor_dispose(map, next); | |||||
} | } | ||||
} | |||||
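
With the merge helper now taking the predecessor explicitly, every caller in this change converges on one iteration shape. A condensed sketch of that pattern, using vm_map_lookup_entry_and_prev() (defined later in this diff) and a hypothetical loop body standing in for the per-entry work:

/* Hypothetical caller; compare the call sites changed below. */
prev = vm_map_lookup_entry_and_prev(map, start, &entry);
if (prev == entry)
        prev = _vm_map_clip_start(map, entry, start);
for (current = entry; current->start < end;
    prev = current, current = current->next) {
        vm_map_clip_end(map, current, end);
        /* ... update current: protection, advice, inheritance, ... */
        vm_map_try_merge_entries(map, prev, current);
}
/* Merge the last updated entry with its unmodified successor. */
vm_map_try_merge_entries(map, prev, current);

The final call outside the loop is what replaces the old behavior of vm_map_simplify_entry() merging with both neighbors.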
/* | /* | ||||
* vm_map_entry_back: | * vm_map_entry_back: | ||||
* | * | ||||
* Allocate an object to back a map entry. | * Allocate an object to back a map entry. | ||||
*/ | */ | ||||
static inline void | static inline void | ||||
vm_map_entry_back(vm_map_entry_t entry) | vm_map_entry_back(vm_map_entry_t entry) | ||||
[… 56 lines not shown …] | #define vm_map_clip_start(map, entry, startaddr) \ | ||||
if (startaddr > entry->start) \ | if (startaddr > entry->start) \ | ||||
_vm_map_clip_start(map, entry, startaddr); \ | _vm_map_clip_start(map, entry, startaddr); \ | ||||
} | } | ||||
/* | /* | ||||
* This routine is called only when it is known that | * This routine is called only when it is known that | ||||
* the entry must be split. | * the entry must be split. | ||||
*/ | */ | ||||
static void | static vm_map_entry_t | ||||
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) | _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) | ||||
{ | { | ||||
vm_map_entry_t new_entry; | vm_map_entry_t new_entry; | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
KASSERT(entry->end > start && entry->start < start, | KASSERT(entry->end > start && entry->start < start, | ||||
("_vm_map_clip_start: invalid clip of entry %p", entry)); | ("_vm_map_clip_start: invalid clip of entry %p", entry)); | ||||
[… 25 lines not shown …] | if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { | ||||
/* | /* | ||||
* The object->un_pager.vnp.writemappings for the | * The object->un_pager.vnp.writemappings for the | ||||
* object of MAP_ENTRY_VN_WRITECNT type entry shall be | * object of MAP_ENTRY_VN_WRITECNT type entry shall be | ||||
* kept as is here. The virtual pages are | * kept as is here. The virtual pages are | ||||
* re-distributed among the clipped entries, so the sum is | * re-distributed among the clipped entries, so the sum is | ||||
* left the same. | * left the same. | ||||
*/ | */ | ||||
} | } | ||||
return (new_entry); | |||||
} | } | ||||
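
Returning the new entry from _vm_map_clip_start() matters because the split produces exactly the predecessor that the merge loops need. A worked example with hypothetical addresses:

/*
 * Before the clip:  entry = [0x1000, 0x4000), start = 0x2000.
 * After the clip:   new_entry = [0x1000, 0x2000) immediately precedes
 *                   entry     = [0x2000, 0x4000).
 * The returned new_entry serves as the "prev" that callers pass to
 * vm_map_try_merge_entries() once they have finished updating entry.
 */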
/* | /* | ||||
* vm_map_clip_end: [ internal use only ] | * vm_map_clip_end: [ internal use only ] | ||||
* | * | ||||
* Asserts that the given entry ends at or before | * Asserts that the given entry ends at or before | ||||
* the specified address; if necessary, | * the specified address; if necessary, | ||||
* it splits the entry into two. | * it splits the entry into two. | ||||
[… 202 lines not shown …] | vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, | ||||
} | } | ||||
if (p_start != NULL) | if (p_start != NULL) | ||||
pmap_enter_object(map->pmap, start, addr + ptoa(psize), | pmap_enter_object(map->pmap, start, addr + ptoa(psize), | ||||
p_start, prot); | p_start, prot); | ||||
VM_OBJECT_RUNLOCK(object); | VM_OBJECT_RUNLOCK(object); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_lookup_entry_and_prev: [ internal use only ] | |||||
* | |||||
* Finds the map entry that contains the predecessor of the given address, | |||||
* or the predecessor of the entry that starts at the given address. | |||||
* Sets *entry to the map entry that will contain the given address | |||||
* after clipping. | |||||
*/ | |||||
static vm_map_entry_t | |||||
vm_map_lookup_entry_and_prev(vm_map_t map, vm_offset_t start, | |||||
vm_map_entry_t *entry) | |||||
{ | |||||
vm_map_entry_t prev; | |||||
if (start == 0) | |||||
prev = &map->header; | |||||
else | |||||
vm_map_lookup_entry(map, start - 1, &prev); | |||||
*entry = prev->end > start ? prev : prev->next; | |||||
return (prev); | |||||
} | |||||
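
The helper folds the old lookup-then-advance idiom into one call. A worked example of its contract, assuming a map whose minimum offset is 0 and two hypothetical entries A = [0x1000, 0x3000) and B = [0x3000, 0x5000):

/*
 * start = 0x2000 (inside A):     prev = A, *entry = A (A will be clipped)
 * start = 0x3000 (B's boundary): prev = A, *entry = B (no clip needed)
 * start = 0x5000 (beyond B):     prev = B, *entry = B->next
 * start = 0:                     prev = &map->header, *entry = the map's
 *                                first entry
 */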
/* | |||||
* vm_map_protect: | * vm_map_protect: | ||||
* | * | ||||
* Sets the protection of the specified address | * Sets the protection of the specified address | ||||
* region in the target map. If "set_max" is | * region in the target map. If "set_max" is | ||||
* specified, the maximum protection is to be set; | * specified, the maximum protection is to be set; | ||||
* otherwise, only the current protection is affected. | * otherwise, only the current protection is affected. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
vm_prot_t new_prot, boolean_t set_max) | vm_prot_t new_prot, boolean_t set_max) | ||||
{ | { | ||||
vm_map_entry_t current, entry, in_tran; | vm_map_entry_t current, entry, in_tran, prev; | ||||
vm_object_t obj; | vm_object_t obj; | ||||
struct ucred *cred; | struct ucred *cred; | ||||
vm_prot_t old_prot; | vm_prot_t old_prot; | ||||
int rv; | int rv; | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
again: | again: | ||||
in_tran = NULL; | in_tran = NULL; | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
/* | /* | ||||
* Ensure that we are not concurrently wiring pages. vm_map_wire() may | * Ensure that we are not concurrently wiring pages. vm_map_wire() may | ||||
* need to fault pages into the map and will drop the map lock while | * need to fault pages into the map and will drop the map lock while | ||||
* doing so, and the VM object may end up in an inconsistent state if we | * doing so, and the VM object may end up in an inconsistent state if we | ||||
* update the protection on the map entry in between faults. | * update the protection on the map entry in between faults. | ||||
*/ | */ | ||||
vm_map_wait_busy(map); | vm_map_wait_busy(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
prev = vm_map_lookup_entry_and_prev(map, start, &entry); | |||||
if (!vm_map_lookup_entry(map, start, &entry)) | |||||
entry = entry->next; | |||||
/* | /* | ||||
[Not Done] alc: What happens here when "start == 0"? | |||||
[Done] dougm: We get prev==&map->header. Before the most recent change, something bad would have happened. | |||||
* Make a first pass to check for protection violations. | * Make a first pass to check for protection violations. | ||||
*/ | */ | ||||
for (current = entry; current->start < end; current = current->next) { | for (current = entry; current->start < end; current = current->next) { | ||||
[Not Done] kib: extra () | |||||
if ((current->eflags & MAP_ENTRY_GUARD) != 0) | if ((current->eflags & MAP_ENTRY_GUARD) != 0) | ||||
continue; | continue; | ||||
if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { | if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_INVALID_ARGUMENT); | return (KERN_INVALID_ARGUMENT); | ||||
} | } | ||||
if ((new_prot & current->max_protection) != new_prot) { | if ((new_prot & current->max_protection) != new_prot) { | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
[… 20 lines not shown …] | again: | ||||
/* | /* | ||||
* Before changing the protections, try to reserve swap space for any | * Before changing the protections, try to reserve swap space for any | ||||
* private (i.e., copy-on-write) mappings that are transitioning from | * private (i.e., copy-on-write) mappings that are transitioning from | ||||
* read-only to read/write access. If a reservation fails, break out | * read-only to read/write access. If a reservation fails, break out | ||||
* of this loop early and let the next loop simplify the entries, since | * of this loop early and let the next loop simplify the entries, since | ||||
* some may now be mergeable. | * some may now be mergeable. | ||||
*/ | */ | ||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
vm_map_clip_start(map, entry, start); | if (prev == entry) | ||||
prev = _vm_map_clip_start(map, entry, start); | |||||
for (current = entry; current->start < end; current = current->next) { | for (current = entry; current->start < end; current = current->next) { | ||||
vm_map_clip_end(map, current, end); | vm_map_clip_end(map, current, end); | ||||
if (set_max || | if (set_max || | ||||
((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || | ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || | ||||
ENTRY_CHARGED(current) || | ENTRY_CHARGED(current) || | ||||
(current->eflags & MAP_ENTRY_GUARD) != 0) { | (current->eflags & MAP_ENTRY_GUARD) != 0) { | ||||
[… 42 lines not shown …] | again: | ||||
} | } | ||||
/* | /* | ||||
* If enough swap space was available, go back and fix up protections. | * If enough swap space was available, go back and fix up protections. | ||||
* Otherwise, just simplify entries, since some may have been modified. | * Otherwise, just simplify entries, since some may have been modified. | ||||
* [Note that clipping is not necessary the second time.] | * [Note that clipping is not necessary the second time.] | ||||
*/ | */ | ||||
for (current = entry; current->start < end; | for (current = entry; current->start < end; | ||||
vm_map_simplify_entry(map, current), current = current->next) { | vm_map_try_merge_entries(map, prev, current), | ||||
prev = current, current = prev->next) { | |||||
[Not Done] markj: Don't we want to attempt simplification after updating current->protection? | |||||
[Not Done] markj: Sorry, never mind. | |||||
[Done] dougm: We do. We are now. After this change, we still would be. Is this a suggestion about correctness or style? That is, do you just want to move the simplification to the line before "For user wired map entries...." to make clearer why we're simplifying, or are you suggesting that something gets broken here? | |||||
if (rv != KERN_SUCCESS || | if (rv != KERN_SUCCESS || | ||||
(current->eflags & MAP_ENTRY_GUARD) != 0) | (current->eflags & MAP_ENTRY_GUARD) != 0) | ||||
continue; | continue; | ||||
old_prot = current->protection; | old_prot = current->protection; | ||||
if (set_max) | if (set_max) | ||||
current->protection = | current->protection = | ||||
[… 21 lines not shown …]
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ | #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ | ||||
VM_PROT_ALL) | VM_PROT_ALL) | ||||
pmap_protect(map->pmap, current->start, | pmap_protect(map->pmap, current->start, | ||||
current->end, | current->end, | ||||
current->protection & MASK(current)); | current->protection & MASK(current)); | ||||
#undef MASK | #undef MASK | ||||
} | } | ||||
} | } | ||||
vm_map_try_merge_entries(map, prev, current); | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_madvise: | * vm_map_madvise: | ||||
* | * | ||||
* This routine traverses a process's map handling the madvise | * This routine traverses a process's map handling the madvise | ||||
* system call. Advisories are classified as either those affecting | * system call. Advisories are classified as either those affecting | ||||
* the vm_map_entry structure, or those affecting the underlying | * the vm_map_entry structure, or those affecting the underlying | ||||
* objects. | * objects. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_madvise( | vm_map_madvise( | ||||
vm_map_t map, | vm_map_t map, | ||||
vm_offset_t start, | vm_offset_t start, | ||||
vm_offset_t end, | vm_offset_t end, | ||||
int behav) | int behav) | ||||
{ | { | ||||
vm_map_entry_t current, entry; | vm_map_entry_t current, entry, prev; | ||||
bool modify_map; | bool modify_map; | ||||
/* | /* | ||||
* Some madvise calls directly modify the vm_map_entry, in which case | * Some madvise calls directly modify the vm_map_entry, in which case | ||||
* we need to use an exclusive lock on the map and we need to perform | * we need to use an exclusive lock on the map and we need to perform | ||||
* various clipping operations. Otherwise we only need a read-lock | * various clipping operations. Otherwise we only need a read-lock | ||||
* on the map. | * on the map. | ||||
*/ | */ | ||||
[… 21 lines not shown …] | vm_map_madvise( | ||||
default: | default: | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
/* | /* | ||||
* Locate starting entry and clip if necessary. | * Locate starting entry and clip if necessary. | ||||
*/ | */ | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
prev = vm_map_lookup_entry_and_prev(map, start, &entry); | |||||
if (vm_map_lookup_entry(map, start, &entry)) { | |||||
if (modify_map) | |||||
vm_map_clip_start(map, entry, start); | |||||
} else { | |||||
entry = entry->next; | |||||
} | |||||
if (modify_map) { | if (modify_map) { | ||||
/* | /* | ||||
* madvise behaviors that are implemented in the vm_map_entry. | * madvise behaviors that are implemented in the vm_map_entry. | ||||
* | * | ||||
* We clip the vm_map_entry so that behavioral changes are | * We clip the vm_map_entry so that behavioral changes are | ||||
* limited to the specified address range. | * limited to the specified address range. | ||||
*/ | */ | ||||
if (prev == entry) | |||||
prev = _vm_map_clip_start(map, entry, start); | |||||
for (current = entry; current->start < end; | for (current = entry; current->start < end; | ||||
current = current->next) { | prev = current, current = prev->next) { | ||||
if (current->eflags & MAP_ENTRY_IS_SUB_MAP) | if (current->eflags & MAP_ENTRY_IS_SUB_MAP) | ||||
continue; | continue; | ||||
vm_map_clip_end(map, current, end); | vm_map_clip_end(map, current, end); | ||||
switch (behav) { | switch (behav) { | ||||
case MADV_NORMAL: | case MADV_NORMAL: | ||||
vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); | vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); | ||||
[… 14 lines not shown …] | for (current = entry; current->start < end; | ||||
current->eflags |= MAP_ENTRY_NOCOREDUMP; | current->eflags |= MAP_ENTRY_NOCOREDUMP; | ||||
break; | break; | ||||
case MADV_CORE: | case MADV_CORE: | ||||
current->eflags &= ~MAP_ENTRY_NOCOREDUMP; | current->eflags &= ~MAP_ENTRY_NOCOREDUMP; | ||||
break; | break; | ||||
default: | default: | ||||
break; | break; | ||||
} | } | ||||
vm_map_simplify_entry(map, current); | vm_map_try_merge_entries(map, prev, current); | ||||
} | } | ||||
vm_map_try_merge_entries(map, prev, current); | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
} else { | } else { | ||||
vm_pindex_t pstart, pend; | vm_pindex_t pstart, pend; | ||||
/* | /* | ||||
* madvise behaviors that are implemented in the underlying | * madvise behaviors that are implemented in the underlying | ||||
* vm_object. | * vm_object. | ||||
* | * | ||||
[… 71 lines not shown …]
* range in the target map. Inheritance | * range in the target map. Inheritance | ||||
* affects how the map will be shared with | * affects how the map will be shared with | ||||
* child maps at the time of vmspace_fork. | * child maps at the time of vmspace_fork. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
vm_inherit_t new_inheritance) | vm_inherit_t new_inheritance) | ||||
{ | { | ||||
vm_map_entry_t entry; | vm_map_entry_t entry, prev; | ||||
vm_map_entry_t temp_entry; | |||||
switch (new_inheritance) { | switch (new_inheritance) { | ||||
case VM_INHERIT_NONE: | case VM_INHERIT_NONE: | ||||
case VM_INHERIT_COPY: | case VM_INHERIT_COPY: | ||||
case VM_INHERIT_SHARE: | case VM_INHERIT_SHARE: | ||||
case VM_INHERIT_ZERO: | case VM_INHERIT_ZERO: | ||||
break; | break; | ||||
default: | default: | ||||
return (KERN_INVALID_ARGUMENT); | return (KERN_INVALID_ARGUMENT); | ||||
} | } | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (vm_map_lookup_entry(map, start, &temp_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
entry = temp_entry; | if (entry == prev) | ||||
vm_map_clip_start(map, entry, start); | prev = _vm_map_clip_start(map, entry, start); | ||||
} else | |||||
entry = temp_entry->next; | |||||
while (entry->start < end) { | while (entry->start < end) { | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || | if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || | ||||
new_inheritance != VM_INHERIT_ZERO) | new_inheritance != VM_INHERIT_ZERO) | ||||
entry->inheritance = new_inheritance; | entry->inheritance = new_inheritance; | ||||
vm_map_simplify_entry(map, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
prev = entry; | |||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
vm_map_try_merge_entries(map, prev, entry); | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_entry_in_transition: | * vm_map_entry_in_transition: | ||||
* | * | ||||
* Release the map lock, and sleep until the entry is no longer in | * Release the map lock, and sleep until the entry is no longer in | ||||
[… 46 lines not shown …]
* vm_map_unwire: | * vm_map_unwire: | ||||
* | * | ||||
* Implements both kernel and user unwiring. | * Implements both kernel and user unwiring. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
int flags) | int flags) | ||||
{ | { | ||||
vm_map_entry_t entry, first_entry; | vm_map_entry_t entry, prev; | ||||
int rv; | int rv; | ||||
bool first_iteration, holes_ok, need_wakeup, user_unwire; | bool first_iteration, holes_ok, need_wakeup, user_unwire; | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | ||||
user_unwire = (flags & VM_MAP_WIRE_USER) != 0; | user_unwire = (flags & VM_MAP_WIRE_USER) != 0; | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
if (holes_ok) | if (prev != entry && entry->start > start && !holes_ok) { | ||||
first_entry = first_entry->next; | |||||
else { | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | } | ||||
} | |||||
first_iteration = true; | first_iteration = true; | ||||
entry = first_entry; | |||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
while (entry->start < end) { | while (entry->start < end) { | ||||
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | ||||
/* | /* | ||||
* We have not yet clipped the entry. | * We have not yet clipped the entry. | ||||
*/ | */ | ||||
entry = vm_map_entry_in_transition(map, start, &end, | entry = vm_map_entry_in_transition(map, start, &end, | ||||
holes_ok, entry); | holes_ok, entry); | ||||
if (entry == NULL) { | if (entry == NULL) { | ||||
if (first_iteration) { | if (first_iteration) { | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | } | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
break; | break; | ||||
} | } | ||||
first_entry = first_iteration ? entry : NULL; | prev = NULL; | ||||
continue; | continue; | ||||
} | } | ||||
first_iteration = false; | first_iteration = false; | ||||
if (prev == entry) | |||||
prev = _vm_map_clip_start(map, entry, start); | |||||
else | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
/* | /* | ||||
* Mark the entry in case the map lock is released. (See | * Mark the entry in case the map lock is released. (See | ||||
* above.) | * above.) | ||||
*/ | */ | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | ||||
entry->wiring_thread == NULL, | entry->wiring_thread == NULL, | ||||
("owned map entry %p", entry)); | ("%s: owned map entry %p", __func__, entry)); | ||||
entry->eflags |= MAP_ENTRY_IN_TRANSITION; | entry->eflags |= MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = curthread; | entry->wiring_thread = curthread; | ||||
/* | /* | ||||
* Check the map for holes in the specified region. | * Check the map for holes in the specified region. | ||||
* If holes_ok, skip this check. | * If holes_ok, skip this check. | ||||
*/ | */ | ||||
if (!holes_ok && | if (!holes_ok && | ||||
(entry->end < end && entry->next->start > entry->end)) { | (entry->end < end && entry->next->start > entry->end)) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
break; | break; | ||||
} | } | ||||
/* | /* | ||||
* If system unwiring, require that the entry is system wired. | * If system unwiring, require that the entry is system wired. | ||||
*/ | */ | ||||
if (!user_unwire && | if (!user_unwire && | ||||
vm_map_entry_system_wired_count(entry) == 0) { | vm_map_entry_system_wired_count(entry) == 0) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ARGUMENT; | rv = KERN_INVALID_ARGUMENT; | ||||
break; | break; | ||||
} | } | ||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
need_wakeup = false; | need_wakeup = false; | ||||
if (first_entry == NULL && | if (prev == NULL) { | ||||
!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); | KASSERT(entry->start == start || holes_ok, | ||||
first_entry = first_entry->next; | ("%s: lookup failed", __func__)); | ||||
} | } | ||||
for (entry = first_entry; entry->start < end; entry = entry->next) { | for (entry = prev->next; entry->start < end; | ||||
[Not Done] kib: This pattern repeats more than once. Might be it makes sense to create a helper, e.g. vm_map_lookup_entry_and_prev() ? | |||||
prev = entry, entry = entry->next) { | |||||
/* | /* | ||||
* If holes_ok was specified, an empty | * If holes_ok was specified, an empty | ||||
* space in the unwired region could have been mapped | * space in the unwired region could have been mapped | ||||
* while the map lock was dropped for draining | * while the map lock was dropped for draining | ||||
* MAP_ENTRY_IN_TRANSITION. Moreover, another thread | * MAP_ENTRY_IN_TRANSITION. Moreover, another thread | ||||
* could be simultaneously wiring this new mapping | * could be simultaneously wiring this new mapping | ||||
* entry. Detect these cases and skip any entries | * entry. Detect these cases and skip any entries | ||||
* marked as in transition by us. | * marked as in transition by us. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | ||||
entry->wiring_thread != curthread) { | entry->wiring_thread != curthread) { | ||||
KASSERT(holes_ok, | KASSERT(holes_ok, | ||||
("vm_map_unwire: !HOLESOK and new/changed entry")); | ("%s: !HOLESOK and new/changed entry", __func__)); | ||||
continue; | continue; | ||||
} | } | ||||
if (rv == KERN_SUCCESS && (!user_unwire || | if (rv == KERN_SUCCESS && (!user_unwire || | ||||
(entry->eflags & MAP_ENTRY_USER_WIRED))) { | (entry->eflags & MAP_ENTRY_USER_WIRED))) { | ||||
if (entry->wired_count == 1) | if (entry->wired_count == 1) | ||||
vm_map_entry_unwire(map, entry); | vm_map_entry_unwire(map, entry); | ||||
else | else | ||||
entry->wired_count--; | entry->wired_count--; | ||||
if (user_unwire) | if (user_unwire) | ||||
entry->eflags &= ~MAP_ENTRY_USER_WIRED; | entry->eflags &= ~MAP_ENTRY_USER_WIRED; | ||||
} | } | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | ||||
("vm_map_unwire: in-transition flag missing %p", entry)); | ("%s: in-transition flag missing %p", __func__, entry)); | ||||
KASSERT(entry->wiring_thread == curthread, | KASSERT(entry->wiring_thread == curthread, | ||||
("vm_map_unwire: alien wire %p", entry)); | ("%s: alien wire %p", __func__, entry)); | ||||
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; | entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = NULL; | entry->wiring_thread = NULL; | ||||
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | ||||
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | ||||
need_wakeup = true; | need_wakeup = true; | ||||
} | } | ||||
vm_map_simplify_entry(map, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
} | } | ||||
vm_map_try_merge_entries(map, prev, entry); | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
if (need_wakeup) | if (need_wakeup) | ||||
vm_map_wakeup(map); | vm_map_wakeup(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
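
In the wiring paths, prev also acts as a position token: any path that may drop the map lock sets prev = NULL, and the cleanup pass re-derives it before merging, because entries can be clipped while the lock is released. A minimal sketch of that idiom, with the per-entry work elided:

/* Sketch of the recovery step shared by the unwire and wire paths. */
if (prev == NULL) {
        /* The lock was dropped; the saved predecessor may be stale. */
        prev = vm_map_lookup_entry_and_prev(map, start, &entry);
}
for (entry = prev->next; entry->start < end;
    prev = entry, entry = entry->next) {
        /* ... clear MAP_ENTRY_IN_TRANSITION and adjust wiring ... */
        vm_map_try_merge_entries(map, prev, entry);
}
vm_map_try_merge_entries(map, prev, entry);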
static void | static void | ||||
vm_map_wire_user_count_sub(u_long npages) | vm_map_wire_user_count_sub(u_long npages) | ||||
[… 69 lines not shown …]
* vm_map_wire_locked: | * vm_map_wire_locked: | ||||
* | * | ||||
* Implements both kernel and user wiring. Returns with the map locked, | * Implements both kernel and user wiring. Returns with the map locked, | ||||
* the map lock may be dropped. | * the map lock may be dropped. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) | vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) | ||||
{ | { | ||||
vm_map_entry_t entry, first_entry, tmp_entry; | vm_map_entry_t entry, prev, tmp_entry; | ||||
vm_offset_t faddr, saved_end, saved_start; | vm_offset_t faddr, saved_end, saved_start; | ||||
u_long npages; | u_long npages; | ||||
u_int last_timestamp; | u_int last_timestamp; | ||||
int rv; | int rv; | ||||
bool first_iteration, holes_ok, need_wakeup, user_wire; | bool first_iteration, holes_ok, need_wakeup, user_wire; | ||||
vm_prot_t prot; | vm_prot_t prot; | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
prot = 0; | prot = 0; | ||||
if (flags & VM_MAP_WIRE_WRITE) | if (flags & VM_MAP_WIRE_WRITE) | ||||
prot |= VM_PROT_WRITE; | prot |= VM_PROT_WRITE; | ||||
holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | ||||
user_wire = (flags & VM_MAP_WIRE_USER) != 0; | user_wire = (flags & VM_MAP_WIRE_USER) != 0; | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
if (holes_ok) | if (prev != entry && entry->start > start && !holes_ok) | ||||
first_entry = first_entry->next; | |||||
else | |||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | |||||
first_iteration = true; | first_iteration = true; | ||||
entry = first_entry; | |||||
while (entry->start < end) { | while (entry->start < end) { | ||||
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | ||||
/* | /* | ||||
* We have not yet clipped the entry. | * We have not yet clipped the entry. | ||||
*/ | */ | ||||
entry = vm_map_entry_in_transition(map, start, &end, | entry = vm_map_entry_in_transition(map, start, &end, | ||||
holes_ok, entry); | holes_ok, entry); | ||||
if (entry == NULL) { | if (entry == NULL) { | ||||
if (first_iteration) | if (first_iteration) | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | goto done; | ||||
} | } | ||||
first_entry = first_iteration ? entry : NULL; | prev = NULL; | ||||
continue; | continue; | ||||
} | } | ||||
first_iteration = false; | first_iteration = false; | ||||
if (prev == entry) | |||||
prev = _vm_map_clip_start(map, entry, start); | |||||
else | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
/* | /* | ||||
* Mark the entry in case the map lock is released. (See | * Mark the entry in case the map lock is released. (See | ||||
* above.) | * above.) | ||||
*/ | */ | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | ||||
entry->wiring_thread == NULL, | entry->wiring_thread == NULL, | ||||
("owned map entry %p", entry)); | ("%s: owned map entry %p", __func__, entry)); | ||||
entry->eflags |= MAP_ENTRY_IN_TRANSITION; | entry->eflags |= MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = curthread; | entry->wiring_thread = curthread; | ||||
if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | ||||
|| (entry->protection & prot) != prot) { | || (entry->protection & prot) != prot) { | ||||
entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; | entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; | ||||
if (!holes_ok) { | if (!holes_ok) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
[… 38 lines not shown …] | if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | ||||
* Look again for the entry because the map was | * Look again for the entry because the map was | ||||
* modified while it was unlocked. The entry | * modified while it was unlocked. The entry | ||||
* may have been clipped, but NOT merged or | * may have been clipped, but NOT merged or | ||||
* deleted. | * deleted. | ||||
*/ | */ | ||||
if (!vm_map_lookup_entry(map, saved_start, | if (!vm_map_lookup_entry(map, saved_start, | ||||
&tmp_entry)) | &tmp_entry)) | ||||
KASSERT(false, | KASSERT(false, | ||||
("vm_map_wire: lookup failed")); | ("%s: lookup failed", __func__)); | ||||
if (entry == first_entry) | prev = NULL; | ||||
first_entry = tmp_entry; | |||||
else | |||||
first_entry = NULL; | |||||
entry = tmp_entry; | entry = tmp_entry; | ||||
while (entry->end < saved_end) { | while (entry->end < saved_end) { | ||||
/* | /* | ||||
* In case of failure, handle entries | * In case of failure, handle entries | ||||
* that were not fully wired here; | * that were not fully wired here; | ||||
* fully wired entries are handled | * fully wired entries are handled | ||||
* later. | * later. | ||||
*/ | */ | ||||
[… 25 lines not shown …] | if (!holes_ok && | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | goto done; | ||||
} | } | ||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
done: | done: | ||||
need_wakeup = false; | need_wakeup = false; | ||||
if (first_entry == NULL && | if (prev == NULL) { | ||||
!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
KASSERT(holes_ok, ("vm_map_wire: lookup failed")); | KASSERT(entry->start == start || holes_ok, | ||||
first_entry = first_entry->next; | ("%s: lookup failed", __func__)); | ||||
} | } | ||||
for (entry = first_entry; entry->start < end; entry = entry->next) { | for (entry = prev->next; entry->start < end; | ||||
prev = entry, entry = entry->next) { | |||||
/* | /* | ||||
* If holes_ok was specified, an empty | * If holes_ok was specified, an empty | ||||
* space in the unwired region could have been mapped | * space in the unwired region could have been mapped | ||||
* while the map lock was dropped for faulting in the | * while the map lock was dropped for faulting in the | ||||
* pages or draining MAP_ENTRY_IN_TRANSITION. | * pages or draining MAP_ENTRY_IN_TRANSITION. | ||||
* Moreover, another thread could be simultaneously | * Moreover, another thread could be simultaneously | ||||
* wiring this new mapping entry. Detect these cases | * wiring this new mapping entry. Detect these cases | ||||
* and skip any entries marked as in transition not by us. | * and skip any entries marked as in transition not by us. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | ||||
entry->wiring_thread != curthread) { | entry->wiring_thread != curthread) { | ||||
KASSERT(holes_ok, | KASSERT(holes_ok, | ||||
("vm_map_wire: !HOLESOK and new/changed entry")); | ("%s: !HOLESOK and new/changed entry", __func__)); | ||||
continue; | continue; | ||||
} | } | ||||
if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | ||||
/* do nothing */ | /* do nothing */ | ||||
} else if (rv == KERN_SUCCESS) { | } else if (rv == KERN_SUCCESS) { | ||||
if (user_wire) | if (user_wire) | ||||
entry->eflags |= MAP_ENTRY_USER_WIRED; | entry->eflags |= MAP_ENTRY_USER_WIRED; | ||||
[… 13 lines not shown …] | if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | ||||
vm_map_entry_unwire(map, entry); | vm_map_entry_unwire(map, entry); | ||||
if (user_wire) | if (user_wire) | ||||
vm_map_wire_user_count_sub( | vm_map_wire_user_count_sub( | ||||
atop(entry->end - entry->start)); | atop(entry->end - entry->start)); | ||||
} else | } else | ||||
entry->wired_count--; | entry->wired_count--; | ||||
} | } | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | ||||
("vm_map_wire: in-transition flag missing %p", entry)); | ("%s: in-transition flag missing %p", __func__, entry)); | ||||
KASSERT(entry->wiring_thread == curthread, | KASSERT(entry->wiring_thread == curthread, | ||||
("vm_map_wire: alien wire %p", entry)); | ("%s: alien wire %p", __func__, entry)); | ||||
entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | | entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | | ||||
MAP_ENTRY_WIRE_SKIPPED); | MAP_ENTRY_WIRE_SKIPPED); | ||||
entry->wiring_thread = NULL; | entry->wiring_thread = NULL; | ||||
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | ||||
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | ||||
need_wakeup = true; | need_wakeup = true; | ||||
} | } | ||||
vm_map_simplify_entry(map, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
} | } | ||||
vm_map_try_merge_entries(map, prev, entry); | |||||
if (need_wakeup) | if (need_wakeup) | ||||
vm_map_wakeup(map); | vm_map_wakeup(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_sync | * vm_map_sync | ||||
* | * | ||||
[… 621 lines not shown …] | case VM_INHERIT_SHARE: | ||||
&old_entry->offset, | &old_entry->offset, | ||||
old_entry->end - old_entry->start); | old_entry->end - old_entry->start); | ||||
old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; | old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; | ||||
/* Transfer the second reference too. */ | /* Transfer the second reference too. */ | ||||
vm_object_reference( | vm_object_reference( | ||||
old_entry->object.vm_object); | old_entry->object.vm_object); | ||||
/* | /* | ||||
* As in vm_map_simplify_entry(), the | * As in vm_map_merged_neighbor_dispose(), | ||||
* vnode lock will not be acquired in | * the vnode lock will not be acquired in | ||||
* this call to vm_object_deallocate(). | * this call to vm_object_deallocate(). | ||||
*/ | */ | ||||
vm_object_deallocate(object); | vm_object_deallocate(object); | ||||
object = old_entry->object.vm_object; | object = old_entry->object.vm_object; | ||||
} | } | ||||
VM_OBJECT_WLOCK(object); | VM_OBJECT_WLOCK(object); | ||||
vm_object_clear_flag(object, OBJ_ONEMAPPING); | vm_object_clear_flag(object, OBJ_ONEMAPPING); | ||||
if (old_entry->cred != NULL) { | if (old_entry->cred != NULL) { | ||||
[… 946 lines not shown …]