Changeset View
Standalone View
vm_map.c
Show First 20 Lines • Show All 2,833 Lines • ▼ Show 20 Lines | while (entry->start < end) { | ||||
vm_map_simplify_entry(map, entry); | vm_map_simplify_entry(map, entry); | ||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_unwire: | * vm_map_get_avail_entry: | ||||
* | * | ||||
* Implements both kernel and user unwiring. | * Get the first entry not less than start, or entry->start, | ||||
alc: The following is true: "Release and reacquire the map lock." But, it misses the more important… | |||||
* once that entry is not in transition. Return NULL and | |||||
* set end if the entry comes after an unacceptable hole. | |||||
Done Inline ActionsTypo: "unacceptable" markj: Typo: "unacceptable" | |||||
*/ | */ | ||||
int | static vm_map_entry_t | ||||
Done Inline ActionsI would call it vm_map_entry_in_transition ... kib: I would call it vm_map_entry_in_transition ... | |||||
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_get_avail_entry(vm_map_t map, vm_offset_t start, vm_offset_t *end, | ||||
int flags) | bool holes_ok, vm_map_entry_t entry, vm_map_entry_t *first_entry) | ||||
{ | { | ||||
vm_map_entry_t entry, first_entry, tmp_entry; | |||||
vm_offset_t saved_start; | |||||
unsigned int last_timestamp; | unsigned int last_timestamp; | ||||
int rv; | bool first_call; | ||||
boolean_t need_wakeup, result, user_unwire; | |||||
if (start == end) | first_call = entry == NULL; | ||||
Done Inline Actions... and asserted the map lock and MAP_ENTRY_IN_TRANSITION flag there. kib: ... and asserted the map lock and MAP_ENTRY_IN_TRANSITION flag there. | |||||
return (KERN_SUCCESS); | for (;;) { | ||||
user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; | if (entry == NULL) { | ||||
Done Inline ActionsI almost always dislike the reuse of the function arguments as locals. Would use of different name for the argument allow to eliminate first_call bool as well ? kib: I almost always dislike the reuse of the function arguments as locals. Would use of different… | |||||
vm_map_lock(map); | if (!vm_map_lookup_entry(map, start, &entry)) { | ||||
VM_MAP_RANGE_CHECK(map, start, end); | if (!holes_ok) { | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) { | *end = start; | ||||
if (flags & VM_MAP_WIRE_HOLESOK) | return (NULL); | ||||
first_entry = first_entry->next; | |||||
else { | |||||
vm_map_unlock(map); | |||||
return (KERN_INVALID_ADDRESS); | |||||
} | } | ||||
entry = entry->next; | |||||
} | } | ||||
Not Done Inline ActionsThis comment is wrong - the wait is not interruptible. I think the previous comment meant to suggest a possible improvement. markj: This comment is wrong - the wait is not interruptible. I think the previous comment meant to… | |||||
Not Done Inline ActionsThe comment lacks a question mark at the end, instead of dot. kib: The comment lacks a question mark at the end, instead of dot. | |||||
Not Done Inline Actions... and also why the comment appeared inside the body of an otherwise empty "if". alc: ... and also why the comment appeared inside the body of an otherwise empty "if". | |||||
Done Inline ActionsThe formatting and presentation of this comment has changed only as a result of reviewer feedback. I'm happy to leave it in whatever state pleases everyone else. I don't know what that state is. dougm: The formatting and presentation of this comment has changed only as a result of reviewer… | |||||
last_timestamp = map->timestamp; | } | ||||
entry = first_entry; | if (entry->start >= *end || | ||||
while (entry->start < end) { | (entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0) { | ||||
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | if (first_call) | ||||
*first_entry = entry; | |||||
return (entry); | |||||
} | |||||
/* | /* | ||||
* We have not yet clipped the entry. | * We have not yet clipped the entry. | ||||
*/ | */ | ||||
saved_start = (start >= entry->start) ? start : | start = ulmax(start, entry->start); | ||||
Done Inline Actionsthis assumes that vm_offset_t is same as unsigned long ? kib: this assumes that vm_offset_t is same as unsigned long ? | |||||
entry->start; | |||||
entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; | ||||
last_timestamp = map->timestamp; | |||||
if (vm_map_unlock_and_wait(map, 0)) { | if (vm_map_unlock_and_wait(map, 0)) { | ||||
/* | /* | ||||
* Allow interruption of user unwiring? | * Allow interruption of user unwiring? | ||||
*/ | */ | ||||
} | } | ||||
Done Inline ActionsI think if () proven to not be useful, I suggest removing it and converting the comment to one-liner. kib: I think if () proven to not be useful, I suggest removing it and converting the comment to one… | |||||
vm_map_lock(map); | vm_map_lock(map); | ||||
if (last_timestamp+1 != map->timestamp) { | if (last_timestamp+1 != map->timestamp) { | ||||
Done Inline ActionsFix style, '+' should have spaces around it. kib: Fix style, '+' should have spaces around it. | |||||
/* | /* | ||||
* Look again for the entry because the map was | * Look again for the entry because the map was | ||||
* modified while it was unlocked. | * modified while it was unlocked. Specifically, the | ||||
* Specifically, the entry may have been | * entry may have been clipped, merged, or deleted. | ||||
* clipped, merged, or deleted. | |||||
*/ | */ | ||||
if (!vm_map_lookup_entry(map, saved_start, | entry = NULL; | ||||
&tmp_entry)) { | } | ||||
if (flags & VM_MAP_WIRE_HOLESOK) | if (!first_call) | ||||
tmp_entry = tmp_entry->next; | *first_entry = NULL; | ||||
else { | } | ||||
if (saved_start == start) { | } | ||||
/* | /* | ||||
* First_entry has been deleted. | * vm_map_unwire: | ||||
* | |||||
* Implements both kernel and user unwiring. | |||||
*/ | */ | ||||
int | |||||
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, | |||||
int flags) | |||||
{ | |||||
vm_map_entry_t entry, first_entry; | |||||
int rv; | |||||
boolean_t result; | |||||
bool holes_ok, need_wakeup, user_unwire; | |||||
if (start == end) | |||||
return (KERN_SUCCESS); | |||||
user_unwire = (flags & VM_MAP_WIRE_USER) != 0; | |||||
holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | |||||
vm_map_lock(map); | |||||
VM_MAP_RANGE_CHECK(map, start, end); | |||||
entry = vm_map_get_avail_entry(map, start, &end, holes_ok, | |||||
NULL, &first_entry); | |||||
if (entry == NULL) { | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | } | ||||
end = saved_start; | rv = KERN_SUCCESS; | ||||
rv = KERN_INVALID_ADDRESS; | while (entry->start < end) { | ||||
goto done; | |||||
} | |||||
} | |||||
if (entry == first_entry) | |||||
first_entry = tmp_entry; | |||||
else | |||||
first_entry = NULL; | |||||
entry = tmp_entry; | |||||
} | |||||
last_timestamp = map->timestamp; | |||||
continue; | |||||
} | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
Not Done Inline ActionsI'm puzzled by this. There may be multiple entries within the range [start, end) and it appears that the flag first_pass is being set to false just because we are past the first entry. alc: I'm puzzled by this. There may be multiple entries within the range [start, end) and it… | |||||
Done Inline ActionsThat's right. That determines whether we trust the value of 'first_pass' or NULL it and force recalculation later. dougm: That's right. That determines whether we trust the value of 'first_pass' or NULL it and force… | |||||
Not Done Inline ActionsThen, perhaps, "pass" is the wrong word. I interpret a "pass" as completing iteration over all entries in the range [start, end). alc: Then, perhaps, "pass" is the wrong word. I interpret a "pass" as completing iteration over all… | |||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
/* | /* | ||||
* Mark the entry in case the map lock is released. (See | * Mark the entry in case the map lock is released. (See | ||||
* above.) | * above.) | ||||
*/ | */ | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | ||||
entry->wiring_thread == NULL, | entry->wiring_thread == NULL, | ||||
("owned map entry %p", entry)); | ("owned map entry %p", entry)); | ||||
entry->eflags |= MAP_ENTRY_IN_TRANSITION; | entry->eflags |= MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = curthread; | entry->wiring_thread = curthread; | ||||
/* | /* | ||||
* Check the map for holes in the specified region. | * Check the map for holes in the specified region. | ||||
* If VM_MAP_WIRE_HOLESOK was specified, skip this check. | * If VM_MAP_WIRE_HOLESOK was specified, skip this check. | ||||
*/ | */ | ||||
if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && | if ((!holes_ok) && | ||||
Done Inline ActionsExtra parentheses. markj: Extra parentheses. | |||||
(entry->end < end && entry->next->start > entry->end)) { | (entry->end < end && entry->next->start > entry->end)) { | ||||
Done Inline ActionsCommit this separately. You can change line split, moving the entry->end < end expression to the previous line. kib: Commit this separately. You can change line split, moving the `entry->end < end` expression to… | |||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | break; | ||||
} | } | ||||
/* | /* | ||||
* If system unwiring, require that the entry is system wired. | * If system unwiring, require that the entry is system wired. | ||||
*/ | */ | ||||
if (!user_unwire && | if (!user_unwire && | ||||
vm_map_entry_system_wired_count(entry) == 0) { | vm_map_entry_system_wired_count(entry) == 0) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ARGUMENT; | rv = KERN_INVALID_ARGUMENT; | ||||
goto done; | break; | ||||
} | } | ||||
entry = entry->next; | entry = vm_map_get_avail_entry(map, start, &end, holes_ok, | ||||
entry->next, &first_entry); | |||||
if (entry == NULL) { | |||||
rv = KERN_INVALID_ADDRESS; | |||||
break; | |||||
} | } | ||||
rv = KERN_SUCCESS; | } | ||||
done: | need_wakeup = false; | ||||
need_wakeup = FALSE; | |||||
if (first_entry == NULL) { | if (first_entry == NULL) { | ||||
result = vm_map_lookup_entry(map, start, &first_entry); | result = vm_map_lookup_entry(map, start, &first_entry); | ||||
if (!result && (flags & VM_MAP_WIRE_HOLESOK)) | if (!result) { | ||||
KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); | |||||
Done Inline ActionsI think you can just write if (!vm_map_lookup_entry(...)) like you do elsewhere in this diff, and get rid of result. markj: I think you can just write `if (!vm_map_lookup_entry(...))` like you do elsewhere in this diff… | |||||
first_entry = first_entry->next; | first_entry = first_entry->next; | ||||
else | |||||
KASSERT(result, ("vm_map_unwire: lookup failed")); | |||||
} | } | ||||
} | |||||
for (entry = first_entry; entry->start < end; entry = entry->next) { | for (entry = first_entry; entry->start < end; entry = entry->next) { | ||||
/* | /* | ||||
* If VM_MAP_WIRE_HOLESOK was specified, an empty | * If VM_MAP_WIRE_HOLESOK was specified, an empty | ||||
* space in the unwired region could have been mapped | * space in the unwired region could have been mapped | ||||
* while the map lock was dropped for draining | * while the map lock was dropped for draining | ||||
* MAP_ENTRY_IN_TRANSITION. Moreover, another thread | * MAP_ENTRY_IN_TRANSITION. Moreover, another thread | ||||
* could be simultaneously wiring this new mapping | * could be simultaneously wiring this new mapping | ||||
* entry. Detect these cases and skip any entries | * entry. Detect these cases and skip any entries | ||||
* marked as in transition by us. | * marked as in transition by us. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | ||||
entry->wiring_thread != curthread) { | entry->wiring_thread != curthread) { | ||||
KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, | KASSERT(holes_ok, | ||||
("vm_map_unwire: !HOLESOK and new/changed entry")); | ("vm_map_unwire: !HOLESOK and new/changed entry")); | ||||
continue; | continue; | ||||
} | } | ||||
if (rv == KERN_SUCCESS && (!user_unwire || | if (rv == KERN_SUCCESS && (!user_unwire || | ||||
(entry->eflags & MAP_ENTRY_USER_WIRED))) { | (entry->eflags & MAP_ENTRY_USER_WIRED))) { | ||||
if (entry->wired_count == 1) | if (entry->wired_count == 1) | ||||
vm_map_entry_unwire(map, entry); | vm_map_entry_unwire(map, entry); | ||||
else | else | ||||
entry->wired_count--; | entry->wired_count--; | ||||
if (user_unwire) | if (user_unwire) | ||||
entry->eflags &= ~MAP_ENTRY_USER_WIRED; | entry->eflags &= ~MAP_ENTRY_USER_WIRED; | ||||
} | } | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | ||||
("vm_map_unwire: in-transition flag missing %p", entry)); | ("vm_map_unwire: in-transition flag missing %p", entry)); | ||||
KASSERT(entry->wiring_thread == curthread, | KASSERT(entry->wiring_thread == curthread, | ||||
("vm_map_unwire: alien wire %p", entry)); | ("vm_map_unwire: alien wire %p", entry)); | ||||
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; | entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = NULL; | entry->wiring_thread = NULL; | ||||
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | ||||
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | ||||
need_wakeup = TRUE; | need_wakeup = true; | ||||
} | } | ||||
vm_map_simplify_entry(map, entry); | vm_map_simplify_entry(map, entry); | ||||
} | } | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
if (need_wakeup) | if (need_wakeup) | ||||
vm_map_wakeup(map); | vm_map_wakeup(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 77 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) | vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) | ||||
{ | { | ||||
vm_map_entry_t entry, first_entry, tmp_entry; | vm_map_entry_t entry, first_entry, tmp_entry; | ||||
vm_offset_t faddr, saved_end, saved_start; | vm_offset_t faddr, saved_end, saved_start; | ||||
u_long npages; | u_long npages; | ||||
u_int last_timestamp; | u_int last_timestamp; | ||||
int rv; | int rv; | ||||
boolean_t need_wakeup, result, user_wire; | boolean_t result; | ||||
bool holes_ok, need_wakeup, user_wire; | |||||
vm_prot_t prot; | vm_prot_t prot; | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
prot = 0; | prot = 0; | ||||
if (flags & VM_MAP_WIRE_WRITE) | if (flags & VM_MAP_WIRE_WRITE) | ||||
prot |= VM_PROT_WRITE; | prot |= VM_PROT_WRITE; | ||||
user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; | user_wire = (flags & VM_MAP_WIRE_USER) != 0; | ||||
holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | |||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) { | entry = vm_map_get_avail_entry(map, start, &end, holes_ok, | ||||
if (flags & VM_MAP_WIRE_HOLESOK) | NULL, &first_entry); | ||||
first_entry = first_entry->next; | if (entry == NULL) | ||||
else | vm_map_unlock(map); | ||||
return (KERN_INVALID_ADDRESS); | rv = KERN_SUCCESS; | ||||
} | |||||
last_timestamp = map->timestamp; | |||||
entry = first_entry; | |||||
while (entry->start < end) { | while (entry->start < end) { | ||||
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | |||||
/* | |||||
* We have not yet clipped the entry. | |||||
*/ | |||||
saved_start = (start >= entry->start) ? start : | |||||
entry->start; | |||||
entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; | |||||
if (vm_map_unlock_and_wait(map, 0)) { | |||||
/* | |||||
* Allow interruption of user wiring? | |||||
*/ | |||||
} | |||||
vm_map_lock(map); | |||||
if (last_timestamp + 1 != map->timestamp) { | |||||
/* | |||||
* Look again for the entry because the map was | |||||
* modified while it was unlocked. | |||||
* Specifically, the entry may have been | |||||
* clipped, merged, or deleted. | |||||
*/ | |||||
if (!vm_map_lookup_entry(map, saved_start, | |||||
&tmp_entry)) { | |||||
if (flags & VM_MAP_WIRE_HOLESOK) | |||||
tmp_entry = tmp_entry->next; | |||||
else { | |||||
if (saved_start == start) { | |||||
/* | |||||
* first_entry has been deleted. | |||||
*/ | |||||
return (KERN_INVALID_ADDRESS); | |||||
} | |||||
end = saved_start; | |||||
rv = KERN_INVALID_ADDRESS; | |||||
goto done; | |||||
} | |||||
} | |||||
if (entry == first_entry) | |||||
first_entry = tmp_entry; | |||||
else | |||||
first_entry = NULL; | |||||
entry = tmp_entry; | |||||
} | |||||
last_timestamp = map->timestamp; | |||||
continue; | |||||
} | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
/* | /* | ||||
* Mark the entry in case the map lock is released. (See | * Mark the entry in case the map lock is released. (See | ||||
* above.) | * above.) | ||||
*/ | */ | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | ||||
entry->wiring_thread == NULL, | entry->wiring_thread == NULL, | ||||
("owned map entry %p", entry)); | ("owned map entry %p", entry)); | ||||
entry->eflags |= MAP_ENTRY_IN_TRANSITION; | entry->eflags |= MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = curthread; | entry->wiring_thread = curthread; | ||||
if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | ||||
|| (entry->protection & prot) != prot) { | || (entry->protection & prot) != prot) { | ||||
entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; | entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; | ||||
if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { | if (!holes_ok) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | break; | ||||
} | } | ||||
goto next_entry; | } else if (entry->wired_count == 0) { | ||||
} | |||||
if (entry->wired_count == 0) { | |||||
entry->wired_count++; | entry->wired_count++; | ||||
npages = atop(entry->end - entry->start); | npages = atop(entry->end - entry->start); | ||||
if (user_wire && !vm_map_wire_user_count_add(npages)) { | if (user_wire && !vm_map_wire_user_count_add(npages)) { | ||||
vm_map_wire_entry_failure(map, entry, | vm_map_wire_entry_failure(map, entry, | ||||
entry->start); | entry->start); | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_RESOURCE_SHORTAGE; | rv = KERN_RESOURCE_SHORTAGE; | ||||
goto done; | break; | ||||
} | } | ||||
/* | /* | ||||
* Release the map lock, relying on the in-transition | * Release the map lock, relying on the in-transition | ||||
* mark. Mark the map busy for fork. | * mark. Mark the map busy for fork. | ||||
*/ | */ | ||||
saved_start = entry->start; | saved_start = entry->start; | ||||
saved_end = entry->end; | saved_end = entry->end; | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | ||||
} | } | ||||
} | } | ||||
last_timestamp = map->timestamp; | last_timestamp = map->timestamp; | ||||
if (rv != KERN_SUCCESS) { | if (rv != KERN_SUCCESS) { | ||||
vm_map_wire_entry_failure(map, entry, faddr); | vm_map_wire_entry_failure(map, entry, faddr); | ||||
if (user_wire) | if (user_wire) | ||||
vm_map_wire_user_count_sub(npages); | vm_map_wire_user_count_sub(npages); | ||||
end = entry->end; | end = entry->end; | ||||
goto done; | break; | ||||
} | } | ||||
} else if (!user_wire || | } else if (!user_wire || | ||||
(entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { | (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { | ||||
entry->wired_count++; | entry->wired_count++; | ||||
} | } | ||||
/* | /* | ||||
* Check the map for holes in the specified region. | * Check the map for holes in the specified region. | ||||
* If VM_MAP_WIRE_HOLESOK was specified, skip this check. | * If VM_MAP_WIRE_HOLESOK was specified, skip this check. | ||||
*/ | */ | ||||
next_entry: | if (!holes_ok && | ||||
if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && | |||||
entry->end < end && entry->next->start > entry->end) { | entry->end < end && entry->next->start > entry->end) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | break; | ||||
} | } | ||||
entry = entry->next; | entry = vm_map_get_avail_entry(map, start, &end, holes_ok, | ||||
entry->next, &first_entry); | |||||
if (entry == NULL) { | |||||
rv = KERN_INVALID_ADDRESS; | |||||
break; | |||||
} | } | ||||
rv = KERN_SUCCESS; | } | ||||
done: | need_wakeup = false; | ||||
Done Inline ActionsWhen you break out of the loop due to tmp_entry == NULL and setting rv == KERN_INVALID_ADDRESS, wouldn't this assignment obliterate rv ? Before, that break was 'goto done'. kib: When you break out of the loop due to tmp_entry == NULL and setting rv == KERN_INVALID_ADDRESS… | |||||
need_wakeup = FALSE; | |||||
if (first_entry == NULL) { | if (first_entry == NULL) { | ||||
result = vm_map_lookup_entry(map, start, &first_entry); | result = vm_map_lookup_entry(map, start, &first_entry); | ||||
if (!result && (flags & VM_MAP_WIRE_HOLESOK)) | if (!result) { | ||||
KASSERT(holes_ok, ("vm_map_wire: lookup failed")); | |||||
first_entry = first_entry->next; | first_entry = first_entry->next; | ||||
else | |||||
KASSERT(result, ("vm_map_wire: lookup failed")); | |||||
} | } | ||||
} | |||||
for (entry = first_entry; entry->start < end; entry = entry->next) { | for (entry = first_entry; entry->start < end; entry = entry->next) { | ||||
/* | /* | ||||
* If VM_MAP_WIRE_HOLESOK was specified, an empty | * If VM_MAP_WIRE_HOLESOK was specified, an empty | ||||
* space in the unwired region could have been mapped | * space in the unwired region could have been mapped | ||||
* while the map lock was dropped for faulting in the | * while the map lock was dropped for faulting in the | ||||
* pages or draining MAP_ENTRY_IN_TRANSITION. | * pages or draining MAP_ENTRY_IN_TRANSITION. | ||||
* Moreover, another thread could be simultaneously | * Moreover, another thread could be simultaneously | ||||
* wiring this new mapping entry. Detect these cases | * wiring this new mapping entry. Detect these cases | ||||
* and skip any entries marked as in transition not by us. | * and skip any entries marked as in transition not by us. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | ||||
entry->wiring_thread != curthread) { | entry->wiring_thread != curthread) { | ||||
KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, | KASSERT(holes_ok, | ||||
("vm_map_wire: !HOLESOK and new/changed entry")); | ("vm_map_wire: !HOLESOK and new/changed entry")); | ||||
continue; | continue; | ||||
} | } | ||||
if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) | if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | ||||
goto next_entry_done; | /* do nothing */ | ||||
} else if (rv == KERN_SUCCESS) { | |||||
if (rv == KERN_SUCCESS) { | |||||
if (user_wire) | if (user_wire) | ||||
entry->eflags |= MAP_ENTRY_USER_WIRED; | entry->eflags |= MAP_ENTRY_USER_WIRED; | ||||
} else if (entry->wired_count == -1) { | } else if (entry->wired_count == -1) { | ||||
/* | /* | ||||
* Wiring failed on this entry. Thus, unwiring is | * Wiring failed on this entry. Thus, unwiring is | ||||
* unnecessary. | * unnecessary. | ||||
*/ | */ | ||||
entry->wired_count = 0; | entry->wired_count = 0; | ||||
} else if (!user_wire || | } else if (!user_wire || | ||||
(entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { | (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { | ||||
/* | /* | ||||
* Undo the wiring. Wiring succeeded on this entry | * Undo the wiring. Wiring succeeded on this entry | ||||
* but failed on a later entry. | * but failed on a later entry. | ||||
*/ | */ | ||||
if (entry->wired_count == 1) { | if (entry->wired_count == 1) { | ||||
vm_map_entry_unwire(map, entry); | vm_map_entry_unwire(map, entry); | ||||
if (user_wire) | if (user_wire) | ||||
vm_map_wire_user_count_sub( | vm_map_wire_user_count_sub( | ||||
atop(entry->end - entry->start)); | atop(entry->end - entry->start)); | ||||
} else | } else | ||||
entry->wired_count--; | entry->wired_count--; | ||||
} | } | ||||
next_entry_done: | |||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | ||||
("vm_map_wire: in-transition flag missing %p", entry)); | ("vm_map_wire: in-transition flag missing %p", entry)); | ||||
KASSERT(entry->wiring_thread == curthread, | KASSERT(entry->wiring_thread == curthread, | ||||
("vm_map_wire: alien wire %p", entry)); | ("vm_map_wire: alien wire %p", entry)); | ||||
entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | | entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | | ||||
MAP_ENTRY_WIRE_SKIPPED); | MAP_ENTRY_WIRE_SKIPPED); | ||||
entry->wiring_thread = NULL; | entry->wiring_thread = NULL; | ||||
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | ||||
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | ||||
need_wakeup = TRUE; | need_wakeup = true; | ||||
} | } | ||||
vm_map_simplify_entry(map, entry); | vm_map_simplify_entry(map, entry); | ||||
} | } | ||||
if (need_wakeup) | if (need_wakeup) | ||||
vm_map_wakeup(map); | vm_map_wakeup(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 1,587 Lines • Show Last 20 Lines |
The following is true: "Release and reacquire the map lock." But, it misses the more important aspect of what's happening here. We are sleeping until the given map entry is no longer in transition.