sys/vm/vm_map.c
Show First 20 Lines • Show All 131 Lines • ▼ Show 20 Lines | |||||
static int vmspace_zinit(void *mem, int size, int flags); | static int vmspace_zinit(void *mem, int size, int flags); | ||||
static int vm_map_zinit(void *mem, int size, int flags); | static int vm_map_zinit(void *mem, int size, int flags); | ||||
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, | static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, | ||||
vm_offset_t max); | vm_offset_t max); | ||||
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); | static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); | ||||
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); | static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); | ||||
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry); | static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry); | ||||
static int vm_map_growstack(vm_map_t map, vm_offset_t addr, | static int vm_map_growstack(vm_map_t map, vm_offset_t addr, | ||||
vm_map_entry_t gap_entry); | vm_map_entry_t gap_entry, vm_map_entry_t stack_entry); | ||||
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, | static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, | ||||
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); | vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
static void vm_map_zdtor(void *mem, int size, void *arg); | static void vm_map_zdtor(void *mem, int size, void *arg); | ||||
static void vmspace_zdtor(void *mem, int size, void *arg); | static void vmspace_zdtor(void *mem, int size, void *arg); | ||||
#endif | #endif | ||||
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, | static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, | ||||
vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max, | vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max, | ||||
▲ Show 20 Lines • Show All 747 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Initialize an existing vm_map structure | * Initialize an existing vm_map structure | ||||
* such as that in the vmspace structure. | * such as that in the vmspace structure. | ||||
*/ | */ | ||||
static void | static void | ||||
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) | _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) | ||||
{ | { | ||||
map->header.next = map->header.prev = &map->header; | map->header.next = &map->header; | ||||
map->header.eflags = MAP_ENTRY_HEADER; | map->header.eflags = MAP_ENTRY_HEADER; | ||||
map->needs_wakeup = FALSE; | map->needs_wakeup = FALSE; | ||||
map->system_map = 0; | map->system_map = 0; | ||||
map->pmap = pmap; | map->pmap = pmap; | ||||
map->header.end = min; | map->header.end = min; | ||||
map->header.start = max; | map->header.start = max; | ||||
map->flags = 0; | map->flags = 0; | ||||
map->root = NULL; | map->root = NULL; | ||||
▲ Show 20 Lines • Show All 73 Lines • ▼ Show 20 Lines | |||||
static inline vm_size_t | static inline vm_size_t | ||||
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor) | vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor) | ||||
{ | { | ||||
return (root->right != NULL ? | return (root->right != NULL ? | ||||
root->right->max_free : right_ancestor->start - root->end); | root->right->max_free : right_ancestor->start - root->end); | ||||
} | } | ||||
/* | |||||
* vm_map_splay_split, vm_map_splay_merge: | |||||
* | |||||
* The Sleator and Tarjan top-down splay algorithm with the following | |||||
* variation. Max_free must be computed bottom-up, so on the downward | |||||
* pass (vm_map_splay_split), maintain the left and right spines in | |||||
* reverse order, and ensure that the max_free values for those nodes | |||||
* store the values of their descendents not on the search path. Later, | |||||
* make a second pass up each side (vm_map_splay_merge) to fix the | |||||
* pointers and compute max_free. The time bound is O(log n) amortized. | |||||
*/ | |||||
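To make the max_free bookkeeping concrete, here is a minimal, self-contained sketch of the bottom-up rule that vm_map_entry_max_free_right() above and vm_map_splay_merge() apply; the struct and function names are hypothetical, not the kernel's, and the point is only that a missing child contributes the gap to the bounding neighbor on that side.

#include <stddef.h>

/* Hypothetical augmented node covering the address range [start, end). */
struct range_node {
	size_t start, end;
	struct range_node *left, *right;
	size_t max_free;	/* largest free gap within this subtree */
};

#define	RN_MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Recompute n->max_free from its children.  "left_nbr" is the nearest
 * in-order predecessor outside n's subtree and "right_nbr" the nearest
 * successor; when a child is absent, the gap on that side is simply the
 * free space between n and the corresponding neighbor.
 */
void
range_node_update_max_free(struct range_node *n,
    const struct range_node *left_nbr, const struct range_node *right_nbr)
{
	size_t free_left, free_right;

	free_left = (n->left != NULL) ?
	    n->left->max_free : n->start - left_nbr->end;
	free_right = (n->right != NULL) ?
	    n->right->max_free : right_nbr->start - n->end;
	n->max_free = RN_MAX(free_left, free_right);
}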
#define SPLAY_LEFT_STEP(root, y, rlist, test) do { \ | #define SPLAY_LEFT_STEP(root, y, rlist, test) do { \ | ||||
vm_size_t max_free; \ | vm_size_t max_free; \ | ||||
\ | \ | ||||
/* \ | /* \ | ||||
* Infer root->right->max_free == root->max_free when \ | * Infer root->right->max_free == root->max_free when \ | ||||
* y->max_free < root->max_free || root->max_free == 0. \ | * y->max_free < root->max_free || root->max_free == 0. \ | ||||
* Otherwise, look right to find it. \ | * Otherwise, look right to find it. \ | ||||
*/ \ | */ \ | ||||
▲ Show 20 Lines • Show All 167 Lines • ▼ Show 20 Lines | if (rlist != &map->header) { | ||||
} while (rlist != &map->header); | } while (rlist != &map->header); | ||||
root->right = prev; | root->right = prev; | ||||
} | } | ||||
root->max_free = MAX(max_free_left, max_free_right); | root->max_free = MAX(max_free_left, max_free_right); | ||||
map->root = root; | map->root = root; | ||||
} | } | ||||
/* | /* | ||||
* vm_map_splay: | |||||
* | |||||
* The Sleator and Tarjan top-down splay algorithm with the | |||||
* following variation. Max_free must be computed bottom-up, so | |||||
* on the downward pass, maintain the left and right spines in | |||||
* reverse order. Then, make a second pass up each side to fix | |||||
* the pointers and compute max_free. The time bound is O(log n) | |||||
* amortized. | |||||
* | |||||
* The new root is the vm_map_entry containing "addr", or else an | |||||
* adjacent entry (lower if possible) if addr is not in the tree. | |||||
* | |||||
* The map must be locked, and leaves it so. | |||||
* | |||||
* Returns: the new root. | |||||
*/ | |||||
static vm_map_entry_t | |||||
vm_map_splay(vm_map_t map, vm_offset_t addr) | |||||
{ | |||||
vm_map_entry_t llist, rlist, root; | |||||
root = vm_map_splay_split(map, addr, 0, &llist, &rlist); | |||||
if (root != NULL) { | |||||
/* do nothing */ | |||||
} else if (llist != &map->header) { | |||||
/* | |||||
* Recover the greatest node in the left | |||||
* subtree and make it the root. | |||||
*/ | |||||
root = llist; | |||||
llist = root->right; | |||||
root->right = NULL; | |||||
} else if (rlist != &map->header) { | |||||
/* | |||||
* Recover the least node in the right | |||||
* subtree and make it the root. | |||||
*/ | |||||
root = rlist; | |||||
rlist = root->left; | |||||
root->left = NULL; | |||||
} else { | |||||
/* There is no root. */ | |||||
return (NULL); | |||||
} | |||||
vm_map_splay_merge(map, root, llist, rlist); | |||||
VM_MAP_ASSERT_CONSISTENT(map); | |||||
return (root); | |||||
} | |||||
/* | |||||
* vm_map_entry_{un,}link: | * vm_map_entry_{un,}link: | ||||
* | * | ||||
* Insert/remove entries from maps. | * Insert/remove entries from maps. | ||||
*/ | */ | ||||
static void | static void | ||||
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) | vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) | ||||
{ | { | ||||
vm_map_entry_t llist, rlist, root; | vm_map_entry_t llist, rlist, root; | ||||
CTR3(KTR_VM, | CTR3(KTR_VM, | ||||
"vm_map_entry_link: map %p, nentries %d, entry %p", map, | "vm_map_entry_link: map %p, nentries %d, entry %p", map, | ||||
map->nentries, entry); | map->nentries, entry); | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
map->nentries++; | map->nentries++; | ||||
root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); | root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); | ||||
KASSERT(root == NULL, | KASSERT(root == NULL, | ||||
("vm_map_entry_link: link object already mapped")); | ("vm_map_entry_link: link object already mapped")); | ||||
entry->prev = llist; | llist->next = entry; | ||||
entry->next = rlist; | entry->next = rlist; | ||||
llist->next = rlist->prev = entry; | |||||
entry->left = entry->right = NULL; | entry->left = entry->right = NULL; | ||||
vm_map_splay_merge(map, entry, llist, rlist); | vm_map_splay_merge(map, entry, llist, rlist); | ||||
VM_MAP_ASSERT_CONSISTENT(map); | VM_MAP_ASSERT_CONSISTENT(map); | ||||
} | } | ||||
enum unlink_merge_type { | enum unlink_merge_type { | ||||
UNLINK_MERGE_PREV, | |||||
UNLINK_MERGE_NONE, | UNLINK_MERGE_NONE, | ||||
UNLINK_MERGE_NEXT | UNLINK_MERGE_NEXT | ||||
}; | }; | ||||
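A small worked example of the two remaining modes, with made-up addresses: suppose entry A covers [0x1000, 0x2000) with object offset 0 and its successor B covers [0x2000, 0x3000) with offset 0x1000. Unlinking A with UNLINK_MERGE_NEXT makes B absorb A's range and offset, leaving B as [0x1000, 0x3000) with offset 0, which is what the rlist->start and rlist->offset assignments below implement; unlinking A with UNLINK_MERGE_NONE simply removes A and leaves [0x1000, 0x2000) unmapped.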
static void | static void | ||||
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, | vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, | ||||
enum unlink_merge_type op) | enum unlink_merge_type op) | ||||
{ | { | ||||
vm_map_entry_t llist, rlist, root, y; | vm_map_entry_t llist, rlist, root; | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); | root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); | ||||
KASSERT(root != NULL, | KASSERT(root != NULL, | ||||
("vm_map_entry_unlink: unlink object not mapped")); | ("vm_map_entry_unlink: unlink object not mapped")); | ||||
switch (op) { | |||||
case UNLINK_MERGE_PREV: | |||||
vm_map_splay_findprev(root, &llist); | vm_map_splay_findprev(root, &llist); | ||||
llist->end = root->end; | |||||
y = root->right; | |||||
root = llist; | |||||
llist = root->right; | |||||
root->right = y; | |||||
break; | |||||
case UNLINK_MERGE_NEXT: | |||||
vm_map_splay_findnext(root, &rlist); | vm_map_splay_findnext(root, &rlist); | ||||
llist->next = rlist; | |||||
switch (op) { | |||||
case UNLINK_MERGE_NEXT: | |||||
rlist->start = root->start; | rlist->start = root->start; | ||||
rlist->offset = root->offset; | rlist->offset = root->offset; | ||||
y = root->left; | |||||
root = rlist; | root = rlist; | ||||
rlist = root->left; | rlist = root->left; | ||||
root->left = y; | root->left = NULL; | ||||
break; | break; | ||||
case UNLINK_MERGE_NONE: | case UNLINK_MERGE_NONE: | ||||
vm_map_splay_findprev(root, &llist); | |||||
vm_map_splay_findnext(root, &rlist); | |||||
if (llist != &map->header) { | if (llist != &map->header) { | ||||
root = llist; | root = llist; | ||||
llist = root->right; | llist = root->right; | ||||
root->right = NULL; | root->right = NULL; | ||||
} else if (rlist != &map->header) { | } else if (rlist != &map->header) { | ||||
root = rlist; | root = rlist; | ||||
rlist = root->left; | rlist = root->left; | ||||
root->left = NULL; | root->left = NULL; | ||||
} else | } else | ||||
root = NULL; | root = NULL; | ||||
break; | break; | ||||
} | } | ||||
y = entry->next; | |||||
y->prev = entry->prev; | |||||
y->prev->next = y; | |||||
if (root != NULL) | if (root != NULL) | ||||
vm_map_splay_merge(map, root, llist, rlist); | vm_map_splay_merge(map, root, llist, rlist); | ||||
else | else | ||||
map->root = NULL; | map->root = NULL; | ||||
VM_MAP_ASSERT_CONSISTENT(map); | VM_MAP_ASSERT_CONSISTENT(map); | ||||
map->nentries--; | map->nentries--; | ||||
CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, | CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, | ||||
map->nentries, entry); | map->nentries, entry); | ||||
Show All 21 Lines | vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount) | ||||
entry->end += grow_amount; | entry->end += grow_amount; | ||||
vm_map_splay_merge(map, root, llist, rlist); | vm_map_splay_merge(map, root, llist, rlist); | ||||
VM_MAP_ASSERT_CONSISTENT(map); | VM_MAP_ASSERT_CONSISTENT(map); | ||||
CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p", | CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p", | ||||
__func__, map, map->nentries, entry); | __func__, map, map->nentries, entry); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_lookup_entry: [ internal use only ] | * vm_map_lookup_helper: [ internal use only ] | ||||
* | * | ||||
* Finds the map entry containing (or | * Finds the map entry containing, or a map entry adjacent to, the | ||||
* immediately preceding) the specified address | * specified address in the given map. The boolean result indicates | ||||
* in the given map; the entry is returned | * whether a map entry contains the address. If no map entry contains the | ||||
* in the "entry" parameter. The boolean | * address, the parameter lesseq decides whether to provide the entry | ||||
* result indicates whether the address is | * before the address, or the one after it. | ||||
* actually contained in the map. | * | ||||
* The entry is returned in the "entry" parameter. If the address is | |||||
* contained in the map, and the parameter nbr is not NULL, then an | |||||
* adjacent map entry is saved in nbr. If ((*entry)->eflags & | |||||
* MAP_ENTRY_STACK_GAP_DN), then nbr stores the next entry, and otherwise | |||||
* stores the previous one. | |||||
*/ | */ | ||||
boolean_t | static bool | ||||
vm_map_lookup_entry( | vm_map_lookup_helper(vm_map_t map, vm_offset_t addr, bool lesseq, | ||||
vm_map_t map, | vm_map_entry_t *entry, vm_map_entry_t *nbr) | ||||
kib: OUT comment looks not very useful and somewhat cryptic now that the function definition is… | |||||
vm_offset_t address, | |||||
vm_map_entry_t *entry) /* OUT */ | |||||
{ | { | ||||
vm_map_entry_t cur, lbound; | vm_map_entry_t llist, rlist, root; | ||||
boolean_t locked; | bool locked, found; | ||||
/* | /* | ||||
* If the map is empty, then the map entry immediately preceding | * If the map is empty, then the map entry immediately preceding | ||||
* "address" is the map's header. | * "addr" is the map's header. | ||||
*/ | */ | ||||
cur = map->root; | root = map->root; | ||||
if (cur == NULL) { | if (root == NULL) { | ||||
*entry = &map->header; | *entry = &map->header; | ||||
return (FALSE); | return (false); | ||||
} | } | ||||
if (address >= cur->start && cur->end > address) { | |||||
*entry = cur; | |||||
return (TRUE); | |||||
} | |||||
if ((locked = vm_map_locked(map)) || | if ((locked = vm_map_locked(map)) || | ||||
sx_try_upgrade(&map->lock)) { | sx_try_upgrade(&map->lock)) { | ||||
alc: Please delete this blank line. | |||||
/* | /* | ||||
* Splay requires a write lock on the map. However, it only | * Splay requires a write lock on the map. However, it only | ||||
* restructures the binary search tree; it does not otherwise | * restructures the binary search tree; it does not otherwise | ||||
* change the map. Thus, the map's timestamp need not change | * change the map. Thus, the map's timestamp need not change | ||||
* on a temporary upgrade. | * on a temporary upgrade. | ||||
*/ | */ | ||||
cur = vm_map_splay(map, address); | root = vm_map_splay_split(map, addr, 0, &llist, &rlist); | ||||
if (!locked) | found = root != NULL; | ||||
sx_downgrade(&map->lock); | if (root != NULL) { | ||||
*entry = root; | |||||
if (nbr == NULL) | |||||
; /* Ignore. */ | |||||
alc: Style: Insufficient indentation | |||||
else if ((root->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { | |||||
vm_map_splay_findnext(root, &rlist); | |||||
root->right = NULL; | |||||
*nbr = rlist; | |||||
} else { | |||||
vm_map_splay_findprev(root, &llist); | |||||
root->left = NULL; | |||||
*nbr = llist; | |||||
} | |||||
} else if (llist != &map->header) { | |||||
/* | /* | ||||
* If "address" is contained within a map entry, the new root | * Recover the greatest node in the left | ||||
* is that map entry. Otherwise, the new root is a map entry | * subtree and make it the root. | ||||
* immediately before or after "address". | |||||
*/ | */ | ||||
if (address < cur->start) { | *entry = lesseq ? llist : rlist; | ||||
*entry = &map->header; | root = llist; | ||||
return (FALSE); | llist = root->right; | ||||
root->right = NULL; | |||||
} else { | |||||
/* | |||||
* Recover the least node in the right | |||||
* subtree and make it the root. | |||||
*/ | |||||
*entry = lesseq ? llist : rlist; | |||||
root = rlist; | |||||
rlist = root->left; | |||||
root->left = NULL; | |||||
} | } | ||||
*entry = cur; | vm_map_splay_merge(map, root, llist, rlist); | ||||
return (address < cur->end); | VM_MAP_ASSERT_CONSISTENT(map); | ||||
if (!locked) | |||||
sx_downgrade(&map->lock); | |||||
return (found); | |||||
} | } | ||||
/* | /* | ||||
* Since the map is only locked for read access, perform a | * Since the map is only locked for read access, perform a | ||||
* standard binary search tree lookup for "address". | * standard binary search tree lookup for "addr". | ||||
*/ | */ | ||||
lbound = &map->header; | llist = rlist = &map->header; | ||||
do { | do { | ||||
if (address < cur->start) { | if (addr < root->start) { | ||||
cur = cur->left; | rlist = root; | ||||
} else if (cur->end <= address) { | root = root->left; | ||||
lbound = cur; | } else if (root->end <= addr) { | ||||
cur = cur->right; | llist = root; | ||||
root = root->right; | |||||
} else { | } else { | ||||
*entry = cur; | *entry = root; | ||||
return (TRUE); | if (nbr == NULL) | ||||
; /* Ignore. */ | |||||
alc: Ditto | |||||
else if ((root->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { | |||||
/* Make nbr the successor to root. */ | |||||
if (root->right != NULL) { | |||||
rlist = root->right; | |||||
while (rlist->left != NULL) | |||||
rlist = rlist->left; | |||||
} | } | ||||
} while (cur != NULL); | *nbr = rlist; | ||||
*entry = lbound; | } else { | ||||
return (FALSE); | /* Make nbr the predecessor to root. */ | ||||
if (root->left != NULL) { | |||||
llist = root->left; | |||||
while (llist->right != NULL) | |||||
llist = llist->right; | |||||
} | } | ||||
*nbr = llist; | |||||
} | |||||
return (true); | |||||
} | |||||
} while (root != NULL); | |||||
*entry = lesseq ? llist : rlist; | |||||
return (false); | |||||
} | |||||
bool | |||||
vm_map_lookup_entry(vm_map_t map, vm_offset_t addr, | |||||
vm_map_entry_t *entry) /* OUT */ | |||||
{ | |||||
return (vm_map_lookup_helper(map, addr, true, entry, NULL)); | |||||
} | |||||
static bool | |||||
vm_map_lookup_entry_ge(vm_map_t map, vm_offset_t addr, | |||||
vm_map_entry_t *entry) /* OUT */ | |||||
{ | |||||
return (vm_map_lookup_helper(map, addr, false, entry, NULL)); | |||||
} | |||||
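The second wrapper exists so that range-walking callers no longer need the old two-step idiom. A hedged sketch of the caller-side difference (not a literal excerpt; it assumes the usual map/start/entry locals and omits locking), matching the transformation visible below in vm_map_submap() and vm_map_delete():

/* Before: find the first entry that ends after "start". */
if (vm_map_lookup_entry(map, start, &entry)) {
	/* "start" lies inside "entry"; clip it at "start". */
	vm_map_clip_start(map, entry, start);
} else {
	/* "entry" precedes "start"; the walk begins at its successor. */
	entry = entry->next;
}

/* After: one call returns the entry at or after "start". */
if (vm_map_lookup_entry_ge(map, start, &entry))
	vm_map_clip_start(map, entry, start);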
/* | /* | ||||
* vm_map_insert: | * vm_map_insert: | ||||
* | * | ||||
* Inserts the given whole VM object into the target | * Inserts the given whole VM object into the target | ||||
* map at the specified address range. The object's | * map at the specified address range. The object's | ||||
* size should match that of the address range. | * size should match that of the address range. | ||||
* | * | ||||
* Requires that the map be locked, and leaves it so. | * Requires that the map be locked, and leaves it so. | ||||
▲ Show 20 Lines • Show All 745 Lines • ▼ Show 20 Lines | #define vm_map_clip_start(map, entry, startaddr) \ | ||||
if (startaddr > entry->start) \ | if (startaddr > entry->start) \ | ||||
_vm_map_clip_start(map, entry, startaddr); \ | _vm_map_clip_start(map, entry, startaddr); \ | ||||
} | } | ||||
/* | /* | ||||
* This routine is called only when it is known that | * This routine is called only when it is known that | ||||
* the entry must be split. | * the entry must be split. | ||||
*/ | */ | ||||
static void | static vm_map_entry_t | ||||
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) | _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) | ||||
{ | { | ||||
vm_map_entry_t new_entry; | vm_map_entry_t new_entry; | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
KASSERT(entry->end > start && entry->start < start, | KASSERT(entry->end > start && entry->start < start, | ||||
("_vm_map_clip_start: invalid clip of entry %p", entry)); | ("_vm_map_clip_start: invalid clip of entry %p", entry)); | ||||
Show All 25 Lines | if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { | ||||
/* | /* | ||||
* The object->un_pager.vnp.writemappings for the | * The object->un_pager.vnp.writemappings for the | ||||
* object of MAP_ENTRY_WRITECNT type entry shall be | * object of MAP_ENTRY_WRITECNT type entry shall be | ||||
* kept as is here. The virtual pages are | * kept as is here. The virtual pages are | ||||
* re-distributed among the clipped entries, so the sum is | * re-distributed among the clipped entries, so the sum is | ||||
* left the same. | * left the same. | ||||
*/ | */ | ||||
} | } | ||||
return (new_entry); | |||||
} | } | ||||
/* | /* | ||||
* vm_map_clip_end: [ internal use only ] | * vm_map_clip_end: [ internal use only ] | ||||
* | * | ||||
* Asserts that the given entry ends at or before | * Asserts that the given entry ends at or before | ||||
* the specified address; if necessary, | * the specified address; if necessary, | ||||
* it splits the entry into two. | * it splits the entry into two. | ||||
▲ Show 20 Lines • Show All 77 Lines • ▼ Show 20 Lines | vm_map_submap( | ||||
vm_map_lock(submap); | vm_map_lock(submap); | ||||
submap->flags |= MAP_IS_SUB_MAP; | submap->flags |= MAP_IS_SUB_MAP; | ||||
vm_map_unlock(submap); | vm_map_unlock(submap); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (vm_map_lookup_entry(map, start, &entry)) { | if (vm_map_lookup_entry_ge(map, start, &entry)) | ||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
} else | |||||
entry = entry->next; | |||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
if ((entry->start == start) && (entry->end == end) && | if ((entry->start == start) && (entry->end == end) && | ||||
((entry->eflags & MAP_ENTRY_COW) == 0) && | ((entry->eflags & MAP_ENTRY_COW) == 0) && | ||||
(entry->object.vm_object == NULL)) { | (entry->object.vm_object == NULL)) { | ||||
entry->object.sub_map = submap; | entry->object.sub_map = submap; | ||||
entry->eflags |= MAP_ENTRY_IS_SUB_MAP; | entry->eflags |= MAP_ENTRY_IS_SUB_MAP; | ||||
▲ Show 20 Lines • Show All 105 Lines • ▼ Show 20 Lines | vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, | ||||
} | } | ||||
if (p_start != NULL) | if (p_start != NULL) | ||||
pmap_enter_object(map->pmap, start, addr + ptoa(psize), | pmap_enter_object(map->pmap, start, addr + ptoa(psize), | ||||
p_start, prot); | p_start, prot); | ||||
VM_OBJECT_RUNLOCK(object); | VM_OBJECT_RUNLOCK(object); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_lookup_entry_and_prev: [ internal use only ] | |||||
* | |||||
* Finds the map entry that contains the predecessor to the given address, | |||||
* or is the precessor of the entry that starts with the given address. | |||||
alc: "precessor"? | |||||
* Set entry to the map entry that will contain the given address after | |||||
* clipping. | |||||
*/ | |||||
static vm_map_entry_t | |||||
vm_map_lookup_entry_and_prev(vm_map_t map, vm_offset_t start, | |||||
vm_map_entry_t *entry) | |||||
{ | |||||
vm_map_entry_t prev; | |||||
if (start == 0) | |||||
prev = &map->header; | |||||
else | |||||
vm_map_lookup_entry(map, start - 1, &prev); | |||||
*entry = prev->end > start ? prev : prev->next; | |||||
return (prev); | |||||
} | |||||
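Several functions below (vm_map_protect(), vm_map_madvise(), vm_map_inherit()) use this helper to track the predecessor explicitly rather than following the entry->prev pointer that this change is phasing out. A rough sketch of the resulting walk, illustrative only and with locking and error handling omitted:

vm_map_entry_t current, entry, prev;

prev = vm_map_lookup_entry_and_prev(map, start, &entry);
if (prev == entry) {
	/*
	 * "start" falls strictly inside prev, so clipping splits it:
	 * the low half (the return value) becomes the new predecessor
	 * and the high half ("entry") is where the walk starts.
	 */
	prev = _vm_map_clip_start(map, entry, start);
}
for (current = entry; current->start < end;
    prev = current, current = current->next) {
	vm_map_clip_end(map, current, end);
	/* ... apply the per-entry operation to "current" ... */
	vm_map_try_merge_entries(map, prev, current);
}
vm_map_try_merge_entries(map, prev, current);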
/* | |||||
* vm_map_protect: | * vm_map_protect: | ||||
* | * | ||||
* Sets the protection of the specified address | * Sets the protection of the specified address | ||||
* region in the target map. If "set_max" is | * region in the target map. If "set_max" is | ||||
* specified, the maximum protection is to be set; | * specified, the maximum protection is to be set; | ||||
* otherwise, only the current protection is affected. | * otherwise, only the current protection is affected. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
vm_prot_t new_prot, boolean_t set_max) | vm_prot_t new_prot, boolean_t set_max) | ||||
{ | { | ||||
vm_map_entry_t current, entry, in_tran; | vm_map_entry_t current, entry, in_tran, prev; | ||||
vm_object_t obj; | vm_object_t obj; | ||||
struct ucred *cred; | struct ucred *cred; | ||||
vm_prot_t old_prot; | vm_prot_t old_prot; | ||||
int rv; | int rv; | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
again: | again: | ||||
in_tran = NULL; | in_tran = NULL; | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
/* | /* | ||||
* Ensure that we are not concurrently wiring pages. vm_map_wire() may | * Ensure that we are not concurrently wiring pages. vm_map_wire() may | ||||
* need to fault pages into the map and will drop the map lock while | * need to fault pages into the map and will drop the map lock while | ||||
* doing so, and the VM object may end up in an inconsistent state if we | * doing so, and the VM object may end up in an inconsistent state if we | ||||
* update the protection on the map entry in between faults. | * update the protection on the map entry in between faults. | ||||
*/ | */ | ||||
vm_map_wait_busy(map); | vm_map_wait_busy(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
prev = vm_map_lookup_entry_and_prev(map, start, &entry); | |||||
if (!vm_map_lookup_entry(map, start, &entry)) | |||||
entry = entry->next; | |||||
/* | /* | ||||
* Make a first pass to check for protection violations. | * Make a first pass to check for protection violations. | ||||
*/ | */ | ||||
for (current = entry; current->start < end; current = current->next) { | for (current = entry; current->start < end; current = current->next) { | ||||
if ((current->eflags & MAP_ENTRY_GUARD) != 0) | if ((current->eflags & MAP_ENTRY_GUARD) != 0) | ||||
continue; | continue; | ||||
if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { | if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
Show All 24 Lines | again: | ||||
/* | /* | ||||
* Before changing the protections, try to reserve swap space for any | * Before changing the protections, try to reserve swap space for any | ||||
* private (i.e., copy-on-write) mappings that are transitioning from | * private (i.e., copy-on-write) mappings that are transitioning from | ||||
* read-only to read/write access. If a reservation fails, break out | * read-only to read/write access. If a reservation fails, break out | ||||
* of this loop early and let the next loop simplify the entries, since | * of this loop early and let the next loop simplify the entries, since | ||||
* some may now be mergeable. | * some may now be mergeable. | ||||
*/ | */ | ||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
vm_map_clip_start(map, entry, start); | if (prev == entry) | ||||
prev = _vm_map_clip_start(map, entry, start); | |||||
for (current = entry; current->start < end; current = current->next) { | for (current = entry; current->start < end; current = current->next) { | ||||
vm_map_clip_end(map, current, end); | vm_map_clip_end(map, current, end); | ||||
if (set_max || | if (set_max || | ||||
((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || | ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || | ||||
ENTRY_CHARGED(current) || | ENTRY_CHARGED(current) || | ||||
(current->eflags & MAP_ENTRY_GUARD) != 0) { | (current->eflags & MAP_ENTRY_GUARD) != 0) { | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | again: | ||||
} | } | ||||
/* | /* | ||||
* If enough swap space was available, go back and fix up protections. | * If enough swap space was available, go back and fix up protections. | ||||
* Otherwise, just simplify entries, since some may have been modified. | * Otherwise, just simplify entries, since some may have been modified. | ||||
* [Note that clipping is not necessary the second time.] | * [Note that clipping is not necessary the second time.] | ||||
*/ | */ | ||||
for (current = entry; current->start < end; | for (current = entry; current->start < end; | ||||
vm_map_try_merge_entries(map, current->prev, current), | vm_map_try_merge_entries(map, prev, current), | ||||
current = current->next) { | prev = current, current = prev->next) { | ||||
if (rv != KERN_SUCCESS || | if (rv != KERN_SUCCESS || | ||||
(current->eflags & MAP_ENTRY_GUARD) != 0) | (current->eflags & MAP_ENTRY_GUARD) != 0) | ||||
continue; | continue; | ||||
old_prot = current->protection; | old_prot = current->protection; | ||||
if (set_max) | if (set_max) | ||||
current->protection = | current->protection = | ||||
Show All 21 Lines | |||||
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ | #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ | ||||
VM_PROT_ALL) | VM_PROT_ALL) | ||||
pmap_protect(map->pmap, current->start, | pmap_protect(map->pmap, current->start, | ||||
current->end, | current->end, | ||||
current->protection & MASK(current)); | current->protection & MASK(current)); | ||||
#undef MASK | #undef MASK | ||||
} | } | ||||
} | } | ||||
vm_map_try_merge_entries(map, current->prev, current); | vm_map_try_merge_entries(map, prev, current); | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_madvise: | * vm_map_madvise: | ||||
* | * | ||||
* This routine traverses a process's map handling the madvise | * This routine traverses a process's map handling the madvise | ||||
* system call. Advisories are classified as either those affecting | * system call. Advisories are classified as either those affecting | ||||
* the vm_map_entry structure, or those affecting the underlying | * the vm_map_entry structure, or those affecting the underlying | ||||
* objects. | * objects. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_madvise( | vm_map_madvise( | ||||
vm_map_t map, | vm_map_t map, | ||||
vm_offset_t start, | vm_offset_t start, | ||||
vm_offset_t end, | vm_offset_t end, | ||||
int behav) | int behav) | ||||
{ | { | ||||
vm_map_entry_t current, entry; | vm_map_entry_t current, entry, prev; | ||||
bool modify_map; | bool modify_map; | ||||
/* | /* | ||||
* Some madvise calls directly modify the vm_map_entry, in which case | * Some madvise calls directly modify the vm_map_entry, in which case | ||||
* we need to use an exclusive lock on the map and we need to perform | * we need to use an exclusive lock on the map and we need to perform | ||||
* various clipping operations. Otherwise we only need a read-lock | * various clipping operations. Otherwise we only need a read-lock | ||||
* on the map. | * on the map. | ||||
*/ | */ | ||||
Show All 21 Lines | vm_map_madvise( | ||||
default: | default: | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
/* | /* | ||||
* Locate starting entry and clip if necessary. | * Locate starting entry and clip if necessary. | ||||
*/ | */ | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
prev = vm_map_lookup_entry_and_prev(map, start, &entry); | |||||
if (vm_map_lookup_entry(map, start, &entry)) { | |||||
if (modify_map) | |||||
vm_map_clip_start(map, entry, start); | |||||
} else { | |||||
entry = entry->next; | |||||
} | |||||
if (modify_map) { | if (modify_map) { | ||||
/* | /* | ||||
* madvise behaviors that are implemented in the vm_map_entry. | * madvise behaviors that are implemented in the vm_map_entry. | ||||
* | * | ||||
* We clip the vm_map_entry so that behavioral changes are | * We clip the vm_map_entry so that behavioral changes are | ||||
* limited to the specified address range. | * limited to the specified address range. | ||||
*/ | */ | ||||
if (prev == entry) | |||||
prev = _vm_map_clip_start(map, entry, start); | |||||
for (current = entry; current->start < end; | for (current = entry; current->start < end; | ||||
current = current->next) { | prev = current, current = current->next) { | ||||
if (current->eflags & MAP_ENTRY_IS_SUB_MAP) | if (current->eflags & MAP_ENTRY_IS_SUB_MAP) | ||||
continue; | continue; | ||||
vm_map_clip_end(map, current, end); | vm_map_clip_end(map, current, end); | ||||
switch (behav) { | switch (behav) { | ||||
case MADV_NORMAL: | case MADV_NORMAL: | ||||
vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); | vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); | ||||
Show All 14 Lines | for (current = entry; current->start < end; | ||||
current->eflags |= MAP_ENTRY_NOCOREDUMP; | current->eflags |= MAP_ENTRY_NOCOREDUMP; | ||||
break; | break; | ||||
case MADV_CORE: | case MADV_CORE: | ||||
current->eflags &= ~MAP_ENTRY_NOCOREDUMP; | current->eflags &= ~MAP_ENTRY_NOCOREDUMP; | ||||
break; | break; | ||||
default: | default: | ||||
break; | break; | ||||
} | } | ||||
vm_map_try_merge_entries(map, current->prev, current); | vm_map_try_merge_entries(map, prev, current); | ||||
} | } | ||||
vm_map_try_merge_entries(map, current->prev, current); | vm_map_try_merge_entries(map, prev, current); | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
} else { | } else { | ||||
vm_pindex_t pstart, pend; | vm_pindex_t pstart, pend; | ||||
/* | /* | ||||
* madvise behaviors that are implemented in the underlying | * madvise behaviors that are implemented in the underlying | ||||
* vm_object. | * vm_object. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 83 Lines • ▼ Show 20 Lines | |||||
* range in the target map. Inheritance | * range in the target map. Inheritance | ||||
* affects how the map will be shared with | * affects how the map will be shared with | ||||
* child maps at the time of vmspace_fork. | * child maps at the time of vmspace_fork. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
vm_inherit_t new_inheritance) | vm_inherit_t new_inheritance) | ||||
{ | { | ||||
vm_map_entry_t entry; | vm_map_entry_t entry, prev; | ||||
vm_map_entry_t temp_entry; | |||||
switch (new_inheritance) { | switch (new_inheritance) { | ||||
case VM_INHERIT_NONE: | case VM_INHERIT_NONE: | ||||
case VM_INHERIT_COPY: | case VM_INHERIT_COPY: | ||||
case VM_INHERIT_SHARE: | case VM_INHERIT_SHARE: | ||||
case VM_INHERIT_ZERO: | case VM_INHERIT_ZERO: | ||||
break; | break; | ||||
default: | default: | ||||
return (KERN_INVALID_ARGUMENT); | return (KERN_INVALID_ARGUMENT); | ||||
} | } | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (vm_map_lookup_entry(map, start, &temp_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
entry = temp_entry; | if (entry == prev) | ||||
vm_map_clip_start(map, entry, start); | prev = _vm_map_clip_start(map, entry, start); | ||||
} else | |||||
entry = temp_entry->next; | |||||
while (entry->start < end) { | while (entry->start < end) { | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || | if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || | ||||
new_inheritance != VM_INHERIT_ZERO) | new_inheritance != VM_INHERIT_ZERO) | ||||
entry->inheritance = new_inheritance; | entry->inheritance = new_inheritance; | ||||
vm_map_try_merge_entries(map, entry->prev, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
prev = entry; | |||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
vm_map_try_merge_entries(map, entry->prev, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_entry_in_transition: | * vm_map_entry_in_transition: | ||||
* | * | ||||
* Release the map lock, and sleep until the entry is no longer in | * Release the map lock, and sleep until the entry is no longer in | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
* vm_map_unwire: | * vm_map_unwire: | ||||
* | * | ||||
* Implements both kernel and user unwiring. | * Implements both kernel and user unwiring. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
int flags) | int flags) | ||||
{ | { | ||||
vm_map_entry_t entry, first_entry; | vm_map_entry_t entry, prev; | ||||
int rv; | int rv; | ||||
bool first_iteration, holes_ok, need_wakeup, user_unwire; | bool first_iteration, holes_ok, need_wakeup, user_unwire; | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | ||||
user_unwire = (flags & VM_MAP_WIRE_USER) != 0; | user_unwire = (flags & VM_MAP_WIRE_USER) != 0; | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
if (holes_ok) | if (prev != entry && entry->start > start && !holes_ok) { | ||||
first_entry = first_entry->next; | |||||
else { | |||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | } | ||||
} | |||||
first_iteration = true; | first_iteration = true; | ||||
entry = first_entry; | |||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
while (entry->start < end) { | while (entry->start < end) { | ||||
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | ||||
/* | /* | ||||
* We have not yet clipped the entry. | * We have not yet clipped the entry. | ||||
*/ | */ | ||||
entry = vm_map_entry_in_transition(map, start, &end, | entry = vm_map_entry_in_transition(map, start, &end, | ||||
holes_ok, entry); | holes_ok, entry); | ||||
if (entry == NULL) { | if (entry == NULL) { | ||||
if (first_iteration) { | if (first_iteration) { | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | } | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
break; | break; | ||||
} | } | ||||
first_entry = first_iteration ? entry : NULL; | prev = NULL; | ||||
continue; | continue; | ||||
} | } | ||||
first_iteration = false; | first_iteration = false; | ||||
if (prev == entry) | |||||
prev = _vm_map_clip_start(map, entry, start); | |||||
else | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
vm_map_clip_start(map, entry, start); | |||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
/* | /* | ||||
* Mark the entry in case the map lock is released. (See | * Mark the entry in case the map lock is released. (See | ||||
* above.) | * above.) | ||||
*/ | */ | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | ||||
entry->wiring_thread == NULL, | entry->wiring_thread == NULL, | ||||
("owned map entry %p", entry)); | ("%s: owned map entry %p", __func__, entry)); | ||||
entry->eflags |= MAP_ENTRY_IN_TRANSITION; | entry->eflags |= MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = curthread; | entry->wiring_thread = curthread; | ||||
/* | /* | ||||
* Check the map for holes in the specified region. | * Check the map for holes in the specified region. | ||||
* If holes_ok, skip this check. | * If holes_ok, skip this check. | ||||
*/ | */ | ||||
if (!holes_ok && | if (!holes_ok && | ||||
(entry->end < end && entry->next->start > entry->end)) { | (entry->end < end && entry->next->start > entry->end)) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
break; | break; | ||||
} | } | ||||
/* | /* | ||||
* If system unwiring, require that the entry is system wired. | * If system unwiring, require that the entry is system wired. | ||||
*/ | */ | ||||
if (!user_unwire && | if (!user_unwire && | ||||
vm_map_entry_system_wired_count(entry) == 0) { | vm_map_entry_system_wired_count(entry) == 0) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ARGUMENT; | rv = KERN_INVALID_ARGUMENT; | ||||
break; | break; | ||||
} | } | ||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
need_wakeup = false; | need_wakeup = false; | ||||
if (first_entry == NULL && | if (prev == NULL) { | ||||
!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); | KASSERT(entry->start == start || holes_ok, | ||||
first_entry = first_entry->next; | ("%s: lookup failed", __func__)); | ||||
} | } | ||||
for (entry = first_entry; entry->start < end; entry = entry->next) { | for (entry = prev->next; entry->start < end; | ||||
prev = entry, entry = entry->next) { | |||||
/* | /* | ||||
* If holes_ok was specified, an empty | * If holes_ok was specified, an empty | ||||
* space in the unwired region could have been mapped | * space in the unwired region could have been mapped | ||||
* while the map lock was dropped for draining | * while the map lock was dropped for draining | ||||
* MAP_ENTRY_IN_TRANSITION. Moreover, another thread | * MAP_ENTRY_IN_TRANSITION. Moreover, another thread | ||||
* could be simultaneously wiring this new mapping | * could be simultaneously wiring this new mapping | ||||
* entry. Detect these cases and skip any entries | * entry. Detect these cases and skip any entries | ||||
* marked as in transition by us. | * marked as in transition by us. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | ||||
entry->wiring_thread != curthread) { | entry->wiring_thread != curthread) { | ||||
KASSERT(holes_ok, | KASSERT(holes_ok, | ||||
("vm_map_unwire: !HOLESOK and new/changed entry")); | ("%s: !HOLESOK and new/changed entry", __func__)); | ||||
continue; | continue; | ||||
} | } | ||||
if (rv == KERN_SUCCESS && (!user_unwire || | if (rv == KERN_SUCCESS && (!user_unwire || | ||||
(entry->eflags & MAP_ENTRY_USER_WIRED))) { | (entry->eflags & MAP_ENTRY_USER_WIRED))) { | ||||
if (entry->wired_count == 1) | if (entry->wired_count == 1) | ||||
vm_map_entry_unwire(map, entry); | vm_map_entry_unwire(map, entry); | ||||
else | else | ||||
entry->wired_count--; | entry->wired_count--; | ||||
if (user_unwire) | if (user_unwire) | ||||
entry->eflags &= ~MAP_ENTRY_USER_WIRED; | entry->eflags &= ~MAP_ENTRY_USER_WIRED; | ||||
} | } | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | ||||
("vm_map_unwire: in-transition flag missing %p", entry)); | ("%s: in-transition flag missing %p", __func__, entry)); | ||||
KASSERT(entry->wiring_thread == curthread, | KASSERT(entry->wiring_thread == curthread, | ||||
("vm_map_unwire: alien wire %p", entry)); | ("%s: alien wire %p", __func__, entry)); | ||||
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; | entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = NULL; | entry->wiring_thread = NULL; | ||||
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | ||||
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | ||||
need_wakeup = true; | need_wakeup = true; | ||||
} | } | ||||
vm_map_try_merge_entries(map, entry->prev, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
} | } | ||||
vm_map_try_merge_entries(map, entry->prev, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
vm_map_unlock(map); | vm_map_unlock(map); | ||||
if (need_wakeup) | if (need_wakeup) | ||||
vm_map_wakeup(map); | vm_map_wakeup(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
static void | static void | ||||
vm_map_wire_user_count_sub(u_long npages) | vm_map_wire_user_count_sub(u_long npages) | ||||
▲ Show 20 Lines • Show All 69 Lines • ▼ Show 20 Lines | |||||
* vm_map_wire_locked: | * vm_map_wire_locked: | ||||
* | * | ||||
* Implements both kernel and user wiring. Returns with the map locked, | * Implements both kernel and user wiring. Returns with the map locked, | ||||
* the map lock may be dropped. | * the map lock may be dropped. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) | vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) | ||||
{ | { | ||||
vm_map_entry_t entry, first_entry, tmp_entry; | vm_map_entry_t entry, prev, tmp_entry; | ||||
vm_offset_t faddr, saved_end, saved_start; | vm_offset_t faddr, saved_end, saved_start; | ||||
u_long npages; | u_long npages; | ||||
u_int last_timestamp; | u_int last_timestamp; | ||||
int rv; | int rv; | ||||
bool first_iteration, holes_ok, need_wakeup, user_wire; | bool first_iteration, holes_ok, need_wakeup, user_wire; | ||||
vm_prot_t prot; | vm_prot_t prot; | ||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
prot = 0; | prot = 0; | ||||
if (flags & VM_MAP_WIRE_WRITE) | if (flags & VM_MAP_WIRE_WRITE) | ||||
prot |= VM_PROT_WRITE; | prot |= VM_PROT_WRITE; | ||||
holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; | ||||
user_wire = (flags & VM_MAP_WIRE_USER) != 0; | user_wire = (flags & VM_MAP_WIRE_USER) != 0; | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
if (holes_ok) | if (prev != entry && entry->start > start && !holes_ok) | ||||
first_entry = first_entry->next; | |||||
else | |||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | |||||
first_iteration = true; | first_iteration = true; | ||||
entry = first_entry; | |||||
while (entry->start < end) { | while (entry->start < end) { | ||||
if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { | ||||
/* | /* | ||||
* We have not yet clipped the entry. | * We have not yet clipped the entry. | ||||
*/ | */ | ||||
entry = vm_map_entry_in_transition(map, start, &end, | entry = vm_map_entry_in_transition(map, start, &end, | ||||
holes_ok, entry); | holes_ok, entry); | ||||
if (entry == NULL) { | if (entry == NULL) { | ||||
if (first_iteration) | if (first_iteration) | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | goto done; | ||||
} | } | ||||
first_entry = first_iteration ? entry : NULL; | prev = NULL; | ||||
continue; | continue; | ||||
} | } | ||||
first_iteration = false; | first_iteration = false; | ||||
if (prev == entry) | |||||
prev = _vm_map_clip_start(map, entry, start); | |||||
else | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
/* | /* | ||||
* Mark the entry in case the map lock is released. (See | * Mark the entry in case the map lock is released. (See | ||||
* above.) | * above.) | ||||
*/ | */ | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && | ||||
entry->wiring_thread == NULL, | entry->wiring_thread == NULL, | ||||
("owned map entry %p", entry)); | ("%s: owned map entry %p", __func__, entry)); | ||||
entry->eflags |= MAP_ENTRY_IN_TRANSITION; | entry->eflags |= MAP_ENTRY_IN_TRANSITION; | ||||
entry->wiring_thread = curthread; | entry->wiring_thread = curthread; | ||||
if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | ||||
|| (entry->protection & prot) != prot) { | || (entry->protection & prot) != prot) { | ||||
entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; | entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; | ||||
if (!holes_ok) { | if (!holes_ok) { | ||||
end = entry->end; | end = entry->end; | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
Show All 38 Lines | if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 | ||||
* Look again for the entry because the map was | * Look again for the entry because the map was | ||||
* modified while it was unlocked. The entry | * modified while it was unlocked. The entry | ||||
* may have been clipped, but NOT merged or | * may have been clipped, but NOT merged or | ||||
* deleted. | * deleted. | ||||
*/ | */ | ||||
if (!vm_map_lookup_entry(map, saved_start, | if (!vm_map_lookup_entry(map, saved_start, | ||||
&tmp_entry)) | &tmp_entry)) | ||||
KASSERT(false, | KASSERT(false, | ||||
("vm_map_wire: lookup failed")); | ("%s: lookup failed", __func__)); | ||||
if (entry == first_entry) | prev = NULL; | ||||
first_entry = tmp_entry; | |||||
else | |||||
first_entry = NULL; | |||||
entry = tmp_entry; | entry = tmp_entry; | ||||
while (entry->end < saved_end) { | while (entry->end < saved_end) { | ||||
/* | /* | ||||
* In case of failure, handle entries | * In case of failure, handle entries | ||||
* that were not fully wired here; | * that were not fully wired here; | ||||
* fully wired entries are handled | * fully wired entries are handled | ||||
* later. | * later. | ||||
*/ | */ | ||||
Show All 25 Lines | if (!holes_ok && | ||||
rv = KERN_INVALID_ADDRESS; | rv = KERN_INVALID_ADDRESS; | ||||
goto done; | goto done; | ||||
} | } | ||||
entry = entry->next; | entry = entry->next; | ||||
} | } | ||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
done: | done: | ||||
need_wakeup = false; | need_wakeup = false; | ||||
if (first_entry == NULL && | if (prev == NULL) { | ||||
!vm_map_lookup_entry(map, start, &first_entry)) { | prev = vm_map_lookup_entry_and_prev(map, start, &entry); | ||||
KASSERT(holes_ok, ("vm_map_wire: lookup failed")); | KASSERT(entry->start == start || holes_ok, | ||||
first_entry = first_entry->next; | ("%s: lookup failed", __func__)); | ||||
} | } | ||||
for (entry = first_entry; entry->start < end; entry = entry->next) { | for (entry = prev->next; entry->start < end; | ||||
prev = entry, entry = entry->next) { | |||||
/* | /* | ||||
* If holes_ok was specified, an empty | * If holes_ok was specified, an empty | ||||
* space in the unwired region could have been mapped | * space in the unwired region could have been mapped | ||||
* while the map lock was dropped for faulting in the | * while the map lock was dropped for faulting in the | ||||
* pages or draining MAP_ENTRY_IN_TRANSITION. | * pages or draining MAP_ENTRY_IN_TRANSITION. | ||||
* Moreover, another thread could be simultaneously | * Moreover, another thread could be simultaneously | ||||
* wiring this new mapping entry. Detect these cases | * wiring this new mapping entry. Detect these cases | ||||
* and skip any entries marked as in transition not by us. | * and skip any entries marked as in transition not by us. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || | ||||
entry->wiring_thread != curthread) { | entry->wiring_thread != curthread) { | ||||
KASSERT(holes_ok, | KASSERT(holes_ok, | ||||
("vm_map_wire: !HOLESOK and new/changed entry")); | ("%s: !HOLESOK and new/changed entry", __func__)); | ||||
continue; | continue; | ||||
} | } | ||||
if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | ||||
/* do nothing */ | /* do nothing */ | ||||
} else if (rv == KERN_SUCCESS) { | } else if (rv == KERN_SUCCESS) { | ||||
if (user_wire) | if (user_wire) | ||||
entry->eflags |= MAP_ENTRY_USER_WIRED; | entry->eflags |= MAP_ENTRY_USER_WIRED; | ||||
Show All 13 Lines | if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { | ||||
vm_map_entry_unwire(map, entry); | vm_map_entry_unwire(map, entry); | ||||
if (user_wire) | if (user_wire) | ||||
vm_map_wire_user_count_sub( | vm_map_wire_user_count_sub( | ||||
atop(entry->end - entry->start)); | atop(entry->end - entry->start)); | ||||
} else | } else | ||||
entry->wired_count--; | entry->wired_count--; | ||||
} | } | ||||
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, | ||||
("vm_map_wire: in-transition flag missing %p", entry)); | ("%s: in-transition flag missing %p", __func__, entry)); | ||||
KASSERT(entry->wiring_thread == curthread, | KASSERT(entry->wiring_thread == curthread, | ||||
("vm_map_wire: alien wire %p", entry)); | ("%s: alien wire %p", __func__, entry)); | ||||
entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | | entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | | ||||
MAP_ENTRY_WIRE_SKIPPED); | MAP_ENTRY_WIRE_SKIPPED); | ||||
entry->wiring_thread = NULL; | entry->wiring_thread = NULL; | ||||
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { | ||||
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; | ||||
need_wakeup = true; | need_wakeup = true; | ||||
} | } | ||||
vm_map_try_merge_entries(map, entry->prev, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
} | } | ||||
vm_map_try_merge_entries(map, entry->prev, entry); | vm_map_try_merge_entries(map, prev, entry); | ||||
if (need_wakeup) | if (need_wakeup) | ||||
vm_map_wakeup(map); | vm_map_wakeup(map); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_sync | * vm_map_sync | ||||
* | * | ||||
Show All 26 Lines | vm_map_sync( | ||||
unsigned int last_timestamp; | unsigned int last_timestamp; | ||||
boolean_t failed; | boolean_t failed; | ||||
vm_map_lock_read(map); | vm_map_lock_read(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
if (!vm_map_lookup_entry(map, start, &entry)) { | if (!vm_map_lookup_entry(map, start, &entry)) { | ||||
vm_map_unlock_read(map); | vm_map_unlock_read(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} else if (start == end) { | } | ||||
if (start == end) { | |||||
start = entry->start; | start = entry->start; | ||||
end = entry->end; | end = entry->end; | ||||
} | } | ||||
/* | /* | ||||
* Make a first pass to check for user-wired memory and holes. | * Make a first pass to check for user-wired memory and holes. | ||||
*/ | */ | ||||
for (current = entry; current->start < end; current = current->next) { | for (current = entry; current->start < end; current = current->next) { | ||||
if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { | if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { | ||||
Show All 38 Lines | for (current = entry; current->start < end;) { | ||||
vm_object_reference(object); | vm_object_reference(object); | ||||
last_timestamp = map->timestamp; | last_timestamp = map->timestamp; | ||||
vm_map_unlock_read(map); | vm_map_unlock_read(map); | ||||
if (!vm_object_sync(object, offset, size, syncio, invalidate)) | if (!vm_object_sync(object, offset, size, syncio, invalidate)) | ||||
failed = TRUE; | failed = TRUE; | ||||
start += size; | start += size; | ||||
vm_object_deallocate(object); | vm_object_deallocate(object); | ||||
vm_map_lock_read(map); | vm_map_lock_read(map); | ||||
if (last_timestamp == map->timestamp || | if (last_timestamp == map->timestamp) | ||||
!vm_map_lookup_entry(map, start, ¤t)) | |||||
current = current->next; | current = current->next; | ||||
else | |||||
vm_map_lookup_entry_ge(map, start, ¤t); | |||||
} | } | ||||
vm_map_unlock_read(map); | vm_map_unlock_read(map); | ||||
return (failed ? KERN_FAILURE : KERN_SUCCESS); | return (failed ? KERN_FAILURE : KERN_SUCCESS); | ||||
} | } | ||||
/* | /* | ||||
* vm_map_entry_unwire: [ internal use only ] | * vm_map_entry_unwire: [ internal use only ] | ||||
▲ Show 20 Lines • Show All 116 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* Deallocates the given address range from the target | * Deallocates the given address range from the target | ||||
* map. | * map. | ||||
*/ | */ | ||||
int | int | ||||
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) | vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) | ||||
{ | { | ||||
vm_map_entry_t entry; | vm_map_entry_t entry; | ||||
vm_map_entry_t first_entry; | |||||
VM_MAP_ASSERT_LOCKED(map); | VM_MAP_ASSERT_LOCKED(map); | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
/* | /* | ||||
* Find the start of the region, and clip it | * Find the start of the region, and clip it | ||||
*/ | */ | ||||
if (!vm_map_lookup_entry(map, start, &first_entry)) | if (vm_map_lookup_entry_ge(map, start, &entry)) | ||||
entry = first_entry->next; | |||||
else { | |||||
entry = first_entry; | |||||
vm_map_clip_start(map, entry, start); | vm_map_clip_start(map, entry, start); | ||||
} | |||||
/* | /* | ||||
* Step through all entries in this region | * Step through all entries in this region | ||||
*/ | */ | ||||
while (entry->start < end) { | while (entry->start < end) { | ||||
vm_map_entry_t next; | vm_map_entry_t next; | ||||
/* | /* | ||||
* Wait for wiring or unwiring of an entry to complete. | * Wait for wiring or unwiring of an entry to complete. | ||||
* Also wait for any system wirings to disappear on | * Also wait for any system wirings to disappear on | ||||
* user maps. | * user maps. | ||||
*/ | */ | ||||
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || | if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || | ||||
(vm_map_pmap(map) != kernel_pmap && | (vm_map_pmap(map) != kernel_pmap && | ||||
vm_map_entry_system_wired_count(entry) != 0)) { | vm_map_entry_system_wired_count(entry) != 0)) { | ||||
unsigned int last_timestamp; | unsigned int last_timestamp; | ||||
vm_offset_t saved_start; | vm_offset_t saved_start; | ||||
vm_map_entry_t tmp_entry; | |||||
saved_start = entry->start; | saved_start = entry->start; | ||||
entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; | entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; | ||||
last_timestamp = map->timestamp; | last_timestamp = map->timestamp; | ||||
(void) vm_map_unlock_and_wait(map, 0); | (void) vm_map_unlock_and_wait(map, 0); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
if (last_timestamp + 1 != map->timestamp) { | if (last_timestamp + 1 == map->timestamp) | ||||
continue; | |||||
/* | /* | ||||
* Look again for the entry because the map was | * Look again for the entry because the map was | ||||
* modified while it was unlocked. | * modified while it was unlocked. Specifically, the | ||||
* Specifically, the entry may have been | * entry may have been clipped, merged, or deleted. | ||||
* clipped, merged, or deleted. | |||||
*/ | */ | ||||
if (!vm_map_lookup_entry(map, saved_start, | if (vm_map_lookup_entry_ge(map, saved_start, &entry)) | ||||
&tmp_entry)) | vm_map_clip_start(map, entry, saved_start); | ||||
entry = tmp_entry->next; | |||||
else { | |||||
entry = tmp_entry; | |||||
vm_map_clip_start(map, entry, | |||||
saved_start); | |||||
} | |||||
} | |||||
continue; | continue; | ||||
} | } | ||||
vm_map_clip_end(map, entry, end); | vm_map_clip_end(map, entry, end); | ||||
next = entry->next; | next = entry->next; | ||||
/* | /* | ||||
* Unwire before removing addresses from the pmap; otherwise, | * Unwire before removing addresses from the pmap; otherwise, | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* The map must be locked. A read lock is sufficient. | * The map must be locked. A read lock is sufficient. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, | vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, | ||||
vm_prot_t protection) | vm_prot_t protection) | ||||
{ | { | ||||
vm_map_entry_t entry; | vm_map_entry_t entry; | ||||
vm_map_entry_t tmp_entry; | |||||
if (!vm_map_lookup_entry(map, start, &tmp_entry)) | if (!vm_map_lookup_entry(map, start, &entry)) | ||||
return (FALSE); | return (FALSE); | ||||
entry = tmp_entry; | |||||
while (start < end) { | while (start < end) { | ||||
/* | /* | ||||
* No holes allowed! | * No holes allowed! | ||||
*/ | */ | ||||
if (start < entry->start) | if (start < entry->start) | ||||
return (FALSE); | return (FALSE); | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 503 Lines • ▼ Show 20 Lines | vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, | ||||
if (rv == KERN_SUCCESS) { | if (rv == KERN_SUCCESS) { | ||||
/* | /* | ||||
* Gap can never successfully handle a fault, so | * Gap can never successfully handle a fault, so | ||||
* read-ahead logic is never used for it. Re-use | * read-ahead logic is never used for it. Re-use | ||||
* next_read of the gap entry to store | * next_read of the gap entry to store | ||||
* stack_guard_page for vm_map_growstack(). | * stack_guard_page for vm_map_growstack(). | ||||
*/ | */ | ||||
if (orient == MAP_STACK_GROWS_DOWN) | if (orient == MAP_STACK_GROWS_DOWN) | ||||
new_entry->prev->next_read = sgp; | prev_entry->next->next_read = sgp; | ||||
else | else | ||||
new_entry->next->next_read = sgp; | new_entry->next->next_read = sgp; | ||||
} else { | } else { | ||||
(void)vm_map_delete(map, bot, top); | (void)vm_map_delete(map, bot, top); | ||||
} | } | ||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we | * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we | ||||
* successfully grow the stack. | * successfully grow the stack. | ||||
*/ | */ | ||||
static int | static int | ||||
vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) | vm_map_growstack(vm_map_t map, vm_offset_t addr, | ||||
vm_map_entry_t gap_entry, vm_map_entry_t stack_entry) | |||||
{ | { | ||||
vm_map_entry_t stack_entry; | |||||
struct proc *p; | struct proc *p; | ||||
struct vmspace *vm; | struct vmspace *vm; | ||||
struct ucred *cred; | struct ucred *cred; | ||||
vm_offset_t gap_end, gap_start, grow_start; | vm_offset_t gap_end, gap_start, grow_start; | ||||
vm_size_t grow_amount, guard, max_grow; | vm_size_t grow_amount, guard, max_grow; | ||||
rlim_t lmemlim, stacklim, vmemlim; | rlim_t lmemlim, stacklim, vmemlim; | ||||
int rv, rv1; | int rv, rv1; | ||||
bool gap_deleted, grow_down, is_procstack; | bool gap_deleted, grow_down, is_procstack; | ||||
Show All 18 Lines | #endif | ||||
MPASS(!map->system_map); | MPASS(!map->system_map); | ||||
lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); | lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); | ||||
stacklim = lim_cur(curthread, RLIMIT_STACK); | stacklim = lim_cur(curthread, RLIMIT_STACK); | ||||
vmemlim = lim_cur(curthread, RLIMIT_VMEM); | vmemlim = lim_cur(curthread, RLIMIT_VMEM); | ||||
retry: | retry: | ||||
/* If addr is not in a hole for a stack grow area, no need to grow. */ | /* If addr is not in a hole for a stack grow area, no need to grow. */ | ||||
if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) | if (gap_entry == NULL && | ||||
!vm_map_lookup_helper(map, addr, true, &gap_entry, &stack_entry)) | |||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) | if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { | if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { | ||||
stack_entry = gap_entry->next; | |||||
if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || | if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || | ||||
stack_entry->start != gap_entry->end) | stack_entry->start != gap_entry->end) | ||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
grow_amount = round_page(stack_entry->start - addr); | grow_amount = round_page(stack_entry->start - addr); | ||||
grow_down = true; | grow_down = true; | ||||
} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { | } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { | ||||
stack_entry = gap_entry->prev; | |||||
if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || | if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || | ||||
stack_entry->end != gap_entry->start) | stack_entry->end != gap_entry->start) | ||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
grow_amount = round_page(addr + 1 - stack_entry->end); | grow_amount = round_page(addr + 1 - stack_entry->end); | ||||
grow_down = false; | grow_down = false; | ||||
} else { | } else { | ||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 259 Lines • ▼ Show 20 Lines | vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ | ||||
vm_offset_t vaddr, | vm_offset_t vaddr, | ||||
vm_prot_t fault_typea, | vm_prot_t fault_typea, | ||||
vm_map_entry_t *out_entry, /* OUT */ | vm_map_entry_t *out_entry, /* OUT */ | ||||
vm_object_t *object, /* OUT */ | vm_object_t *object, /* OUT */ | ||||
vm_pindex_t *pindex, /* OUT */ | vm_pindex_t *pindex, /* OUT */ | ||||
vm_prot_t *out_prot, /* OUT */ | vm_prot_t *out_prot, /* OUT */ | ||||
boolean_t *wired) /* OUT */ | boolean_t *wired) /* OUT */ | ||||
{ | { | ||||
vm_map_entry_t entry; | vm_map_entry_t entry, nbr; | ||||
vm_map_t map = *var_map; | vm_map_t map = *var_map; | ||||
vm_prot_t prot; | vm_prot_t prot; | ||||
vm_prot_t fault_type = fault_typea; | vm_prot_t fault_type = fault_typea; | ||||
vm_object_t eobject; | vm_object_t eobject; | ||||
vm_size_t size; | vm_size_t size; | ||||
struct ucred *cred; | struct ucred *cred; | ||||
RetryLookup: | RetryLookup: | ||||
vm_map_lock_read(map); | vm_map_lock_read(map); | ||||
RetryLookupLocked: | RetryLookupLocked: | ||||
/* | /* | ||||
* Lookup the faulting address. | * Lookup the faulting address. | ||||
*/ | */ | ||||
if (!vm_map_lookup_entry(map, vaddr, out_entry)) { | if (!vm_map_lookup_helper(map, vaddr, true, out_entry, &nbr)) { | ||||
vm_map_unlock_read(map); | vm_map_unlock_read(map); | ||||
return (KERN_INVALID_ADDRESS); | return (KERN_INVALID_ADDRESS); | ||||
} | } | ||||
entry = *out_entry; | entry = *out_entry; | ||||
/* | /* | ||||
* Handle submaps. | * Handle submaps. | ||||
Show All 11 Lines | RetryLookupLocked: | ||||
*/ | */ | ||||
prot = entry->protection; | prot = entry->protection; | ||||
if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { | if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { | ||||
fault_typea &= ~VM_PROT_FAULT_LOOKUP; | fault_typea &= ~VM_PROT_FAULT_LOOKUP; | ||||
if (prot == VM_PROT_NONE && map != kernel_map && | if (prot == VM_PROT_NONE && map != kernel_map && | ||||
(entry->eflags & MAP_ENTRY_GUARD) != 0 && | (entry->eflags & MAP_ENTRY_GUARD) != 0 && | ||||
(entry->eflags & (MAP_ENTRY_STACK_GAP_DN | | (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | | ||||
MAP_ENTRY_STACK_GAP_UP)) != 0 && | MAP_ENTRY_STACK_GAP_UP)) != 0 && | ||||
vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) | vm_map_growstack(map, vaddr, entry, nbr) == KERN_SUCCESS) | ||||
goto RetryLookupLocked; | goto RetryLookupLocked; | ||||
} | } | ||||
fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; | fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; | ||||
if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { | if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { | ||||
vm_map_unlock_read(map); | vm_map_unlock_read(map); | ||||
return (KERN_PROTECTION_FAILURE); | return (KERN_PROTECTION_FAILURE); | ||||
} | } | ||||
KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & | KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & | ||||
▲ Show 20 Lines • Show All 332 Lines • Show Last 20 Lines |
The OUT comment does not look very useful and is somewhat cryptic now that the function definition is styled. I think it is best to drop it.
Instead, it might make sense to split off a new paragraph in the herald comment and explain both the entry and nbr results (i.e., move the last sentence there and add a description of entry).
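For illustration only, one possible shape for such a paragraph is sketched below; the wording, and the use of the name nbr, simply follow the local variable seen in vm_map_lookup() and are a suggestion rather than part of the patch:

/*
 * On a successful lookup, *out_entry refers to the map entry that
 * covers the given address.  When the address falls within a stack
 * gap entry, the adjacent stack entry (returned here as nbr) is the
 * entry that vm_map_growstack() would extend, so the caller does not
 * have to repeat the lookup before growing the stack.
 */

Something along these lines would keep the parameter list free of per-argument comments while still documenting both results in one place.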