sys/vm/vm_map.c
[... context not available ...]
  *
  * As mentioned above, virtual copy operations are performed
  * by copying VM object references from one map to
  * another, and then marking both regions as copy-on-write.
  */

 static struct mtx map_sleep_mtx;
 static uma_zone_t mapentzone;
 static uma_zone_t kmapentzone;
 static uma_zone_t mapzone;
 static uma_zone_t vmspace_zone;
 static int vmspace_zinit(void *mem, int size, int flags);
 static int vm_map_zinit(void *mem, int ize, int flags);
 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
     vm_offset_t max);
+static int vm_map_alignspace(vm_map_t map, vm_object_t object,
+    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
+    vm_offset_t max_addr, vm_offset_t alignment);
 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
     vm_map_entry_t gap_entry);
 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
 #ifdef INVARIANTS
[... 14 lines not shown ...]
 	if (map->system_map)
 		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
 	else
 		sx_assert_(&map->lock, SA_XLOCKED, file, line);
 }

 #define	VM_MAP_ASSERT_LOCKED(map) \
     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
+static void
+_vm_map_assert_consistent(vm_map_t map)
+{
+	vm_map_entry_t entry;
+	vm_map_entry_t child;
+	vm_size_t max_left, max_right;
+
+	for (entry = map->header.next; entry != &map->header;
+	    entry = entry->next) {
+		KASSERT(entry->prev->end <= entry->start,
+		    ("map %p prev->end = %jx, start = %jx", map,
+		    (uintmax_t)entry->prev->end, (uintmax_t)entry->start));

kib: This format (%lu) breaks on ILP32. Use %jx and cast the args to uintmax_t.

markj (done): Minor nit: the convention is not to add a period at the end of panic string or assertion messages.

+		KASSERT(entry->start < entry->end,
+		    ("map %p start = %jx, end = %jx", map,
+		    (uintmax_t)entry->start, (uintmax_t)entry->end));
+		KASSERT(entry->end <= entry->next->start,
+		    ("map %p end = %jx, next->start = %jx", map,
+		    (uintmax_t)entry->end, (uintmax_t)entry->next->start));
+		KASSERT(entry->left == NULL ||
+		    entry->left->start < entry->start,
+		    ("map %p left->start = %jx, start = %jx", map,
+		    (uintmax_t)entry->left->start, (uintmax_t)entry->start));
+		KASSERT(entry->right == NULL ||
+		    entry->start < entry->right->start,
+		    ("map %p start = %jx, right->start = %jx", map,
+		    (uintmax_t)entry->start, (uintmax_t)entry->right->start));
+		child = entry->left;
+		max_left = (child != NULL) ? child->max_free :
+		    entry->start - entry->prev->end;
+		child = entry->right;
+		max_right = (child != NULL) ? child->max_free :
+		    entry->next->start - entry->end;
+		KASSERT(entry->max_free == MAX(max_left, max_right),
+		    ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
+		    (uintmax_t)entry->max_free,
+		    (uintmax_t)max_left, (uintmax_t)max_right));
+	}
+}
+
+#define	VM_MAP_ASSERT_CONSISTENT(map) \
+    _vm_map_assert_consistent(map)
 #else
 #define	VM_MAP_ASSERT_LOCKED(map)
+#define	VM_MAP_ASSERT_CONSISTENT(map)
 #endif
 /*
  *	_vm_map_unlock_and_wait:
  *
  *	Atomically releases the lock on the specified map and puts the calling
  *	thread to sleep.  The calling thread will remain asleep until either
  *	vm_map_wakeup() is performed on the map or the specified timeout is
[... 14 lines not shown ...]
 /*
  *	vm_map_entry_set_max_free:
  *
  *	Set the max_free field in a vm_map_entry.
  */
 static inline void
 vm_map_entry_set_max_free(vm_map_entry_t entry)
 {
+	vm_map_entry_t child;
+	vm_size_t max_left, max_right;

-	entry->max_free = entry->adj_free;
-	if (entry->left != NULL && entry->left->max_free > entry->max_free)
-		entry->max_free = entry->left->max_free;
-	if (entry->right != NULL && entry->right->max_free > entry->max_free)
-		entry->max_free = entry->right->max_free;
+	child = entry->left;
+	max_left = (child != NULL) ? child->max_free :
+	    entry->start - entry->prev->end;
+	child = entry->right;
+	max_right = (child != NULL) ? child->max_free :
+	    entry->next->start - entry->end;
+	entry->max_free = MAX(max_left, max_right);
 }
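The rewritten vm_map_entry_set_max_free() is the heart of the change: the adj_free field is gone, and max_free now summarizes the largest gap anywhere in an entry's subtree, with a missing child standing in for the gap between the entry and its list neighbor. A self-contained sketch of the invariant, using made-up names (struct node, set_max_free) rather than the kernel types:

    #include <stddef.h>

    struct node {
        size_t start, end;          /* mapped range [start, end) */
        struct node *left, *right;  /* binary search tree links */
        struct node *prev, *next;   /* list links, in address order */
        size_t max_free;            /* largest gap in this subtree */
    };

    #define GAP_MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Recompute max_free for n, assuming both children are up to date. */
    static void
    set_max_free(struct node *n)
    {
        size_t max_left, max_right;

        /* A missing left child stands in for the gap just before n. */
        max_left = (n->left != NULL) ? n->left->max_free :
            n->start - n->prev->end;
        /* A missing right child stands in for the gap just after n. */
        max_right = (n->right != NULL) ? n->right->max_free :
            n->next->start - n->end;
        n->max_free = GAP_MAX(max_left, max_right);
    }

Every gap between consecutive entries borders a missing child in exactly one node, so each gap is counted exactly once, and the map header serving as prev/next of the extreme entries keeps both subtractions well defined.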
+#define	SPLAY_LEFT_STEP(root, y, rlist, test) do {	\
+	y = root->left;					\
+	if (y != NULL && (test)) {			\
+		/* Rotate right and make y root. */	\
+		root->left = y->right;			\
+		y->right = root;			\
+		vm_map_entry_set_max_free(root);	\
+		root = y;				\
+		y = root->left;				\
+	}						\
+	/* Put root on rlist. */			\
+	root->left = rlist;				\
+	rlist = root;					\
+	root = y;					\
+} while (0)
+
+#define	SPLAY_RIGHT_STEP(root, y, llist, test) do {	\
+	y = root->right;				\
+	if (y != NULL && (test)) {			\
+		/* Rotate left and make y root. */	\
+		root->right = y->left;			\
+		y->left = root;				\
+		vm_map_entry_set_max_free(root);	\
+		root = y;				\
+		y = root->right;			\
+	}						\
+	/* Put root on llist. */			\
+	root->right = llist;				\
+	llist = root;					\
+	root = y;					\
+} while (0)
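A macro rather than a function is used here so that the rotate-or-not test can mention y, which the step itself assigns, along with call-site locals such as llist and length. Written as a function over the patch's own types, the same left step needs a callback for the test; this is only an illustration of the mechanics, not part of the patch:

    /* test(y, arg) decides whether the search continues left of y. */
    static vm_map_entry_t
    splay_left_step(vm_map_entry_t root, vm_map_entry_t *rlist,
        bool (*test)(vm_map_entry_t y, void *arg), void *arg)
    {
            vm_map_entry_t y;

            y = root->left;
            if (y != NULL && test(y, arg)) {
                    /* Rotate right and make y the root. */
                    root->left = y->right;
                    y->right = root;
                    vm_map_entry_set_max_free(root); /* children changed */
                    root = y;
                    y = root->left;
            }
            /* Push root onto rlist, reusing its left pointer as the link. */
            root->left = *rlist;
            *rlist = root;
            return (y);     /* the search continues in the left subtree */
    }

Note that only the node whose children change gets its max_free recomputed during the step; the nodes parked on rlist are fixed up later, on the merge pass.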
kib (done): The split and merge in the names of this function and the function below refer to the llist and rlist, and not to the split of some entry? It sounds strange to name them vm_map_entry_something.

dougm (done): The split function divides the tree into an llist of nodes less than addr, an rlist of nodes greater than addr, and a root (possibly NULL) that contains addr, with left and right children less than and greater than addr. I'm trying to follow convention with the naming. I don't object to dropping 'entry' from the names of the new functions, if that's what people want.

 /*
- * vm_map_entry_splay:
- *
- *	The Sleator and Tarjan top-down splay algorithm with the
- *	following variation.  Max_free must be computed bottom-up, so
- *	on the downward pass, maintain the left and right spines in
- *	reverse order.  Then, make a second pass up each side to fix
- *	the pointers and compute max_free.  The time bound is O(log n)
- *	amortized.
- *
- *	The new root is the vm_map_entry containing "addr", or else an
- *	adjacent entry (lower or higher) if addr is not in the tree.
- *
- *	The map must be locked, and leaves it so.
- *
- *	Returns:	the new root.
+ * Walk down the tree until we find addr or a NULL pointer where addr would go,
+ * breaking off left and right subtrees of nodes less than, or greater than
+ * addr.  Treat pointers to nodes with max_free < length as NULL pointers.
+ * llist and rlist are the two sides in reverse order (bottom-up), with llist
+ * linked by the right pointer and rlist linked by the left pointer in the
+ * vm_map_entry.
  */
 static vm_map_entry_t
-vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+vm_map_splay_split(vm_offset_t addr, vm_size_t length,
+    vm_map_entry_t root, vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
 {
 	vm_map_entry_t llist, rlist;
-	vm_map_entry_t ltree, rtree;
 	vm_map_entry_t y;

-	/* Special case of empty tree. */
-	if (root == NULL)
-		return (root);
-
-	/*
-	 * Pass One: Splay down the tree until we find addr or a NULL
-	 * pointer where addr would go.  llist and rlist are the two
-	 * sides in reverse order (bottom-up), with llist linked by
-	 * the right pointer and rlist linked by the left pointer in
-	 * the vm_map_entry.  Wait until Pass Two to set max_free on
-	 * the two spines.
-	 */
 	llist = NULL;
 	rlist = NULL;
-	for (;;) {
-		/* root is never NULL in here. */
+	while (root != NULL && root->max_free >= length) {
 		if (addr < root->start) {
-			y = root->left;
-			if (y == NULL)
-				break;
-			if (addr < y->start && y->left != NULL) {
-				/* Rotate right and put y on rlist. */
-				root->left = y->right;
-				y->right = root;
-				vm_map_entry_set_max_free(root);
-				root = y->left;
-				y->left = rlist;
-				rlist = y;
-			} else {
-				/* Put root on rlist. */
-				root->left = rlist;
-				rlist = root;
-				root = y;
-			}
+			SPLAY_LEFT_STEP(root, y, rlist,
+			    y->max_free >= length && addr < y->start);
 		} else if (addr >= root->end) {
-			y = root->right;
-			if (y == NULL)
-				break;
-			if (addr >= y->end && y->right != NULL) {
-				/* Rotate left and put y on llist. */
-				root->right = y->left;
-				y->left = root;
-				vm_map_entry_set_max_free(root);
-				root = y->right;
-				y->right = llist;
-				llist = y;
-			} else {
-				/* Put root on llist. */
-				root->right = llist;
-				llist = root;
-				root = y;
-			}
+			SPLAY_RIGHT_STEP(root, y, llist,
+			    y->max_free >= length && addr >= y->end);
 		} else
 			break;
 	}
+	*out_llist = llist;
+	*out_rlist = rlist;
+	return (root);
+}
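Spelled out, what a caller gets back from the split; the comments are illustrative, not from the patch:

    root = vm_map_splay_split(addr, 0, map->root, &llist, &rlist);
    /*
     * With length == 0 nothing is pruned, so:
     * - root != NULL: root->start <= addr < root->end; root's remaining
     *   children hold the entries below and above its range.
     * - root == NULL: addr lies in a gap; llist holds the entries below
     *   addr (bottom-up, linked by right pointers, each keeping its left
     *   subtree) and rlist the entries above (linked by left pointers).
     * Either way the tree is in pieces, and the caller must put it back
     * together, e.g. with vm_map_splay_merge(), before unlocking the map.
     */

A nonzero length additionally treats subtrees with max_free < length as absent, which is how vm_map_findspace() below steers the walk toward a big-enough gap.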
+static void
+vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
+{
+	vm_map_entry_t rlist, y;
+
+	root = root->right;
+	rlist = *iolist;
+	while (root != NULL)
+		SPLAY_LEFT_STEP(root, y, rlist, true);
+	*iolist = rlist;
+}
+
+static void
+vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
+{
+	vm_map_entry_t llist, y;
+
+	root = root->left;
+	llist = *iolist;
+	while (root != NULL)
+		SPLAY_RIGHT_STEP(root, y, llist, true);
+	*iolist = llist;
+}
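With the test fixed at true, these helpers walk all the way down one side, so after a split that found root they push the entire path to root's successor (or predecessor) onto the pending list. vm_map_entry_resize_free() below uses the pattern like this (a sketch):

    vm_map_splay_findnext(root, &rlist);
    root->right = NULL;
    /*
     * The head of rlist is now root's successor, so the merge pass will
     * recompute max_free along that whole path and pick up the changed
     * gap between root->end and the successor's start.
     */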
-	/*
-	 * Pass Two: Walk back up the two spines, flip the pointers
-	 * and set max_free.  The subtrees of the root go at the
-	 * bottom of llist and rlist.
-	 */
-	ltree = root->left;
+/*
+ * Walk back up the two spines, flip the pointers and set max_free.  The
+ * subtrees of the root go at the bottom of llist and rlist.
+ */
+static vm_map_entry_t
+vm_map_splay_merge(vm_map_entry_t root,
+    vm_map_entry_t llist, vm_map_entry_t rlist,
+    vm_map_entry_t ltree, vm_map_entry_t rtree)
+{
+	vm_map_entry_t y;
+
 	while (llist != NULL) {
 		y = llist->right;
 		llist->right = ltree;
 		vm_map_entry_set_max_free(llist);
 		ltree = llist;
 		llist = y;
 	}
-	rtree = root->right;
 	while (rlist != NULL) {
 		y = rlist->left;
 		rlist->left = rtree;
 		vm_map_entry_set_max_free(rlist);
 		rtree = rlist;
 		rlist = y;
 	}

 	/*
 	 * Final assembly: add ltree and rtree as subtrees of root.
 	 */
 	root->left = ltree;
 	root->right = rtree;
 	vm_map_entry_set_max_free(root);

 	return (root);
 }
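Split and merge are a matched pair: splitting at addr and then merging with whatever the split produced is exactly one top-down splay. A sketch of the composition (vm_map_entry_splay() below is the real version, adding the recovery cases for a NULL root):

    root = vm_map_splay_split(addr, 0, map->root, &llist, &rlist);
    if (root != NULL)
            map->root = vm_map_splay_merge(root, llist, rlist,
                root->left, root->right);

The point of breaking the old one-pass splay in two is that callers such as vm_map_entry_link() and _unlink() below can edit the tree between the halves, while it is conveniently split around the edit point.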
+/*
+ * vm_map_entry_splay:
+ *
+ *	The Sleator and Tarjan top-down splay algorithm with the
+ *	following variation.  Max_free must be computed bottom-up, so
+ *	on the downward pass, maintain the left and right spines in
+ *	reverse order.  Then, make a second pass up each side to fix
+ *	the pointers and compute max_free.  The time bound is O(log n)
+ *	amortized.
+ *
+ *	The new root is the vm_map_entry containing "addr", or else an
+ *	adjacent entry (lower if possible) if addr is not in the tree.
+ *
+ *	The map must be locked, and leaves it so.
+ *
+ *	Returns:	the new root.
+ */
+static vm_map_entry_t
+vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+{
+	vm_map_entry_t llist, rlist;
+
+	root = vm_map_splay_split(addr, 0, root, &llist, &rlist);
+	if (root != NULL) {
+		/* do nothing */
+	} else if (llist != NULL) {
+		/*
+		 * Recover the greatest node in the left
+		 * subtree and make it the root.
+		 */
+		root = llist;
+		llist = root->right;
+		root->right = NULL;
+	} else if (rlist != NULL) {
+		/*
+		 * Recover the least node in the right
+		 * subtree and make it the root.
+		 */
+		root = rlist;
+		rlist = root->left;
+		root->left = NULL;
+	} else {
+		/* There is no root. */
+		return (NULL);
+	}
+	return (vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right));
+}
 /*
  *	vm_map_entry_{un,}link:
  *
  *	Insert/remove entries from maps.
  */
 static void
 vm_map_entry_link(vm_map_t map,
-    vm_map_entry_t after_where,
     vm_map_entry_t entry)
 {
+	vm_map_entry_t llist, rlist, root;

-	CTR4(KTR_VM,
-	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
-	    map->nentries, entry, after_where);
+	CTR3(KTR_VM,
+	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
+	    map->nentries, entry);
 	VM_MAP_ASSERT_LOCKED(map);
-	KASSERT(after_where->end <= entry->start,
-	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
-	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
-	KASSERT(entry->end <= after_where->next->start,
-	    ("vm_map_entry_link: new end %jx next start %jx overlap",
-	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
 	map->nentries++;
-	entry->prev = after_where;
-	entry->next = after_where->next;
-	entry->next->prev = entry;
-	after_where->next = entry;
-
-	if (after_where != &map->header) {
-		if (after_where != map->root)
-			vm_map_entry_splay(after_where->start, map->root);
-		entry->right = after_where->right;
-		entry->left = after_where;
-		after_where->right = NULL;
-		after_where->adj_free = entry->start - after_where->end;
-		vm_map_entry_set_max_free(after_where);
-	} else {
-		entry->right = map->root;
-		entry->left = NULL;
-	}
-	entry->adj_free = entry->next->start - entry->end;
-	vm_map_entry_set_max_free(entry);
+	root = map->root;
+	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
+	KASSERT(root == NULL,
+	    ("vm_map_entry_link: link object already mapped"));
+	entry->prev = (llist == NULL) ? &map->header : llist;
+	entry->next = (rlist == NULL) ? &map->header : rlist;
+	entry->prev->next = entry->next->prev = entry;
+	root = vm_map_splay_merge(entry, llist, rlist, NULL, NULL);
 	map->root = entry;
+	VM_MAP_ASSERT_CONSISTENT(map);
 }
+enum unlink_merge_type {
+	UNLINK_MERGE_PREV,
+	UNLINK_MERGE_NONE,
+	UNLINK_MERGE_NEXT
+};

kib (done): Why do you need two switches? I.e., why can't the code from the second switch's cases go straight into the cases of the first switch? I understand that you do separate llist/rlist and root calculations, but I do not quite get the separation.

dougm (done): I can get by with only one switch, but then I have to duplicate each of the three-line code snippets that start with resetting the new root value. Note that the NONE case in the first switch is likely to alter the value of 'op', so that some other case is taken in the second switch.
 static void
 vm_map_entry_unlink(vm_map_t map,
-    vm_map_entry_t entry)
+    vm_map_entry_t entry,
+    enum unlink_merge_type op)
 {
-	vm_map_entry_t next, prev, root;
+	vm_map_entry_t llist, rlist, root, y;

 	VM_MAP_ASSERT_LOCKED(map);
-	if (entry != map->root)
-		vm_map_entry_splay(entry->start, map->root);
-	if (entry->left == NULL)
-		root = entry->right;
-	else {
-		root = vm_map_entry_splay(entry->start, entry->left);
-		root->right = entry->right;
-		root->adj_free = entry->next->start - root->end;
-		vm_map_entry_set_max_free(root);
-	}
+	llist = entry->prev;
+	rlist = entry->next;
+	llist->next = rlist;
+	rlist->prev = llist;
+	root = map->root;
+	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
+	KASSERT(root != NULL,
+	    ("vm_map_entry_unlink: unlink object not mapped"));
+	switch (op) {
+	case UNLINK_MERGE_PREV:
+		vm_map_splay_findprev(root, &llist);
+		llist->end = root->end;
+		y = root->right;
+		root = llist;
+		llist = root->right;
+		root->right = y;
+		break;
+	case UNLINK_MERGE_NEXT:
+		vm_map_splay_findnext(root, &rlist);
+		rlist->start = root->start;
+		rlist->offset = root->offset;
+		y = root->left;
+		root = rlist;
+		rlist = root->left;
+		root->left = y;
+		break;
+	case UNLINK_MERGE_NONE:
+		vm_map_splay_findprev(root, &llist);
+		vm_map_splay_findnext(root, &rlist);
+		if (llist != NULL) {
+			root = llist;
+			llist = root->right;
+			root->right = NULL;
+		} else if (rlist != NULL) {
+			root = rlist;
+			rlist = root->left;
+			root->left = NULL;
+		} else
+			root = NULL;
+		break;
+	}
+	if (root != NULL)
+		root = vm_map_splay_merge(root, llist, rlist,
+		    root->left, root->right);
 	map->root = root;
+	VM_MAP_ASSERT_CONSISTENT(map);
-
-	prev = entry->prev;
-	next = entry->next;
-	next->prev = prev;
-	prev->next = next;
 	map->nentries--;
 	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
 	    map->nentries, entry);
 }
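A worked example of the merge flavors, with made-up addresses. Suppose prev covers [0x1000, 0x2000) and entry covers [0x2000, 0x3000), and the two are mergeable; then, as vm_map_simplify_entry() below does:

    vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
    /*
     * prev is gone from both the tree and the list; its successor
     * absorbed the range, so entry now covers [0x1000, 0x3000) and
     * inherited prev's offset.  UNLINK_MERGE_PREV is the mirror image:
     * the predecessor's end is extended over the removed entry.
     * UNLINK_MERGE_NONE just removes the entry, leaving a gap behind.
     */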
 /*
  *	vm_map_entry_resize_free:
  *
- *	Recompute the amount of free space following a vm_map_entry
- *	and propagate that value up the tree.  Call this function after
- *	resizing a map entry in-place, that is, without a call to
- *	vm_map_entry_link() or _unlink().
+ *	Recompute the amount of free space following a modified vm_map_entry
+ *	and propagate those values up the tree.  Call this function after
+ *	resizing a map entry in-place by changing the end value, without a
+ *	call to vm_map_entry_link() or _unlink().
  *
  *	The map must be locked, and leaves it so.
  */
 static void
 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
 {
+	vm_map_entry_t llist, rlist, root;

-	/*
-	 * Using splay trees without parent pointers, propagating
-	 * max_free up the tree is done by moving the entry to the
-	 * root and making the change there.
-	 */
-	if (entry != map->root)
-		map->root = vm_map_entry_splay(entry->start, map->root);
-
-	entry->adj_free = entry->next->start - entry->end;
-	vm_map_entry_set_max_free(entry);
-	CTR3(KTR_VM, "vm_map_entry_resize_free: map %p, nentries %d, entry %p", map,
-	    map->nentries, entry);
+	VM_MAP_ASSERT_LOCKED(map);
+	root = map->root;
+	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
+	KASSERT(root != NULL,
+	    ("vm_map_entry_resize_free: resize_free object not mapped"));
+	vm_map_splay_findnext(root, &rlist);
+	root->right = NULL;
+	map->root = vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right);
+	VM_MAP_ASSERT_CONSISTENT(map);
 }
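The only in-place resize the tree supports is a change to an entry's end, and callers are expected to pair the two steps (new_end is hypothetical):

    entry->end = new_end;                   /* grow or shrink in place */
    vm_map_entry_resize_free(map, entry);   /* repair max_free on the path */

Splaying the entry to the root means only the nodes on the two spines need their max_free values recomputed, and the merge pass does that anyway, so the repair costs no more than the splay itself.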
 /*
  *	vm_map_lookup_entry:	[ internal use only ]
  *
  *	Finds the map entry containing (or
  *	immediately preceding) the specified address
  *	in the given map; the entry is returned
  *	in the "entry" parameter.  The boolean
  *	result indicates whether the address is
  *	actually contained in the map.
  */
 boolean_t
 vm_map_lookup_entry(
 	vm_map_t map,
 	vm_offset_t address,
 	vm_map_entry_t *entry)	/* OUT */
 {
-	vm_map_entry_t cur;
+	vm_map_entry_t cur, lbound;
 	boolean_t locked;

 	/*
 	 * If the map is empty, then the map entry immediately preceding
 	 * "address" is the map's header.
 	 */
 	cur = map->root;
-	if (cur == NULL)
+	if (cur == NULL) {
 		*entry = &map->header;
-	else if (address >= cur->start && cur->end > address) {
+		return (FALSE);
+	}
+	if (address >= cur->start && cur->end > address) {
 		*entry = cur;
 		return (TRUE);
-	} else if ((locked = vm_map_locked(map)) ||
+	}
+	if ((locked = vm_map_locked(map)) ||
 	    sx_try_upgrade(&map->lock)) {
 		/*
 		 * Splay requires a write lock on the map.  However, it only
 		 * restructures the binary search tree; it does not otherwise
 		 * change the map.  Thus, the map's timestamp need not change
 		 * on a temporary upgrade.
 		 */
 		map->root = cur = vm_map_entry_splay(address, cur);
+		VM_MAP_ASSERT_CONSISTENT(map);
 		if (!locked)
 			sx_downgrade(&map->lock);

 		/*
 		 * If "address" is contained within a map entry, the new root
 		 * is that map entry.  Otherwise, the new root is a map entry
 		 * immediately before or after "address".
 		 */
-		if (address >= cur->start) {
-			*entry = cur;
-			if (cur->end > address)
-				return (TRUE);
-		} else
-			*entry = cur->prev;
-	} else
-		/*
-		 * Since the map is only locked for read access, perform a
-		 * standard binary search tree lookup for "address".
-		 */
-		for (;;) {
-			if (address < cur->start) {
-				if (cur->left == NULL) {
-					*entry = cur->prev;
-					break;
-				}
-				cur = cur->left;
-			} else if (cur->end > address) {
-				*entry = cur;
-				return (TRUE);
-			} else {
-				if (cur->right == NULL) {
-					*entry = cur;
-					break;
-				}
-				cur = cur->right;
-			}
-		}
+		if (address < cur->start) {
+			*entry = &map->header;
+			return (FALSE);
+		}
+		*entry = cur;
+		return (address < cur->end);
+	}
+	/*
+	 * Since the map is only locked for read access, perform a
+	 * standard binary search tree lookup for "address".
+	 */
+	lbound = &map->header;
+	do {
+		if (address < cur->start) {
+			cur = cur->left;
+		} else if (cur->end <= address) {
+			lbound = cur;
+			cur = cur->right;
+		} else {
+			*entry = cur;
+			return (TRUE);
+		}
+	} while (cur != NULL);
+	*entry = lbound;
 	return (FALSE);
 }
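Typical use of the lookup, covering both outcomes (a sketch, not from the patch):

    vm_map_entry_t entry;

    if (vm_map_lookup_entry(map, addr, &entry)) {
            /* entry->start <= addr < entry->end */
    } else {
            /*
             * entry is the nearest entry below addr, or &map->header if
             * there is none; entry->next is then the first entry above.
             */
    }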
 /*
  *	vm_map_insert:
  *
  *	Inserts the given whole VM object into the target
  *	map at the specified address range.  The object's
[... 14 lines not shown ...]
 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
 	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
 	new_entry->cred = cred;

 	/*
 	 * Insert the new entry into the list
 	 */
-	vm_map_entry_link(map, prev_entry, new_entry);
+	vm_map_entry_link(map, new_entry);
 	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
 		map->size += new_entry->end - new_entry->start;

 	/*
 	 * Try to coalesce the new entry with both the previous and next
 	 * entries in the list.  Previously, we only attempted to coalesce
 	 * with the previous entry when object is NULL.  Here, we handle the
 	 * other cases, which are less common.
[... 9 lines not shown ...]
 }

kib (done, on the vm_map_findspace() interface below): Perhaps change the return type of the function to bool? It got an almost complete rewrite anyway.

dougm (done): If we're talking about changing the interface, how would you feel about changing the return type to vm_offset_t, eliminating the addr parameter, returning the addr, and returning the vm_map_max value for the failure case?
 /*
  *	vm_map_findspace:
  *
  *	Find the first fit (lowest VM address) for "length" free bytes
  *	beginning at address >= start in the given map.
  *
- *	In a vm_map_entry, "adj_free" is the amount of free space
- *	adjacent (higher address) to this entry, and "max_free" is the
- *	maximum amount of contiguous free space in its subtree.  This
- *	allows finding a free region in one path down the tree, so
- *	O(log n) amortized with splay trees.
+ *	In a vm_map_entry, "max_free" is the maximum amount of
+ *	contiguous free space between an entry in its subtree and a
+ *	neighbor of that entry.  This allows finding a free region in
+ *	one path down the tree, so O(log n) amortized with splay
+ *	trees.
  *
  *	The map must be locked, and leaves it so.
  *
- *	Returns:	0 on success, and starting address in *addr,
- *			1 if insufficient space.
+ *	Returns:	starting address if sufficient space,
+ *			vm_map_max(map)-length+1 if insufficient space.
  */
-int
-vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
-    vm_offset_t *addr)	/* OUT */
+vm_offset_t
+vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
 {
-	vm_map_entry_t entry;
-	vm_offset_t st;
+	vm_map_entry_t llist, rlist, root, y;
+	vm_size_t left_length;

 	/*
 	 * Request must fit within min/max VM address and must avoid
 	 * address wrap.
 	 */
 	start = MAX(start, vm_map_min(map));
 	if (start + length > vm_map_max(map) || start + length < start)
-		return (1);
+		return (vm_map_max(map) - length + 1);

 	/* Empty tree means wide open address space. */
-	if (map->root == NULL) {
-		*addr = start;
-		return (0);
-	}
+	if (map->root == NULL)
+		return (start);

 	/*
 	 * After splay, if start comes before root node, then there
 	 * must be a gap from start to the root.
 	 */
-	map->root = vm_map_entry_splay(start, map->root);
-	if (start + length <= map->root->start) {
-		*addr = start;
-		return (0);
-	}
+	root = vm_map_splay_split(start, length, map->root,
+	    &llist, &rlist);
+	if (root != NULL)
+		start = root->end;
+	else if (rlist != NULL) {
+		root = rlist;
+		rlist = root->left;
+		root->left = NULL;
+	} else {
+		root = llist;
+		llist = root->right;
+		root->right = NULL;
+	}
+	map->root = vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right);
+	VM_MAP_ASSERT_CONSISTENT(map);
+	if (start + length <= root->start)
+		return (start);

 	/*
 	 * Root is the last node that might begin its gap before
 	 * start, and this is the last comparison where address
 	 * wrap might be a problem.
 	 */
-	st = (start > map->root->end) ? start : map->root->end;
-	if (length <= map->root->end + map->root->adj_free - st) {
-		*addr = st;
-		return (0);
-	}
+	if (root->right == NULL &&
+	    start + length <= vm_map_max(map))
+		return (start);

 	/* With max_free, can immediately tell if no solution. */
-	entry = map->root->right;
-	if (entry == NULL || length > entry->max_free)
-		return (1);
+	if (root->right == NULL || length > root->right->max_free)
+		return (vm_map_max(map) - length + 1);

 	/*
-	 * Search the right subtree in the order: left subtree, root,
-	 * right subtree (first fit).  The previous splay implies that
-	 * all regions in the right subtree have addresses > start.
+	 * Splay for the least large-enough gap in the right subtree.
 	 */
-	while (entry != NULL) {
-		if (entry->left != NULL && entry->left->max_free >= length)
-			entry = entry->left;
-		else if (entry->adj_free >= length) {
-			*addr = entry->end;
-			return (0);
-		} else
-			entry = entry->right;
-	}
-
-	/* Can't get here, so panic if we do. */
-	panic("vm_map_findspace: max_free corrupt");
+	llist = NULL;
+	rlist = NULL;
+	for (left_length = 0; ;
+	    left_length = root->left != NULL ?
+	    root->left->max_free : root->start - llist->end) {
+		if (length <= left_length)
+			SPLAY_LEFT_STEP(root, y, rlist,
+			    length <= (y->left != NULL ?
+			    y->left->max_free : y->start - llist->end));
+		else
+			SPLAY_RIGHT_STEP(root, y, llist,
+			    length > (y->left != NULL ?
+			    y->left->max_free : y->start - root->end));
+		if (root == NULL)
+			break;
+	}
+	root = llist;
+	llist = root->right;
+	if ((y = rlist) == NULL)
+		root->right = NULL;
+	else {
+		rlist = y->left;
+		y->left = NULL;
+		root->right = y->right;
+	}
+	root = vm_map_splay_merge(root, llist, rlist,
+	    root->left, root->right);
+	if (y != NULL) {
+		y->right = root->right;
+		vm_map_entry_set_max_free(y);
+		root->right = y;
+		vm_map_entry_set_max_free(root);
+	}
+	map->root = root;
+	VM_MAP_ASSERT_CONSISTENT(map);
+	return (root->end);
 }
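The pruning that the comment describes is easiest to see in a recursive version over the toy node type from the earlier sketch: max_free cuts off every subtree that cannot contain a big-enough gap, leaving a single root-to-leaf path. The kernel walk is iterative and splays as it goes, but the gap arithmetic is the same. Illustrative only:

    #define NO_FIT ((size_t)-1)

    /* Lowest gap of at least length bytes in n's subtree, or NO_FIT. */
    static size_t
    first_fit(struct node *n, size_t length)
    {
        size_t res;

        if (n == NULL || n->max_free < length)
            return (NO_FIT);        /* prune: no big-enough gap in here */
        if (n->left != NULL) {
            if ((res = first_fit(n->left, length)) != NO_FIT)
                return (res);
        } else if (n->start - n->prev->end >= length)
            return (n->prev->end);  /* the gap just before n */
        if (n->right != NULL)
            return (first_fit(n->right, length));
        if (n->next->start - n->end >= length)
            return (n->end);        /* the gap just after n */
        return (NO_FIT);            /* unreachable if max_free is correct */
    }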
 int
 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     vm_offset_t start, vm_size_t length, vm_prot_t prot,
     vm_prot_t max, int cow)
 {
 	vm_offset_t end;
[... 14 lines not shown ...]
 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
     vm_offset_t alignment)
 {
 	vm_offset_t aligned_addr, free_addr;

 	VM_MAP_ASSERT_LOCKED(map);
 	free_addr = *addr;
-	KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
-	    free_addr == *addr, ("caller provided insufficient free space"));
+	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
+	    ("caller failed to provide space %d at address %p",
+	    (int)length, (void *)free_addr));
 	for (;;) {
 		/*
 		 * At the start of every iteration, the free space at address
 		 * "*addr" is at least "length" bytes.
 		 */
 		if (alignment == 0)
 			pmap_align_superpage(object, offset, addr, length);
 		else if ((*addr & (alignment - 1)) != 0) {
[... 9 lines not shown ...]
 			return (KERN_SUCCESS);
 		}

 		/*
 		 * Test for address wrap on "*addr".  A wrapped "*addr" could
 		 * be a valid address, in which case vm_map_findspace() cannot
 		 * be relied upon to fail.
 		 */
-		if (aligned_addr < free_addr ||
-		    vm_map_findspace(map, aligned_addr, length, addr) ||
-		    (max_addr != 0 && *addr + length > max_addr))
+		if (aligned_addr < free_addr)
+			return (KERN_NO_SPACE);
+		*addr = vm_map_findspace(map, aligned_addr, length);
+		if (*addr + length > vm_map_max(map) ||
+		    (max_addr != 0 && *addr + length > max_addr))
 			return (KERN_NO_SPACE);
 		free_addr = *addr;
 		if (free_addr == aligned_addr) {
 			/*
 			 * If a successful call to vm_map_findspace() did not
 			 * change "*addr", then "*addr" must still be aligned
 			 * and provide sufficient free space.
[... 14 lines not shown ...]
 		 * gap needed for later randomization.
 		 */
 		pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
 		    (find_space == VMFS_SUPER_SPACE || find_space ==
 		    VMFS_OPTIMAL_SPACE) ? 1 : 0;
 		gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
 		    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
 		    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
-		if (vm_map_findspace(map, curr_min_addr, length +
-		    gap * pagesizes[pidx], addr))
+		*addr = vm_map_findspace(map, curr_min_addr,
+		    length + gap * pagesizes[pidx]);
+		if (*addr + length + gap * pagesizes[pidx] >
+		    vm_map_max(map))
 			goto again;
 		/* And randomize the start address. */
 		*addr += (arc4random() % gap) * pagesizes[pidx];
 		if (max_addr != 0 && *addr + length > max_addr)
 			goto again;
-	} else if (vm_map_findspace(map, curr_min_addr, length, addr) ||
-	    (max_addr != 0 && *addr + length > max_addr)) {
-		if (cluster) {
-			cluster = false;
-			MPASS(try == 1);
-			goto again;
-		}
-		rv = KERN_NO_SPACE;
-		goto done;
-	}
+	} else {
+		*addr = vm_map_findspace(map, curr_min_addr, length);
+		if (*addr + length > vm_map_max(map) ||
+		    (max_addr != 0 && *addr + length > max_addr)) {
+			if (cluster) {
+				cluster = false;
+				MPASS(try == 1);
+				goto again;
+			}
+			rv = KERN_NO_SPACE;
+			goto done;
+		}
+	}
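The reservation arithmetic in the randomized case is easier to check with numbers plugged in (hypothetical values, 4 KB pages, so pagesizes[pidx] == 4096 and, say, gap == 16): the search asks for length + 16 * 4096 bytes, and the slide

    *addr += (arc4random() % 16) * 4096;

moves the start up by at most 15 pages, so at least length + 4096 bytes of the oversized reservation still lie above the randomized address. The slide is a whole number of pages, so page alignment is preserved as well.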
 	if (find_space != VMFS_ANY_SPACE &&
 	    (rv = vm_map_alignspace(map, object, offset, addr, length,
 	    max_addr, alignment)) != KERN_SUCCESS) {
 		if (find_space == VMFS_OPTIMAL_SPACE) {
 			find_space = VMFS_ANY_SPACE;
 			curr_min_addr = min_addr;
[... 14 lines not shown ...]
 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_map_entry_t next, prev;

 	if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0)
 		return;
 	prev = entry->prev;
 	if (vm_map_mergeable_neighbors(prev, entry)) {
-		vm_map_entry_unlink(map, prev);
-		entry->start = prev->start;
-		entry->offset = prev->offset;
-		if (entry->prev != &map->header)
-			vm_map_entry_resize_free(map, entry->prev);
+		vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
 		vm_map_merged_neighbor_dispose(map, prev);
 	}
 	next = entry->next;
 	if (vm_map_mergeable_neighbors(entry, next)) {
-		vm_map_entry_unlink(map, next);
-		entry->end = next->end;
-		vm_map_entry_resize_free(map, entry);
+		vm_map_entry_unlink(map, next, UNLINK_MERGE_PREV);
 		vm_map_merged_neighbor_dispose(map, next);
 	}
 }
 /*
  *	vm_map_clip_start:	[ internal use only ]
  *
  *	Asserts that the given entry begins at or after
[... 14 lines not shown ...]
 	*new_entry = *entry;
 	new_entry->end = start;
 	entry->offset += (start - entry->start);
 	entry->start = start;
 	if (new_entry->cred != NULL)
 		crhold(entry->cred);

-	vm_map_entry_link(map, entry->prev, new_entry);
+	vm_map_entry_link(map, new_entry);

 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 		vm_object_reference(new_entry->object.vm_object);
 		/*
 		 * The object->un_pager.vnp.writemappings for the
 		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
 		 * kept as is here.  The virtual pages are
 		 * re-distributed among the clipped entries, so the sum is
[... 14 lines not shown ...]
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 	new_entry->start = entry->end = end;
 	new_entry->offset += (end - entry->start);
 	if (new_entry->cred != NULL)
 		crhold(entry->cred);

-	vm_map_entry_link(map, entry, new_entry);
+	vm_map_entry_link(map, new_entry);

 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 		vm_object_reference(new_entry->object.vm_object);
 	}
 }
 /*
  *	vm_map_submap:		[ kernel use only ]
[... 14 lines not shown ...]
  */
 static void
 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_object_t object;
 	vm_pindex_t offidxstart, offidxend, count, size1;
 	vm_size_t size;

-	vm_map_entry_unlink(map, entry);
+	vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
 	object = entry->object.vm_object;

 	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
 		MPASS(entry->cred == NULL);
 		MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
 		MPASS(object == NULL);
 		vm_map_entry_deallocate(entry, map->system_map);
 		return;
[... 14 lines not shown ...]
 			vnode_pager_update_writecount(object,
 			    new_entry->start, new_entry->end);
 		}

 		/*
 		 * Insert the entry into the new map -- we know we're
 		 * inserting at the end of the new map.
 		 */
-		vm_map_entry_link(new_map, new_map->header.prev,
-		    new_entry);
+		vm_map_entry_link(new_map, new_entry);
 		vmspace_map_entry_forked(vm1, vm2, new_entry);

 		/*
 		 * Update the physical map
 		 */
 		pmap_copy(new_map->pmap, old_map->pmap,
 		    new_entry->start,
 		    (old_entry->end - old_entry->start),
[... 10 lines not shown ...]
 		 * Copied entry is COW over the old object.
 		 */
 		new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
 		    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
 		new_entry->wiring_thread = NULL;
 		new_entry->wired_count = 0;
 		new_entry->object.vm_object = NULL;
 		new_entry->cred = NULL;
-		vm_map_entry_link(new_map, new_map->header.prev,
-		    new_entry);
+		vm_map_entry_link(new_map, new_entry);
 		vmspace_map_entry_forked(vm1, vm2, new_entry);
 		vm_map_copy_entry(old_map, new_map, old_entry,
 		    new_entry, fork_charge);
 		break;

 	case VM_INHERIT_ZERO:
 		/*
 		 * Create a new anonymous mapping entry modelled from
 		 * the old one.
 		 */
 		new_entry = vm_map_entry_create(new_map);
 		memset(new_entry, 0, sizeof(*new_entry));
 		new_entry->start = old_entry->start;
 		new_entry->end = old_entry->end;
 		new_entry->eflags = old_entry->eflags &
 		    ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
 		    MAP_ENTRY_VN_WRITECNT);
 		new_entry->protection = old_entry->protection;
 		new_entry->max_protection = old_entry->max_protection;
 		new_entry->inheritance = VM_INHERIT_ZERO;
-		vm_map_entry_link(new_map, new_map->header.prev,
-		    new_entry);
+		vm_map_entry_link(new_map, new_entry);
 		vmspace_map_entry_forked(vm1, vm2, new_entry);
 		new_entry->cred = curthread->td_ucred;
 		crhold(new_entry->cred);
 		*fork_charge += (new_entry->end - new_entry->start);
 		break;
 	}
 	old_entry = old_entry->next;
 }

 /*
  * Use inlined vm_map_unlock() to postpone handling the deferred
  * map entries, which cannot be done until both old_map and
  * new_map locks are released.
  */
[... context not available ...]