D21964: Make vm_map a threaded tree
D21964.id64833.diff (39 KB)
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -99,10 +99,8 @@
* Also included is control information for virtual copy operations.
*/
struct vm_map_entry {
- struct vm_map_entry *prev; /* previous entry */
- struct vm_map_entry *next; /* next entry */
- struct vm_map_entry *left; /* left child in binary search tree */
- struct vm_map_entry *right; /* right child in binary search tree */
+ struct vm_map_entry *left; /* left child or previous entry */
+ struct vm_map_entry *right; /* right child or next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
vm_offset_t next_read; /* vaddr of the next sequential read */
@@ -424,14 +422,21 @@
vm_map_entry_first(vm_map_t map)
{
- return (map->header.next);
+ return (map->header.right);
}
static inline vm_map_entry_t
vm_map_entry_succ(vm_map_entry_t entry)
{
+ vm_map_entry_t after;
- return (entry->next);
+ after = entry->right;
+ if (after->left->start > entry->start) {
+ do
+ after = after->left;
+ while (after->left != entry);
+ }
+ return (after);
}
#define VM_MAP_ENTRY_FOREACH(it, map) \
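For illustration, here is a minimal standalone sketch of the threaded successor walk above, assuming invented names ("node", "hdr", and "node_succ" stand in for vm_map_entry, &map->header, and vm_map_entry_succ); this is toy code, not the kernel's:

#include <stdio.h>

struct node {
	struct node *left;	/* left child, or thread to predecessor */
	struct node *right;	/* right child, or thread to successor */
	unsigned long start;
};

static struct node *
node_succ(struct node *entry)
{
	struct node *after;

	after = entry->right;
	/*
	 * Every start in entry's right subtree is greater than
	 * entry->start, while a thread out of that subtree leads to
	 * entry itself or to an ancestor with a smaller start.  So the
	 * test is true exactly when "after" has a real left child, in
	 * which case we descend to the leftmost node of the right
	 * subtree, whose left pointer threads back to entry.
	 */
	if (after->left->start > entry->start) {
		do
			after = after->left;
		while (after->left != entry);
	}
	return (after);
}

int
main(void)
{
	struct node hdr, a, b, c;

	/* In-order: a(0x1000), b(0x2000), c(0x3000); a is the root. */
	hdr.start = ~0UL;	/* header start is the map maximum */
	a.start = 0x1000; a.left = &hdr; a.right = &c;
	b.start = 0x2000; b.left = &a;   b.right = &c;	/* both threads */
	c.start = 0x3000; c.left = &b;   c.right = &hdr;
	hdr.right = &a;		/* first entry, as in vm_map_entry_first */
	hdr.left = &c;		/* last entry */

	for (struct node *n = hdr.right; n != &hdr; n = node_succ(n))
		printf("%#lx\n", n->start);	/* 0x1000 0x2000 0x3000 */
	return (0);
}

Walking node_succ(&a) exercises the descent: a's right child is c, c's left child b has a larger start than a, and b's left pointer threads back to a, so b is a's successor.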
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -885,7 +885,6 @@
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
- map->header.next = map->header.prev = &map->header;
map->header.eflags = MAP_ENTRY_HEADER;
map->needs_wakeup = FALSE;
map->system_map = 0;
@@ -893,6 +892,7 @@
map->header.end = min;
map->header.start = max;
map->flags = 0;
+ map->header.left = map->header.right = &map->header;
map->root = NULL;
map->timestamp = 0;
map->busy = 0;
@@ -955,6 +955,12 @@
(behavior & MAP_ENTRY_BEHAV_MASK);
}
+static inline vm_size_t
+vm_size_max(vm_size_t a, vm_size_t b)
+{
+ return (a > b) ? a : b;
+}
+
/*
* vm_map_entry_max_free_{left,right}:
*
@@ -966,7 +972,7 @@
vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
{
- return (root->left != NULL ?
+ return (root->left != left_ancestor ?
root->left->max_free : root->start - left_ancestor->end);
}
@@ -974,7 +980,7 @@
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
{
- return (root->right != NULL ?
+ return (root->right != right_ancestor ?
root->right->max_free : right_ancestor->start - root->end);
}
@@ -987,13 +993,20 @@
static inline vm_map_entry_t
vm_map_entry_pred(vm_map_entry_t entry)
{
+ vm_map_entry_t prior;
- return (entry->prev);
+ prior = entry->left;
+ if (prior->right->start < entry->start) {
+ do
+ prior = prior->right;
+ while (prior->right != entry);
+ }
+ return (prior);
}
/* vm_map_entry_succ is defined in vm_map.h. */
-#define SPLAY_LEFT_STEP(root, y, rlist, test) do { \
+#define SPLAY_LEFT_STEP(root, y, llist, rlist, found, test) do { \
vm_size_t max_free; \
\
/* \
@@ -1005,14 +1018,17 @@
max_free = root->max_free; \
KASSERT(max_free >= vm_map_entry_max_free_right(root, rlist), \
("%s: max_free invariant fails", __func__)); \
- if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \
+ if (y == llist ? max_free > 0 : max_free - 1 < y->max_free) \
max_free = vm_map_entry_max_free_right(root, rlist); \
- if (y != NULL && (test)) { \
+ if (y != llist && (test)) { \
/* Rotate right and make y root. */ \
- root->left = y->right; \
- y->right = root; \
+ if (y->right != root) { \
+ root->left = y->right; \
+ y->right = root; \
+ } \
if (max_free < y->max_free) \
- root->max_free = max_free = MAX(max_free, \
+ root->max_free = max_free = \
+ vm_size_max(max_free, \
vm_map_entry_max_free_left(root, y)); \
root = y; \
y = root->left; \
@@ -1024,9 +1040,10 @@
root->left = rlist; \
rlist = root; \
root = y; \
+ found = root != llist; \
} while (0)
-#define SPLAY_RIGHT_STEP(root, y, llist, test) do { \
+#define SPLAY_RIGHT_STEP(root, y, llist, rlist, found, test) do { \
vm_size_t max_free; \
\
/* \
@@ -1038,14 +1055,17 @@
max_free = root->max_free; \
KASSERT(max_free >= vm_map_entry_max_free_left(root, llist), \
("%s: max_free invariant fails", __func__)); \
- if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \
+ if (y == rlist ? max_free > 0 : max_free - 1 < y->max_free) \
max_free = vm_map_entry_max_free_left(root, llist); \
- if (y != NULL && (test)) { \
+ if (y != rlist && (test)) { \
/* Rotate left and make y root. */ \
- root->right = y->left; \
- y->left = root; \
+ if (y->left != root) { \
+ root->right = y->left; \
+ y->left = root; \
+ } \
if (max_free < y->max_free) \
- root->max_free = max_free = MAX(max_free, \
+ root->max_free = max_free = \
+ vm_size_max(max_free, \
vm_map_entry_max_free_right(root, y)); \
root = y; \
y = root->right; \
@@ -1057,67 +1077,65 @@
root->right = llist; \
llist = root; \
root = y; \
+ found = root != rlist; \
} while (0)
/*
- * Walk down the tree until we find addr or a NULL pointer where addr would go,
- * breaking off left and right subtrees of nodes less than, or greater than
- * addr. Treat pointers to nodes with max_free < length as NULL pointers.
- * llist and rlist are the two sides in reverse order (bottom-up), with llist
- * linked by the right pointer and rlist linked by the left pointer in the
- * vm_map_entry, and both lists terminated by &map->header. This function, and
- * the subsequent call to vm_map_splay_merge, rely on the start and end address
- * values in &map->header.
+ * Walk down the tree until we find addr or a gap where addr would go, breaking
+ * off left and right subtrees of nodes less than, or greater than addr. Treat
+ * subtrees with root->max_free < length as empty trees. llist and rlist are
+ * the two sides in reverse order (bottom-up), with llist linked by the right
+ * pointer and rlist linked by the left pointer in the vm_map_entry, and both
+ * lists terminated by &map->header. This function, and the subsequent call to
+ * vm_map_splay_merge_{left,right}, rely on the start and end address values in
+ * &map->header.
*/
-static vm_map_entry_t
-vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
- vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
-{
- vm_map_entry_t llist, rlist, root, y;
+#define VM_MAP_SPLAY_SPLIT(map, addr, length, root, llist, rlist, found)\
+do { \
+ vm_map_entry_t y; \
+ \
+ llist = rlist = &map->header; \
+ root = map->root; \
+ found = root != NULL; \
+ while (found && root->max_free >= length) { \
+ KASSERT(llist->end <= root->start && \
+ root->end <= rlist->start, \
+ ("%s: root not within tree bounds", __func__)); \
+ if (addr < root->start) { \
+ SPLAY_LEFT_STEP(root, y, llist, rlist, found, \
+ y->max_free >= length && addr < y->start); \
+ } else if (addr >= root->end) { \
+ SPLAY_RIGHT_STEP(root, y, llist, rlist, found, \
+ y->max_free >= length && addr >= y->end); \
+ } else \
+ break; \
+ } \
+} while (0)
- llist = rlist = &map->header;
- root = map->root;
- while (root != NULL && root->max_free >= length) {
- KASSERT(llist->end <= root->start && root->end <= rlist->start,
- ("%s: root not within tree bounds", __func__));
- if (addr < root->start) {
- SPLAY_LEFT_STEP(root, y, rlist,
- y->max_free >= length && addr < y->start);
- } else if (addr >= root->end) {
- SPLAY_RIGHT_STEP(root, y, llist,
- y->max_free >= length && addr >= y->end);
- } else
- break;
- }
- *out_llist = llist;
- *out_rlist = rlist;
- return (root);
-}
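Setting aside the rotations and max_free bookkeeping handled by the step macros, the skeleton of this top-down split can be sketched on a plain NULL-terminated BST (invented names, illustrative only): nodes entirely below addr pile onto llist through their right pointers, nodes entirely above pile onto rlist through their left pointers, and each keeps its outer subtree.

struct tnode {
	struct tnode *left, *right;
	unsigned long start, end;
};

static struct tnode *
split(struct tnode *root, unsigned long addr,
    struct tnode **out_llist, struct tnode **out_rlist)
{
	struct tnode *llist = NULL, *rlist = NULL, *y;

	while (root != NULL) {
		if (addr < root->start) {
			/* root and its right subtree move to rlist. */
			y = root->left;
			root->left = rlist;
			rlist = root;
			root = y;
		} else if (addr >= root->end) {
			/* root and its left subtree move to llist. */
			y = root->right;
			root->right = llist;
			llist = root;
			root = y;
		} else
			break;		/* root contains addr */
	}
	*out_llist = llist;
	*out_rlist = rlist;
	return (root);
}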
+#define VM_MAP_SPLAY_FINDNEXT(root, rlist) do { \
+ vm_map_entry_t hi, y; \
+ bool found; \
+ \
+ hi = root->right; \
+ if (hi == rlist) \
+ break; \
+ do \
+ SPLAY_LEFT_STEP(hi, y, root, rlist, found, true); \
+ while (found); \
+} while (0)
-static void
-vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
-{
- vm_map_entry_t rlist, y;
+#define VM_MAP_SPLAY_FINDPREV(root, llist) do { \
+ vm_map_entry_t lo, y; \
+ bool found; \
+ \
+ lo = root->left; \
+ if (lo == llist) \
+ break; \
+ do \
+ SPLAY_RIGHT_STEP(lo, y, llist, root, found, true); \
+ while (found); \
+} while (0)
- root = root->right;
- rlist = *iolist;
- while (root != NULL)
- SPLAY_LEFT_STEP(root, y, rlist, true);
- *iolist = rlist;
-}
-
-static void
-vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
-{
- vm_map_entry_t llist, y;
-
- root = root->left;
- llist = *iolist;
- while (root != NULL)
- SPLAY_RIGHT_STEP(root, y, llist, true);
- *iolist = llist;
-}
-
static inline void
vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
{
@@ -1132,50 +1150,120 @@
* Walk back up the two spines, flip the pointers and set max_free. The
* subtrees of the root go at the bottom of llist and rlist.
*/
-static void
-vm_map_splay_merge(vm_map_t map, vm_map_entry_t root,
- vm_map_entry_t llist, vm_map_entry_t rlist)
+static vm_size_t
+vm_map_splay_merge_left_walk(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
{
- vm_map_entry_t prev;
- vm_size_t max_free_left, max_free_right;
+ do {
+ /*
+ * The max_free values of the children of llist are in
+ * llist->max_free and max_free. Update with the
+ * max value.
+ */
+ llist->max_free = max_free =
+ vm_size_max(llist->max_free, max_free);
+ vm_map_entry_swap(&llist->right, &tail);
+ vm_map_entry_swap(&tail, &llist);
+ } while (llist != &map->header);
+ root->left = tail;
+ return (max_free);
+}
- max_free_left = vm_map_entry_max_free_left(root, llist);
+/*
+ * When llist is known to be the predecessor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_pred(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t llist)
+{
+ vm_size_t max_free;
+
+ max_free = root->start - llist->end;
+ if (llist != &map->header)
+ max_free = vm_map_splay_merge_left_walk(map, root,
+ root, max_free, llist);
+ else {
+ root->left = llist;
+ llist->right = root;
+ }
+ return (max_free);
+}
+
+/*
+ * When llist may or may not be the predecessor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_left(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t llist)
+{
+ vm_size_t max_free;
+
+ max_free = vm_map_entry_max_free_left(root, llist);
if (llist != &map->header) {
- prev = root->left;
- do {
- /*
- * The max_free values of the children of llist are in
- * llist->max_free and max_free_left. Update with the
- * max value.
- */
- llist->max_free = max_free_left =
- MAX(llist->max_free, max_free_left);
- vm_map_entry_swap(&llist->right, &prev);
- vm_map_entry_swap(&prev, &llist);
- } while (llist != &map->header);
- root->left = prev;
+ max_free = vm_map_splay_merge_left_walk(map, root,
+ root->left == llist ? root : root->left,
+ max_free, llist);
+ } else if (root->left == llist)
+ llist->right = root;
+ return (max_free);
+}
+
+static vm_size_t
+vm_map_splay_merge_right_walk(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
+{
+ do {
+ /*
+ * The max_free values of the children of rlist are in
+ * rlist->max_free and max_free. Update with the
+ * max value.
+ */
+ rlist->max_free = max_free =
+ vm_size_max(rlist->max_free, max_free);
+ vm_map_entry_swap(&rlist->left, &tail);
+ vm_map_entry_swap(&tail, &rlist);
+ } while (rlist != &map->header);
+ root->right = tail;
+ return (max_free);
+}
+
+/*
+ * When rlist is known to be the successor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_succ(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t rlist)
+{
+ vm_size_t max_free;
+
+ max_free = rlist->start - root->end;
+ if (rlist != &map->header) {
+ max_free = vm_map_splay_merge_right_walk(map, root,
+ root, max_free, rlist);
+ } else {
+ root->right = rlist;
+ rlist->left = root;
}
- max_free_right = vm_map_entry_max_free_right(root, rlist);
+ return (max_free);
+}
+
+/*
+ * When rlist may or may not be the successor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_right(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t rlist)
+{
+ vm_size_t max_free;
+
+ max_free = vm_map_entry_max_free_right(root, rlist);
if (rlist != &map->header) {
- prev = root->right;
- do {
- /*
- * The max_free values of the children of rlist are in
- * rlist->max_free and max_free_right. Update with the
- * max value.
- */
- rlist->max_free = max_free_right =
- MAX(rlist->max_free, max_free_right);
- vm_map_entry_swap(&rlist->left, &prev);
- vm_map_entry_swap(&prev, &rlist);
- } while (rlist != &map->header);
- root->right = prev;
- }
- root->max_free = MAX(max_free_left, max_free_right);
- map->root = root;
-#ifdef DIAGNOSTIC
- ++map->nupdates;
-#endif
+ max_free = vm_map_splay_merge_right_walk(map, root,
+ root->right == rlist ? root : root->right,
+ max_free, rlist);
+ } else if (root->right == rlist)
+ rlist->left = root;
+ return (max_free);
}
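The two *_walk helpers above amount to pointer reversal with a running maximum: climbing the spine saved by the split, each node absorbs the running max_free of everything processed so far on that side, and its spine link is flipped to point back down, re-forming child pointers. A standalone sketch of the core move (invented names; "end" plays the role of &map->header, and the final value of "tail" is what would become the root's new child):

struct snode {
	struct snode *right;	/* spine link, flipped to child link */
	unsigned long max_free;
};

static unsigned long
merge_walk(struct snode *list, struct snode *end, struct snode *tail,
    unsigned long max_free)
{
	struct snode *next;

	while (list != end) {
		/* Fold this node's cached max into the running max. */
		if (list->max_free > max_free)
			max_free = list->max_free;
		list->max_free = max_free;
		/* Flip the spine link to point back down at tail. */
		next = list->right;
		list->right = tail;
		tail = list;
		list = next;
	}
	return (max_free);
}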
/*
@@ -1188,6 +1276,14 @@
* the pointers and compute max_free. The time bound is O(log n)
* amortized.
*
+ * The tree is threaded, which means that there are no null pointers.
+ * When a node has no left child, its left pointer points to its
+ * predecessor, which is the last ancestor on the search path from the root
+ * where the search branched right. Likewise, when a node has no right
+ * child, its right pointer points to its successor. The map header node
+ * is the predecessor of the first map entry, and the successor of the
+ * last.
+ *
* The new root is the vm_map_entry containing "addr", or else an
* adjacent entry (lower if possible) if addr is not in the tree.
*
@@ -1199,10 +1295,13 @@
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
vm_map_entry_t llist, rlist, root;
+ vm_size_t max_free_left, max_free_right;
+ bool found;
- root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
- if (root != NULL) {
- /* do nothing */
+ VM_MAP_SPLAY_SPLIT(map, addr, 0, root, llist, rlist, found);
+ if (found) {
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else if (llist != &map->header) {
/*
* Recover the greatest node in the left
@@ -1210,7 +1309,8 @@
*/
root = llist;
llist = root->right;
- root->right = NULL;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_succ(map, root, rlist);
} else if (rlist != &map->header) {
/*
* Recover the least node in the right
@@ -1218,12 +1318,14 @@
*/
root = rlist;
rlist = root->left;
- root->left = NULL;
+ max_free_left = vm_map_splay_merge_pred(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else {
/* There is no root. */
return (NULL);
}
- vm_map_splay_merge(map, root, llist, rlist);
+ root->max_free = vm_size_max(max_free_left, max_free_right);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
return (root);
}
@@ -1237,20 +1339,21 @@
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
vm_map_entry_t llist, rlist, root;
+ bool found;
CTR3(KTR_VM,
"vm_map_entry_link: map %p, nentries %d, entry %p", map,
map->nentries, entry);
VM_MAP_ASSERT_LOCKED(map);
map->nentries++;
- root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
- KASSERT(root == NULL,
+ VM_MAP_SPLAY_SPLIT(map, entry->start, 0, root, llist, rlist, found);
+ KASSERT(!found,
("vm_map_entry_link: link object already mapped"));
- entry->prev = llist;
- entry->next = rlist;
- llist->next = rlist->prev = entry;
- entry->left = entry->right = NULL;
- vm_map_splay_merge(map, entry, llist, rlist);
+ root = entry;
+ root->max_free = vm_size_max(
+ vm_map_splay_merge_pred(map, root, llist),
+ vm_map_splay_merge_succ(map, root, rlist));
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
}
@@ -1263,44 +1366,38 @@
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
enum unlink_merge_type op)
{
- vm_map_entry_t llist, rlist, root, y;
+ vm_map_entry_t llist, rlist, root;
+ vm_size_t max_free_left, max_free_right;
+ bool found;
VM_MAP_ASSERT_LOCKED(map);
- root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
- KASSERT(root != NULL,
+ VM_MAP_SPLAY_SPLIT(map, entry->start, 0, root, llist, rlist, found);
+ KASSERT(found,
("vm_map_entry_unlink: unlink object not mapped"));
- vm_map_splay_findnext(root, &rlist);
- switch (op) {
- case UNLINK_MERGE_NEXT:
+ VM_MAP_SPLAY_FINDPREV(root, llist);
+ VM_MAP_SPLAY_FINDNEXT(root, rlist);
+ if (op == UNLINK_MERGE_NEXT) {
rlist->start = root->start;
rlist->offset = root->offset;
- y = root->left;
+ }
+ if (llist != &map->header) {
+ root = llist;
+ llist = root->right;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_succ(map, root, rlist);
+ } else if (rlist != &map->header) {
root = rlist;
rlist = root->left;
- root->left = y;
- break;
- case UNLINK_MERGE_NONE:
- vm_map_splay_findprev(root, &llist);
- if (llist != &map->header) {
- root = llist;
- llist = root->right;
- root->right = NULL;
- } else if (rlist != &map->header) {
- root = rlist;
- rlist = root->left;
- root->left = NULL;
- } else
- root = NULL;
- break;
+ max_free_left = vm_map_splay_merge_pred(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
+ } else {
+ map->header.left = map->header.right = &map->header;
+ root = NULL;
}
- y = entry->next;
- y->prev = entry->prev;
- y->prev->next = y;
if (root != NULL)
- vm_map_splay_merge(map, root, llist, rlist);
- else
- map->root = NULL;
+ root->max_free = vm_size_max(max_free_left, max_free_right);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
map->nentries--;
CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
@@ -1319,15 +1416,18 @@
vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
{
vm_map_entry_t llist, rlist, root;
+ bool found;
VM_MAP_ASSERT_LOCKED(map);
- root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
- KASSERT(root != NULL,
+ VM_MAP_SPLAY_SPLIT(map, entry->start, 0, root, llist, rlist, found);
+ KASSERT(found,
("%s: resize object not mapped", __func__));
- vm_map_splay_findnext(root, &rlist);
- root->right = NULL;
+ VM_MAP_SPLAY_FINDNEXT(root, rlist);
entry->end += grow_amount;
- vm_map_splay_merge(map, root, llist, rlist);
+ root->max_free = vm_size_max(
+ vm_map_splay_merge_left(map, root, llist),
+ vm_map_splay_merge_succ(map, root, rlist));
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
__func__, map, map->nentries, entry);
@@ -1349,7 +1449,7 @@
vm_offset_t address,
vm_map_entry_t *entry) /* OUT */
{
- vm_map_entry_t cur, lbound;
+ vm_map_entry_t cur, lbound, ubound;
boolean_t locked;
/*
@@ -1395,18 +1495,23 @@
* Since the map is only locked for read access, perform a
* standard binary search tree lookup for "address".
*/
- lbound = &map->header;
- do {
+ lbound = ubound = &map->header;
+ for (;;) {
if (address < cur->start) {
+ ubound = cur;
cur = cur->left;
+ if (cur == lbound)
+ break;
} else if (cur->end <= address) {
lbound = cur;
cur = cur->right;
+ if (cur == ubound)
+ break;
} else {
*entry = cur;
return (TRUE);
}
- } while (cur != NULL);
+ }
*entry = lbound;
return (FALSE);
}
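The lookup loop above never tests for NULL; instead it remembers the last nodes where the search branched left (ubound) and right (lbound), because a thread can only lead back to one of those two. A standalone sketch, assuming invented names:

#include <stdbool.h>

struct lnode {
	struct lnode *left, *right;	/* child, or thread */
	unsigned long start, end;
};

static bool
lookup(struct lnode *hdr, struct lnode *root, unsigned long addr,
    struct lnode **out)
{
	struct lnode *cur = root, *lbound = hdr, *ubound = hdr;

	for (;;) {
		if (addr < cur->start) {
			ubound = cur;
			cur = cur->left;
			if (cur == lbound)
				break;	/* thread: no left child */
		} else if (cur->end <= addr) {
			lbound = cur;
			cur = cur->right;
			if (cur == ubound)
				break;	/* thread: no right child */
		} else {
			*out = cur;	/* cur contains addr */
			return (true);
		}
	}
	*out = lbound;		/* nearest entry below addr */
	return (false);
}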
@@ -1646,8 +1751,8 @@
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
{
vm_map_entry_t llist, rlist, root, y;
- vm_size_t left_length;
- vm_offset_t gap_end;
+ vm_size_t left_length, max_free_left, max_free_right;
+ bool found;
/*
* Request must fit within min/max VM address and must avoid
@@ -1664,63 +1769,70 @@
/*
* After splay_split, if start is within an entry, push it to the start
* of the following gap. If rlist is at the end of the gap containing
- * start, save the end of that gap in gap_end to see if the gap is big
- * enough; otherwise set gap_end to start skip gap-checking and move
- * directly to a search of the right subtree.
+ * start, clear 'found' and see if the gap is big enough.
*/
- root = vm_map_splay_split(map, start, length, &llist, &rlist);
- gap_end = rlist->start;
- if (root != NULL) {
- start = root->end;
- if (root->right != NULL)
- gap_end = start;
+ VM_MAP_SPLAY_SPLIT(map, start, length, root, llist, rlist, found);
+ if (found) {
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ if (root->right == rlist) {
+ start = root->end;
+ found = false;
+ }
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else if (rlist != &map->header) {
root = rlist;
rlist = root->left;
- root->left = NULL;
+ max_free_left = vm_map_splay_merge_pred(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else {
root = llist;
llist = root->right;
- root->right = NULL;
+ root->right = rlist;
+ rlist->left = root;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = rlist->start - root->end;
}
- vm_map_splay_merge(map, root, llist, rlist);
+ root->max_free = vm_size_max(max_free_left, max_free_right);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
- if (length <= gap_end - start)
+ if (!found && length <= rlist->start - start)
return (start);
/* With max_free, can immediately tell if no solution. */
- if (root->right == NULL || length > root->right->max_free)
+ if (root->right == &map->header || length > root->right->max_free)
return (vm_map_max(map) - length + 1);
/*
* Splay for the least large-enough gap in the right subtree.
*/
llist = rlist = &map->header;
- for (left_length = 0;;
+ for (left_length = 0, found = true; found;
left_length = vm_map_entry_max_free_left(root, llist)) {
if (length <= left_length)
- SPLAY_LEFT_STEP(root, y, rlist,
+ SPLAY_LEFT_STEP(root, y, llist, rlist, found,
length <= vm_map_entry_max_free_left(y, llist));
else
- SPLAY_RIGHT_STEP(root, y, llist,
+ SPLAY_RIGHT_STEP(root, y, llist, rlist, found,
length > vm_map_entry_max_free_left(y, root));
- if (root == NULL)
- break;
}
root = llist;
llist = root->right;
- root->right = NULL;
- if (rlist != &map->header) {
+ root->max_free = vm_map_splay_merge_left(map, root, llist);
+ if (rlist == &map->header) {
+ root->right = rlist;
+ root->max_free = vm_size_max(root->max_free,
+ rlist->start - root->end);
+ } else {
y = rlist;
rlist = y->left;
- y->left = NULL;
- vm_map_splay_merge(map, y, &map->header, rlist);
- y->max_free = MAX(
- vm_map_entry_max_free_left(y, root),
- vm_map_entry_max_free_right(y, &map->header));
+ y->left = root;
+ y->max_free = vm_size_max(
+ y->start - root->end,
+ vm_map_splay_merge_right(map, y, rlist));
root->right = y;
+ root->max_free = vm_size_max(root->max_free, y->max_free);
}
- vm_map_splay_merge(map, root, llist, &map->header);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
return (root->end);
}
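The gap search relies on the max_free augmentation to prune whole subtrees. The same first-fit idea, minus the splaying and threading, can be sketched on a plain NULL-terminated augmented BST (invented names, illustrative only); each NULL child stands for the single gap between its bounding entries:

#include <limits.h>

struct fnode {
	struct fnode *left, *right;
	unsigned long start, end;	/* this entry's extent */
	unsigned long max_free;		/* largest gap in this subtree */
};

/* Recompute max_free bottom-up; (lo_end, hi_start) bound the subtree. */
static unsigned long
fix_max_free(struct fnode *root, unsigned long lo_end, unsigned long hi_start)
{
	unsigned long ml, mr;

	if (root == NULL)
		return (hi_start - lo_end);	/* a single gap */
	ml = fix_max_free(root->left, lo_end, root->start);
	mr = fix_max_free(root->right, root->end, hi_start);
	root->max_free = ml > mr ? ml : mr;
	return (root->max_free);
}

/* Lowest-addressed gap of at least "length", or ULONG_MAX if none. */
static unsigned long
findspace(struct fnode *root, unsigned long lo_end, unsigned long hi_start,
    unsigned long length)
{
	unsigned long r;

	if (root == NULL)
		return (hi_start - lo_end >= length ? lo_end : ULONG_MAX);
	if (root->max_free < length)
		return (ULONG_MAX);	/* prune: no gap here is enough */
	r = findspace(root->left, lo_end, root->start, length);
	if (r != ULONG_MAX)
		return (r);
	return (findspace(root->right, root->end, hi_start, length));
}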
@@ -2093,14 +2205,15 @@
* The map must be locked.
*/
void
-vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry)
+vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
+ vm_map_entry_t entry)
{
VM_MAP_ASSERT_LOCKED(map);
if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
- vm_map_mergeable_neighbors(prev, entry)) {
- vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
- vm_map_merged_neighbor_dispose(map, prev);
+ vm_map_mergeable_neighbors(prev_entry, entry)) {
+ vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
+ vm_map_merged_neighbor_dispose(map, prev_entry);
}
}
@@ -2445,7 +2558,7 @@
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_prot_t new_prot, boolean_t set_max)
{
- vm_map_entry_t current, entry, in_tran, prev_entry;
+ vm_map_entry_t entry, first_entry, in_tran, prev_entry;
vm_object_t obj;
struct ucred *cred;
vm_prot_t old_prot;
@@ -2468,26 +2581,26 @@
VM_MAP_RANGE_CHECK(map, start, end);
- if (!vm_map_lookup_entry(map, start, &entry))
- entry = vm_map_entry_succ(entry);
+ if (!vm_map_lookup_entry(map, start, &first_entry))
+ first_entry = vm_map_entry_succ(first_entry);
/*
* Make a first pass to check for protection violations.
*/
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
- if ((current->eflags & MAP_ENTRY_GUARD) != 0)
+ for (entry = first_entry; entry->start < end;
+ entry = vm_map_entry_succ(entry)) {
+ if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
continue;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
+ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
}
- if ((new_prot & current->max_protection) != new_prot) {
+ if ((new_prot & entry->max_protection) != new_prot) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);
}
- if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
- in_tran = current;
+ if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
+ in_tran = entry;
}
/*
@@ -2511,30 +2624,30 @@
* some may now be mergeable.
*/
rv = KERN_SUCCESS;
- vm_map_clip_start(map, entry, start);
- for (current = entry; current->start < end;
- current = vm_map_entry_succ(current)) {
+ vm_map_clip_start(map, first_entry, start);
+ for (entry = first_entry; entry->start < end;
+ entry = vm_map_entry_succ(entry)) {
+ vm_map_clip_end(map, entry, end);
- vm_map_clip_end(map, current, end);
-
if (set_max ||
- ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
- ENTRY_CHARGED(current) ||
- (current->eflags & MAP_ENTRY_GUARD) != 0) {
+ ((new_prot & ~(entry->protection)) & VM_PROT_WRITE) == 0 ||
+ ENTRY_CHARGED(entry) ||
+ (entry->eflags & MAP_ENTRY_GUARD) != 0) {
continue;
}
cred = curthread->td_ucred;
- obj = current->object.vm_object;
+ obj = entry->object.vm_object;
- if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
- if (!swap_reserve(current->end - current->start)) {
+ if (obj == NULL ||
+ (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
+ if (!swap_reserve(entry->end - entry->start)) {
rv = KERN_RESOURCE_SHORTAGE;
- end = current->end;
+ end = entry->end;
break;
}
crhold(cred);
- current->cred = cred;
+ entry->cred = cred;
continue;
}
@@ -2551,11 +2664,11 @@
*/
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged (entry %p)",
- obj, current));
+ obj, entry));
if (!swap_reserve(ptoa(obj->size))) {
VM_OBJECT_WUNLOCK(obj);
rv = KERN_RESOURCE_SHORTAGE;
- end = current->end;
+ end = entry->end;
break;
}
@@ -2570,22 +2683,22 @@
* Otherwise, just simplify entries, since some may have been modified.
* [Note that clipping is not necessary the second time.]
*/
- for (prev_entry = vm_map_entry_pred(entry), current = entry;
- current->start < end;
- vm_map_try_merge_entries(map, prev_entry, current),
- prev_entry = current, current = vm_map_entry_succ(current)) {
+ for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
+ entry->start < end;
+ vm_map_try_merge_entries(map, prev_entry, entry),
+ prev_entry = entry, entry = vm_map_entry_succ(entry)) {
if (rv != KERN_SUCCESS ||
- (current->eflags & MAP_ENTRY_GUARD) != 0)
+ (entry->eflags & MAP_ENTRY_GUARD) != 0)
continue;
- old_prot = current->protection;
+ old_prot = entry->protection;
if (set_max)
- current->protection =
- (current->max_protection = new_prot) &
+ entry->protection =
+ (entry->max_protection = new_prot) &
old_prot;
else
- current->protection = new_prot;
+ entry->protection = new_prot;
/*
* For user wired map entries, the normal lazy evaluation of
@@ -2593,25 +2706,25 @@
* undesirable. Instead, immediately copy any pages that are
* copy-on-write and enable write access in the physical map.
*/
- if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
- (current->protection & VM_PROT_WRITE) != 0 &&
+ if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
+ (entry->protection & VM_PROT_WRITE) != 0 &&
(old_prot & VM_PROT_WRITE) == 0)
- vm_fault_copy_entry(map, map, current, current, NULL);
+ vm_fault_copy_entry(map, map, entry, entry, NULL);
/*
* When restricting access, update the physical map. Worry
* about copy-on-write here.
*/
- if ((old_prot & ~current->protection) != 0) {
+ if ((old_prot & ~entry->protection) != 0) {
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
VM_PROT_ALL)
- pmap_protect(map->pmap, current->start,
- current->end,
- current->protection & MASK(current));
+ pmap_protect(map->pmap, entry->start,
+ entry->end,
+ entry->protection & MASK(entry));
#undef MASK
}
}
- vm_map_try_merge_entries(map, prev_entry, current);
+ vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
return (rv);
}
@@ -2631,7 +2744,7 @@
vm_offset_t end,
int behav)
{
- vm_map_entry_t current, prev_entry;
+ vm_map_entry_t entry, prev_entry;
bool modify_map;
/*
@@ -2670,13 +2783,13 @@
*/
VM_MAP_RANGE_CHECK(map, start, end);
- if (vm_map_lookup_entry(map, start, &current)) {
+ if (vm_map_lookup_entry(map, start, &entry)) {
if (modify_map)
- vm_map_clip_start(map, current, start);
- prev_entry = vm_map_entry_pred(current);
+ vm_map_clip_start(map, entry, start);
+ prev_entry = vm_map_entry_pred(entry);
} else {
- prev_entry = current;
- current = vm_map_entry_succ(current);
+ prev_entry = entry;
+ entry = vm_map_entry_succ(entry);
}
if (modify_map) {
@@ -2686,41 +2799,41 @@
* We clip the vm_map_entry so that behavioral changes are
* limited to the specified address range.
*/
- for (; current->start < end; prev_entry = current,
- current = vm_map_entry_succ(current)) {
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+ for (; entry->start < end;
+ prev_entry = entry, entry = vm_map_entry_succ(entry)) {
+ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
continue;
- vm_map_clip_end(map, current, end);
+ vm_map_clip_end(map, entry, end);
switch (behav) {
case MADV_NORMAL:
- vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
+ vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_NORMAL);
break;
case MADV_SEQUENTIAL:
- vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
+ vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
break;
case MADV_RANDOM:
- vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
+ vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_RANDOM);
break;
case MADV_NOSYNC:
- current->eflags |= MAP_ENTRY_NOSYNC;
+ entry->eflags |= MAP_ENTRY_NOSYNC;
break;
case MADV_AUTOSYNC:
- current->eflags &= ~MAP_ENTRY_NOSYNC;
+ entry->eflags &= ~MAP_ENTRY_NOSYNC;
break;
case MADV_NOCORE:
- current->eflags |= MAP_ENTRY_NOCOREDUMP;
+ entry->eflags |= MAP_ENTRY_NOCOREDUMP;
break;
case MADV_CORE:
- current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
+ entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
break;
default:
break;
}
- vm_map_try_merge_entries(map, prev_entry, current);
+ vm_map_try_merge_entries(map, prev_entry, entry);
}
- vm_map_try_merge_entries(map, prev_entry, current);
+ vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
} else {
vm_pindex_t pstart, pend;
@@ -2732,11 +2845,11 @@
* Since we don't clip the vm_map_entry, we have to clip
* the vm_object pindex and count.
*/
- for (; current->start < end;
- current = vm_map_entry_succ(current)) {
+ for (; entry->start < end;
+ entry = vm_map_entry_succ(entry)) {
vm_offset_t useEnd, useStart;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
continue;
/*
@@ -2747,21 +2860,21 @@
* backing object can change.
*/
if (behav == MADV_FREE &&
- current->object.vm_object != NULL &&
- current->object.vm_object->backing_object != NULL)
+ entry->object.vm_object != NULL &&
+ entry->object.vm_object->backing_object != NULL)
continue;
- pstart = OFF_TO_IDX(current->offset);
- pend = pstart + atop(current->end - current->start);
- useStart = current->start;
- useEnd = current->end;
+ pstart = OFF_TO_IDX(entry->offset);
+ pend = pstart + atop(entry->end - entry->start);
+ useStart = entry->start;
+ useEnd = entry->end;
- if (current->start < start) {
- pstart += atop(start - current->start);
+ if (entry->start < start) {
+ pstart += atop(start - entry->start);
useStart = start;
}
- if (current->end > end) {
- pend -= atop(current->end - end);
+ if (entry->end > end) {
+ pend -= atop(entry->end - end);
useEnd = end;
}
@@ -2782,7 +2895,7 @@
pmap_advise(map->pmap, useStart, useEnd,
behav);
- vm_object_madvise(current->object.vm_object, pstart,
+ vm_object_madvise(entry->object.vm_object, pstart,
pend, behav);
/*
@@ -2791,11 +2904,11 @@
* paging structures are already populated.
*/
if (behav == MADV_WILLNEED &&
- current->wired_count == 0) {
+ entry->wired_count == 0) {
vm_map_pmap_enter(map,
useStart,
- current->protection,
- current->object.vm_object,
+ entry->protection,
+ entry->object.vm_object,
pstart,
ptoa(pend - pstart),
MAP_PREFAULT_MADVISE
@@ -3360,7 +3473,7 @@
boolean_t syncio,
boolean_t invalidate)
{
- vm_map_entry_t current, entry, next_entry;
+ vm_map_entry_t entry, first_entry, next_entry;
vm_size_t size;
vm_object_t object;
vm_ooffset_t offset;
@@ -3369,25 +3482,24 @@
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
- if (!vm_map_lookup_entry(map, start, &entry)) {
+ if (!vm_map_lookup_entry(map, start, &first_entry)) {
vm_map_unlock_read(map);
return (KERN_INVALID_ADDRESS);
} else if (start == end) {
- start = entry->start;
- end = entry->end;
+ start = first_entry->start;
+ end = first_entry->end;
}
/*
* Make a first pass to check for user-wired memory and holes.
*/
- for (current = entry; current->start < end;
- current = next_entry) {
- if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
+ for (entry = first_entry; entry->start < end; entry = next_entry) {
+ if (invalidate && (entry->eflags & MAP_ENTRY_USER_WIRED)) {
vm_map_unlock_read(map);
return (KERN_INVALID_ARGUMENT);
}
- next_entry = vm_map_entry_succ(current);
- if (end > current->end &&
- current->end != next_entry->start) {
+ next_entry = vm_map_entry_succ(entry);
+ if (end > entry->end &&
+ entry->end != next_entry->start) {
vm_map_unlock_read(map);
return (KERN_INVALID_ADDRESS);
}
@@ -3401,15 +3513,15 @@
* Make a second pass, cleaning/uncaching pages from the indicated
* objects as we go.
*/
- for (current = entry; current->start < end;) {
- offset = current->offset + (start - current->start);
- size = (end <= current->end ? end : current->end) - start;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
+ for (entry = first_entry; entry->start < end;) {
+ offset = entry->offset + (start - entry->start);
+ size = (end <= entry->end ? end : entry->end) - start;
+ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
vm_map_t smap;
vm_map_entry_t tentry;
vm_size_t tsize;
- smap = current->object.sub_map;
+ smap = entry->object.sub_map;
vm_map_lock_read(smap);
(void) vm_map_lookup_entry(smap, offset, &tentry);
tsize = tentry->end - offset;
@@ -3419,7 +3531,7 @@
offset = tentry->offset + (offset - tentry->start);
vm_map_unlock_read(smap);
} else {
- object = current->object.vm_object;
+ object = entry->object.vm_object;
}
vm_object_reference(object);
last_timestamp = map->timestamp;
@@ -3430,8 +3542,8 @@
vm_object_deallocate(object);
vm_map_lock_read(map);
if (last_timestamp == map->timestamp ||
- !vm_map_lookup_entry(map, start, &current))
- current = vm_map_entry_succ(current);
+ !vm_map_lookup_entry(map, start, &entry))
+ entry = vm_map_entry_succ(entry);
}
vm_map_unlock_read(map);
@@ -3929,10 +4041,8 @@
new_map->anon_loc = old_map->anon_loc;
- old_entry = vm_map_entry_first(old_map);
-
- while (old_entry != &old_map->header) {
- if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
+ VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
+ if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
panic("vm_map_fork: encountered a submap");
inh = old_entry->inheritance;
@@ -3946,7 +4056,8 @@
case VM_INHERIT_SHARE:
/*
- * Clone the entry, creating the shared object if necessary.
+ * Clone the entry, creating the shared object if
+ * necessary.
*/
object = old_entry->object.vm_object;
if (object == NULL) {
@@ -4081,7 +4192,6 @@
break;
}
- old_entry = vm_map_entry_succ(old_entry);
}
/*
* Use inlined vm_map_unlock() to postpone handling the deferred
@@ -4846,8 +4956,12 @@
_vm_map_assert_consistent(vm_map_t map, int check)
{
vm_map_entry_t entry, prev;
+ vm_map_entry_t cur, lbound, ubound;
vm_size_t max_left, max_right;
+#ifdef DIAGNOSTIC
+ ++map->nupdates;
+#endif
if (enable_vmmap_check != check)
return;
@@ -4859,23 +4973,39 @@
KASSERT(entry->start < entry->end,
("map %p start = %jx, end = %jx", map,
(uintmax_t)entry->start, (uintmax_t)entry->end));
- KASSERT(entry->end <= vm_map_entry_succ(entry)->start,
- ("map %p end = %jx, next->start = %jx", map,
- (uintmax_t)entry->end,
- (uintmax_t)vm_map_entry_succ(entry)->start));
- KASSERT(entry->left == NULL ||
+ KASSERT(entry->left == &map->header ||
entry->left->start < entry->start,
("map %p left->start = %jx, start = %jx", map,
(uintmax_t)entry->left->start, (uintmax_t)entry->start));
- KASSERT(entry->right == NULL ||
+ KASSERT(entry->right == &map->header ||
entry->start < entry->right->start,
("map %p start = %jx, right->start = %jx", map,
(uintmax_t)entry->start, (uintmax_t)entry->right->start));
- max_left = vm_map_entry_max_free_left(entry,
- vm_map_entry_pred(entry));
- max_right = vm_map_entry_max_free_right(entry,
- vm_map_entry_succ(entry));
- KASSERT(entry->max_free == MAX(max_left, max_right),
+ cur = map->root;
+ lbound = ubound = &map->header;
+ for (;;) {
+ if (entry->start < cur->start) {
+ ubound = cur;
+ cur = cur->left;
+ KASSERT(cur != lbound,
+ ("map %p cannot find %jx",
+ map, entry->start));
+ } else if (cur->end <= entry->start) {
+ lbound = cur;
+ cur = cur->right;
+ KASSERT(cur != ubound,
+ ("map %p cannot find %jx",
+ map, entry->start));
+ } else {
+ KASSERT(cur == entry,
+ ("map %p cannot find %jx",
+ map, entry->start));
+ break;
+ }
+ }
+ max_left = vm_map_entry_max_free_left(entry, lbound);
+ max_right = vm_map_entry_max_free_right(entry, ubound);
+ KASSERT(entry->max_free == vm_size_max(max_left, max_right),
("map %p max = %jx, max_left = %jx, max_right = %jx", map,
(uintmax_t)entry->max_free,
(uintmax_t)max_left, (uintmax_t)max_right));