D21964.id63130.diff (41 KB)
Attached to D21964: Make vm_map a threaded tree
Index: sys/security/mac/mac_process.c
===================================================================
--- sys/security/mac/mac_process.c
+++ sys/security/mac/mac_process.c
@@ -252,7 +252,7 @@
mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
struct vm_map *map)
{
- vm_map_entry_t vme;
+ vm_map_entry_t prev, vme;
int result;
vm_prot_t revokeperms;
vm_object_t backing_object, object;
@@ -263,8 +263,10 @@
if (!mac_mmap_revocation)
return;
+ prev = &map->header;
vm_map_lock(map);
- VM_MAP_ENTRY_FOREACH(vme, map) {
+ for (vme = map->header.right; vme != &map->header;
+ prev = vme, vme = vm_map_entry_succ(prev)) {
if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
mac_proc_vm_revoke_recurse(td, cred,
vme->object.sub_map);
@@ -363,7 +365,7 @@
}
pmap_protect(map->pmap, vme->start, vme->end,
vme->protection & ~revokeperms);
- vm_map_try_merge_entries(map, vme->prev, vme);
+ vm_map_try_merge_entries(map, prev, vme);
}
}
vm_map_unlock(map);
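
The hunk above shows the pattern callers need now that vm_map_entry has no prev field: the predecessor that vm_map_try_merge_entries() expects is carried through the loop by hand. A minimal userland sketch of that shape, with toy types and a stub merge standing in for the kernel API (not kernel code):

#include <stdio.h>

struct toy_entry {
	struct toy_entry *right;	/* successor, header-terminated */
	int id;
};

static void
toy_try_merge(struct toy_entry *prev, struct toy_entry *e)
{
	/* Stand-in for vm_map_try_merge_entries(map, prev, vme). */
	printf("try_merge(%d, %d)\n", prev->id, e->id);
}

int
main(void)
{
	struct toy_entry header = { .id = -1 };
	struct toy_entry e1 = { .id = 1 }, e2 = { .id = 2 };
	struct toy_entry *prev, *e;

	header.right = &e1;
	e1.right = &e2;
	e2.right = &header;

	/* Same loop shape as the patched mac_proc_vm_revoke_recurse(). */
	prev = &header;
	for (e = header.right; e != &header; prev = e, e = e->right)
		toy_try_merge(prev, e);
	return (0);
}
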
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -99,10 +99,8 @@
* Also included is control information for virtual copy operations.
*/
struct vm_map_entry {
- struct vm_map_entry *prev; /* previous entry */
- struct vm_map_entry *next; /* next entry */
- struct vm_map_entry *left; /* left child in binary search tree */
- struct vm_map_entry *right; /* right child in binary search tree */
+ struct vm_map_entry *left; /* left child or previous entry */
+ struct vm_map_entry *right; /* right child or next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
vm_offset_t next_read; /* vaddr of the next sequential read */
@@ -416,10 +414,22 @@
vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
+
+static inline vm_map_entry_t
+vm_map_entry_succ(vm_map_entry_t entry)
+{
+ vm_map_entry_t after;
+
+ after = entry->right;
+ if (after->left->start > entry->start)
+ while (after = after->left, after->left != entry);
+ return (after);
+}
+
#define VM_MAP_ENTRY_FOREACH(it, map) \
- for ((it) = (map)->header.next; \
+ for ((it) = (map)->header.right; \
(it) != &(map)->header; \
- (it) = (it)->next)
+ (it) = vm_map_entry_succ(it))
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
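
With prev/next gone, VM_MAP_ENTRY_FOREACH walks the tree through vm_map_entry_succ: an entry's right pointer is either a real right child, in which case the successor is the leftmost node of that subtree, found by backtracking down left pointers until one threads back to the entry, or a thread pointing directly at the successor; the start comparison distinguishes the two. A self-contained userland sketch with a toy struct and made-up addresses (not the kernel vm_map_entry) that builds a four-entry threaded tree and iterates it the way the macro does:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct toy_entry {
	struct toy_entry *left;		/* left child or predecessor thread */
	struct toy_entry *right;	/* right child or successor thread */
	unsigned long start, end;
};

/* Same logic as the vm_map_entry_succ() added above. */
static struct toy_entry *
toy_succ(struct toy_entry *entry)
{
	struct toy_entry *after;

	after = entry->right;
	if (after->left->start > entry->start)
		while (after = after->left, after->left != entry);
	return (after);
}

int
main(void)
{
	/* Header sentinel: end = map minimum, start = map maximum. */
	struct toy_entry header = { .start = ULONG_MAX, .end = 0 };
	struct toy_entry a = { .start = 10, .end = 20 };
	struct toy_entry b = { .start = 30, .end = 40 };
	struct toy_entry c = { .start = 50, .end = 60 };
	struct toy_entry d = { .start = 70, .end = 80 };
	struct toy_entry *it;

	/* Tree rooted at b: a is b's left child, d is b's right child,
	 * c is d's left child.  Leaves hold threads instead of NULL. */
	b.left = &a; b.right = &d;
	d.left = &c; d.right = &header;	/* rightmost: thread to header */
	a.left = &header; a.right = &b;	/* leaf: both pointers are threads */
	c.left = &b; c.right = &d;	/* leaf: both pointers are threads */
	header.right = &a;		/* first entry */
	header.left = &d;		/* last entry */

	/* Equivalent of VM_MAP_ENTRY_FOREACH(it, map): address order. */
	for (it = header.right; it != &header; it = toy_succ(it))
		printf("[%lu, %lu)\n", it->start, it->end);

	/* succ(b) must walk down d's left spine to find c. */
	assert(toy_succ(&b) == &c);
	return (0);
}
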
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -574,7 +574,7 @@
entry = td->td_map_def_user;
td->td_map_def_user = NULL;
while (entry != NULL) {
- next = entry->next;
+ next = entry->right;
MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
MAP_ENTRY_VN_EXEC));
@@ -739,47 +739,8 @@
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
&enable_vmmap_check, 0, "Enable vm map consistency checking");
-static void
-_vm_map_assert_consistent(vm_map_t map)
-{
- vm_map_entry_t child, entry, prev;
- vm_size_t max_left, max_right;
+static void _vm_map_assert_consistent(vm_map_t map);
- if (!enable_vmmap_check)
- return;
-
- for (prev = &map->header; (entry = prev->next) != &map->header;
- prev = entry) {
- KASSERT(prev->end <= entry->start,
- ("map %p prev->end = %jx, start = %jx", map,
- (uintmax_t)prev->end, (uintmax_t)entry->start));
- KASSERT(entry->start < entry->end,
- ("map %p start = %jx, end = %jx", map,
- (uintmax_t)entry->start, (uintmax_t)entry->end));
- KASSERT(entry->end <= entry->next->start,
- ("map %p end = %jx, next->start = %jx", map,
- (uintmax_t)entry->end, (uintmax_t)entry->next->start));
- KASSERT(entry->left == NULL ||
- entry->left->start < entry->start,
- ("map %p left->start = %jx, start = %jx", map,
- (uintmax_t)entry->left->start, (uintmax_t)entry->start));
- KASSERT(entry->right == NULL ||
- entry->start < entry->right->start,
- ("map %p start = %jx, right->start = %jx", map,
- (uintmax_t)entry->start, (uintmax_t)entry->right->start));
- child = entry->left;
- max_left = (child != NULL) ? child->max_free :
- entry->start - prev->end;
- child = entry->right;
- max_right = (child != NULL) ? child->max_free :
- entry->next->start - entry->end;
- KASSERT(entry->max_free == MAX(max_left, max_right),
- ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
- (uintmax_t)entry->max_free,
- (uintmax_t)max_left, (uintmax_t)max_right));
- }
-}
-
#define VM_MAP_ASSERT_CONSISTENT(map) \
_vm_map_assert_consistent(map)
#else
@@ -901,7 +862,6 @@
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
- map->header.next = map->header.prev = &map->header;
map->header.eflags = MAP_ENTRY_HEADER;
map->needs_wakeup = FALSE;
map->system_map = 0;
@@ -909,6 +869,7 @@
map->header.end = min;
map->header.start = max;
map->flags = 0;
+ map->header.left = map->header.right = &map->header;
map->root = NULL;
map->timestamp = 0;
map->busy = 0;
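
The self-pointers above are the threaded equivalent of the old empty circular list: with no entries, the header is its own predecessor and successor, so the FOREACH loop body never runs. A tiny userland check of that invariant (toy struct, not kernel code):

#include <assert.h>

struct toy_entry {
	struct toy_entry *left, *right;
	unsigned long start, end;
};

int
main(void)
{
	struct toy_entry header = { .start = ~0UL, .end = 0 };
	struct toy_entry *it;
	int visited = 0;

	/* Mirrors map->header.left = map->header.right = &map->header. */
	header.left = header.right = &header;
	for (it = header.right; it != &header; it = it->right)
		visited++;	/* an empty map has nothing to visit */
	assert(visited == 0);
	return (0);
}
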
@@ -968,6 +929,12 @@
(behavior & MAP_ENTRY_BEHAV_MASK);
}
+static inline vm_size_t
+vm_size_max(vm_size_t a, vm_size_t b)
+{
+ return (a > b) ? a : b;
+}
+
/*
* vm_map_entry_max_free_{left,right}:
*
@@ -979,7 +946,7 @@
vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
{
- return (root->left != NULL ?
+ return (root->left != left_ancestor ?
root->left->max_free : root->start - left_ancestor->end);
}
@@ -987,11 +954,31 @@
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
{
- return (root->right != NULL ?
+ return (root->right != right_ancestor ?
root->right->max_free : right_ancestor->start - root->end);
}
-#define SPLAY_LEFT_STEP(root, y, rlist, test) do { \
+/*
+ * vm_map_entry_{pred,succ}:
+ *
+ * Find the {predecessor, successor} of the entry by taking one step
+ * in the appropriate direction and backtracking as much as necessary.
+ */
+
+/* vm_map_entry_succ is defined in vm_map.h. */
+
+static inline vm_map_entry_t
+vm_map_entry_pred(vm_map_entry_t entry)
+{
+ vm_map_entry_t prior;
+
+ prior = entry->left;
+ if (prior->right->start < entry->start)
+ while (prior = prior->right, prior->right != entry);
+ return (prior);
+}
+
+#define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do { \
vm_size_t max_free; \
\
/* \
@@ -1003,14 +990,17 @@
max_free = root->max_free; \
KASSERT(max_free >= vm_map_entry_max_free_right(root, rlist), \
("%s: max_free invariant fails", __func__)); \
- if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \
+ if (y == llist ? max_free > 0 : max_free - 1 < y->max_free) \
max_free = vm_map_entry_max_free_right(root, rlist); \
- if (y != NULL && (test)) { \
+ if (y != llist && (test)) { \
/* Rotate right and make y root. */ \
- root->left = y->right; \
- y->right = root; \
+ if (y->right != root) { \
+ root->left = y->right; \
+ y->right = root; \
+ } \
if (max_free < y->max_free) \
- root->max_free = max_free = MAX(max_free, \
+ root->max_free = max_free = \
+ vm_size_max(max_free, \
vm_map_entry_max_free_left(root, y)); \
root = y; \
y = root->left; \
@@ -1021,10 +1011,10 @@
("%s: max_free not copied from right", __func__)); \
root->left = rlist; \
rlist = root; \
- root = y; \
+ root = (y != llist) ? y : NULL; \
} while (0)
-#define SPLAY_RIGHT_STEP(root, y, llist, test) do { \
+#define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do { \
vm_size_t max_free; \
\
/* \
@@ -1036,14 +1026,17 @@
max_free = root->max_free; \
KASSERT(max_free >= vm_map_entry_max_free_left(root, llist), \
("%s: max_free invariant fails", __func__)); \
- if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \
+ if (y == rlist ? max_free > 0 : max_free - 1 < y->max_free) \
max_free = vm_map_entry_max_free_left(root, llist); \
- if (y != NULL && (test)) { \
+ if (y != rlist && (test)) { \
/* Rotate left and make y root. */ \
- root->right = y->left; \
- y->left = root; \
+ if (y->left != root) { \
+ root->right = y->left; \
+ y->left = root; \
+ } \
if (max_free < y->max_free) \
- root->max_free = max_free = MAX(max_free, \
+ root->max_free = max_free = \
+ vm_size_max(max_free, \
vm_map_entry_max_free_right(root, y)); \
root = y; \
y = root->right; \
@@ -1054,18 +1047,18 @@
("%s: max_free not copied from left", __func__)); \
root->right = llist; \
llist = root; \
- root = y; \
+ root = (y != rlist) ? y : NULL; \
} while (0)
/*
- * Walk down the tree until we find addr or a NULL pointer where addr would go,
- * breaking off left and right subtrees of nodes less than, or greater than
- * addr. Treat pointers to nodes with max_free < length as NULL pointers.
- * llist and rlist are the two sides in reverse order (bottom-up), with llist
- * linked by the right pointer and rlist linked by the left pointer in the
- * vm_map_entry, and both lists terminated by &map->header. This function, and
- * the subsequent call to vm_map_splay_merge, rely on the start and end address
- * values in &map->header.
+ * Walk down the tree until we find addr or a gap where addr would go, breaking
+ * off left and right subtrees of nodes less than, or greater than addr. Treat
+ * subtrees with root->max_free < length as empty trees. llist and rlist are
+ * the two sides in reverse order (bottom-up), with llist linked by the right
+ * pointer and rlist linked by the left pointer in the vm_map_entry, and both
+ * lists terminated by &map->header. This function, and the subsequent call to
+ * vm_map_splay_merge_{left,right}, rely on the start and end address values in
+ * &map->header.
*/
static vm_map_entry_t
vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
@@ -1079,10 +1072,10 @@
KASSERT(llist->end <= root->start && root->end <= rlist->start,
("%s: root not within tree bounds", __func__));
if (addr < root->start) {
- SPLAY_LEFT_STEP(root, y, rlist,
+ SPLAY_LEFT_STEP(root, y, llist, rlist,
y->max_free >= length && addr < y->start);
} else if (addr >= root->end) {
- SPLAY_RIGHT_STEP(root, y, llist,
+ SPLAY_RIGHT_STEP(root, y, llist, rlist,
y->max_free >= length && addr >= y->end);
} else
break;
@@ -1095,25 +1088,34 @@
static void
vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
{
- vm_map_entry_t rlist, y;
+ vm_map_entry_t llist, rlist, y;
+ rlist = *iolist;
+ llist = root;
root = root->right;
- rlist = *iolist;
- while (root != NULL)
- SPLAY_LEFT_STEP(root, y, rlist, true);
- *iolist = rlist;
+ if (root != rlist) {
+ do
+ SPLAY_LEFT_STEP(root, y, llist, rlist, true);
+ while (root != NULL);
+ *iolist = rlist;
+ }
}
static void
vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
{
- vm_map_entry_t llist, y;
+ vm_map_entry_t llist, rlist, y;
+
+ llist = *iolist;
+ rlist = root;
root = root->left;
- llist = *iolist;
- while (root != NULL)
- SPLAY_RIGHT_STEP(root, y, llist, true);
- *iolist = llist;
+ if (root != llist) {
+ do
+ SPLAY_RIGHT_STEP(root, y, llist, rlist, true);
+ while (root != NULL);
+ *iolist = llist;
+ }
}
static inline void
@@ -1130,47 +1132,120 @@
* Walk back up the two spines, flip the pointers and set max_free. The
* subtrees of the root go at the bottom of llist and rlist.
*/
-static void
-vm_map_splay_merge(vm_map_t map, vm_map_entry_t root,
- vm_map_entry_t llist, vm_map_entry_t rlist)
+static vm_size_t
+vm_map_splay_merge_left_walk(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
{
- vm_map_entry_t prev;
- vm_size_t max_free_left, max_free_right;
+ do {
+ /*
+ * The max_free values of the children of llist are in
+ * llist->max_free and max_free. Update with the
+ * max value.
+ */
+ llist->max_free = max_free =
+ vm_size_max(llist->max_free, max_free);
+ vm_map_entry_swap(&llist->right, &tail);
+ vm_map_entry_swap(&tail, &llist);
+ } while (llist != &map->header);
+ root->left = tail;
+ return (max_free);
+}
- max_free_left = vm_map_entry_max_free_left(root, llist);
+/*
+ * When llist is known to be the predecessor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_pred(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t llist)
+{
+ vm_size_t max_free;
+
+ max_free = root->start - llist->end;
+ if (llist != &map->header)
+ max_free = vm_map_splay_merge_left_walk(map, root,
+ root, max_free, llist);
+ else {
+ root->left = llist;
+ llist->right = root;
+ }
+ return (max_free);
+}
+
+/*
+ * When llist may or may not be the predecessor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_left(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t llist)
+{
+ vm_size_t max_free;
+
+ max_free = vm_map_entry_max_free_left(root, llist);
if (llist != &map->header) {
- prev = root->left;
- do {
- /*
- * The max_free values of the children of llist are in
- * llist->max_free and max_free_left. Update with the
- * max value.
- */
- llist->max_free = max_free_left =
- MAX(llist->max_free, max_free_left);
- vm_map_entry_swap(&llist->right, &prev);
- vm_map_entry_swap(&prev, &llist);
- } while (llist != &map->header);
- root->left = prev;
+ max_free = vm_map_splay_merge_left_walk(map, root,
+ root->left == llist ? root : root->left,
+ max_free, llist);
+ } else if (root->left == llist)
+ llist->right = root;
+ return (max_free);
+}
+
+static vm_size_t
+vm_map_splay_merge_right_walk(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
+{
+ do {
+ /*
+ * The max_free values of the children of rlist are in
+ * rlist->max_free and max_free. Update with the
+ * max value.
+ */
+ rlist->max_free = max_free =
+ vm_size_max(rlist->max_free, max_free);
+ vm_map_entry_swap(&rlist->left, &tail);
+ vm_map_entry_swap(&tail, &rlist);
+ } while (rlist != &map->header);
+ root->right = tail;
+ return (max_free);
+}
+
+/*
+ * When rlist is known to be the successor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_succ(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t rlist)
+{
+ vm_size_t max_free;
+
+ max_free = rlist->start - root->end;
+ if (rlist != &map->header) {
+ max_free = vm_map_splay_merge_right_walk(map, root,
+ root, max_free, rlist);
+ } else {
+ root->right = rlist;
+ rlist->left = root;
}
- max_free_right = vm_map_entry_max_free_right(root, rlist);
+ return (max_free);
+}
+
+/*
+ * When rlist may or may not be the successor of root.
+ */
+static inline vm_size_t
+vm_map_splay_merge_right(vm_map_t map, vm_map_entry_t root,
+ vm_map_entry_t rlist)
+{
+ vm_size_t max_free;
+
+ max_free = vm_map_entry_max_free_right(root, rlist);
if (rlist != &map->header) {
- prev = root->right;
- do {
- /*
- * The max_free values of the children of rlist are in
- * rlist->max_free and max_free_right. Update with the
- * max value.
- */
- rlist->max_free = max_free_right =
- MAX(rlist->max_free, max_free_right);
- vm_map_entry_swap(&rlist->left, &prev);
- vm_map_entry_swap(&prev, &rlist);
- } while (rlist != &map->header);
- root->right = prev;
- }
- root->max_free = MAX(max_free_left, max_free_right);
- map->root = root;
+ max_free = vm_map_splay_merge_right_walk(map, root,
+ root->right == rlist ? root : root->right,
+ max_free, rlist);
+ } else if (root->right == rlist)
+ rlist->left = root;
+ return (max_free);
}
/*
@@ -1183,6 +1258,14 @@
* the pointers and compute max_free. The time bound is O(log n)
* amortized.
*
+ * The tree is threaded, which means that there are no null pointers.
+ * When a node has no left child, its left pointer points to its
+ * predecessor, which the last ancestor on the search path from the root
+ * where the search branched right. Likewise, when a node has no right
+ * child, its right pointer points to its successor. The map header node
+ * is the predecessor of the first map entry, and the successor of the
+ * last.
+ *
* The new root is the vm_map_entry containing "addr", or else an
* adjacent entry (lower if possible) if addr is not in the tree.
*
@@ -1194,10 +1277,12 @@
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
vm_map_entry_t llist, rlist, root;
+ vm_size_t max_free_left, max_free_right;
root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
if (root != NULL) {
- /* do nothing */
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else if (llist != &map->header) {
/*
* Recover the greatest node in the left
@@ -1205,7 +1290,8 @@
*/
root = llist;
llist = root->right;
- root->right = NULL;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_succ(map, root, rlist);
} else if (rlist != &map->header) {
/*
* Recover the least node in the right
@@ -1213,12 +1299,14 @@
*/
root = rlist;
rlist = root->left;
- root->left = NULL;
+ max_free_left = vm_map_splay_merge_pred(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else {
/* There is no root. */
return (NULL);
}
- vm_map_splay_merge(map, root, llist, rlist);
+ root->max_free = vm_size_max(max_free_left, max_free_right);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
return (root);
}
@@ -1241,11 +1329,11 @@
root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
KASSERT(root == NULL,
("vm_map_entry_link: link object already mapped"));
- entry->prev = llist;
- entry->next = rlist;
- llist->next = rlist->prev = entry;
- entry->left = entry->right = NULL;
- vm_map_splay_merge(map, entry, llist, rlist);
+ root = entry;
+ root->max_free = vm_size_max(
+ vm_map_splay_merge_pred(map, root, llist),
+ vm_map_splay_merge_succ(map, root, rlist));
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
}
@@ -1258,44 +1346,37 @@
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
enum unlink_merge_type op)
{
- vm_map_entry_t llist, rlist, root, y;
+ vm_map_entry_t llist, rlist, root;
+ vm_size_t max_free_left, max_free_right;
VM_MAP_ASSERT_LOCKED(map);
root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
KASSERT(root != NULL,
("vm_map_entry_unlink: unlink object not mapped"));
+ vm_map_splay_findprev(root, &llist);
vm_map_splay_findnext(root, &rlist);
- switch (op) {
- case UNLINK_MERGE_NEXT:
- rlist->start = root->start;
- rlist->offset = root->offset;
- y = root->left;
+ if (op == UNLINK_MERGE_NEXT) {
+ rlist->start = root->start;
+ rlist->offset = root->offset;
+ }
+ if (llist != &map->header) {
+ root = llist;
+ llist = root->right;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_succ(map, root, rlist);
+ } else if (rlist != &map->header) {
root = rlist;
rlist = root->left;
- root->left = y;
- break;
- case UNLINK_MERGE_NONE:
- vm_map_splay_findprev(root, &llist);
- if (llist != &map->header) {
- root = llist;
- llist = root->right;
- root->right = NULL;
- } else if (rlist != &map->header) {
- root = rlist;
- rlist = root->left;
- root->left = NULL;
- } else
- root = NULL;
- break;
+ max_free_left = vm_map_splay_merge_pred(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
+ } else {
+ map->header.left = map->header.right = &map->header;
+ root = NULL;
}
- y = entry->next;
- y->prev = entry->prev;
- y->prev->next = y;
if (root != NULL)
- vm_map_splay_merge(map, root, llist, rlist);
- else
- map->root = NULL;
+ root->max_free = vm_size_max(max_free_left, max_free_right);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
map->nentries--;
CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
@@ -1320,9 +1401,11 @@
KASSERT(root != NULL,
("%s: resize object not mapped", __func__));
vm_map_splay_findnext(root, &rlist);
- root->right = NULL;
entry->end += grow_amount;
- vm_map_splay_merge(map, root, llist, rlist);
+ root->max_free = vm_size_max(
+ vm_map_splay_merge_left(map, root, llist),
+ vm_map_splay_merge_succ(map, root, rlist));
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
__func__, map, map->nentries, entry);
@@ -1344,7 +1427,7 @@
vm_offset_t address,
vm_map_entry_t *entry) /* OUT */
{
- vm_map_entry_t cur, lbound;
+ vm_map_entry_t cur, lbound, ubound;
boolean_t locked;
/*
@@ -1388,18 +1471,23 @@
* Since the map is only locked for read access, perform a
* standard binary search tree lookup for "address".
*/
- lbound = &map->header;
- do {
+ lbound = ubound = &map->header;
+ for (;;) {
if (address < cur->start) {
+ ubound = cur;
cur = cur->left;
+ if (cur == lbound)
+ break;
} else if (cur->end <= address) {
lbound = cur;
cur = cur->right;
+ if (cur == ubound)
+ break;
} else {
*entry = cur;
return (TRUE);
}
- } while (cur != NULL);
+ }
*entry = lbound;
return (FALSE);
}
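
The read-locked lookup above cannot splay, so it performs a plain binary search; with no NULL pointers, the walk instead stops when a left step lands on lbound or a right step lands on ubound, i.e. when the pointer just followed was a thread rather than a real child. A userland sketch of that loop over the same four-entry toy tree used earlier (not kernel code):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

struct toy_entry {
	struct toy_entry *left, *right;
	unsigned long start, end;
};

static bool
toy_lookup(struct toy_entry *header, struct toy_entry *root,
    unsigned long addr, struct toy_entry **out)
{
	struct toy_entry *cur = root, *lbound = header, *ubound = header;

	for (;;) {
		if (addr < cur->start) {
			ubound = cur;
			cur = cur->left;
			if (cur == lbound)	/* left pointer was a thread */
				break;
		} else if (cur->end <= addr) {
			lbound = cur;
			cur = cur->right;
			if (cur == ubound)	/* right pointer was a thread */
				break;
		} else {
			*out = cur;
			return (true);
		}
	}
	*out = lbound;	/* closest preceding entry, or the header */
	return (false);
}

int
main(void)
{
	struct toy_entry header = { .start = ULONG_MAX, .end = 0 };
	struct toy_entry a = { .start = 10, .end = 20 };
	struct toy_entry b = { .start = 30, .end = 40 };
	struct toy_entry c = { .start = 50, .end = 60 };
	struct toy_entry d = { .start = 70, .end = 80 };
	struct toy_entry *hit;
	bool found;

	b.left = &a; b.right = &d;
	d.left = &c; d.right = &header;
	a.left = &header; a.right = &b;
	c.left = &b; c.right = &d;
	header.right = &a; header.left = &d;

	found = toy_lookup(&header, &b, 55, &hit);	/* inside c */
	assert(found && hit == &c);
	found = toy_lookup(&header, &b, 45, &hit);	/* gap after b */
	assert(!found && hit == &b);
	found = toy_lookup(&header, &b, 5, &hit);	/* before first entry */
	assert(!found && hit == &header);
	return (0);
}
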
@@ -1420,7 +1508,7 @@
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
- vm_map_entry_t new_entry, prev_entry;
+ vm_map_entry_t new_entry, prev_entry, next_entry;
struct ucred *cred;
vm_eflags_t protoeflags;
vm_inherit_t inheritance;
@@ -1451,7 +1539,8 @@
/*
* Assert that the next entry doesn't overlap the end point.
*/
- if (prev_entry->next->start < end)
+ next_entry = vm_map_entry_succ(prev_entry);
+ if (next_entry->start < end)
return (KERN_NO_SPACE);
if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -1544,7 +1633,7 @@
map->size += end - prev_entry->end;
vm_map_entry_resize(map, prev_entry,
end - prev_entry->end);
- vm_map_try_merge_entries(map, prev_entry, prev_entry->next);
+ vm_map_try_merge_entries(map, prev_entry, next_entry);
return (KERN_SUCCESS);
}
@@ -1605,7 +1694,7 @@
* other cases, which are less common.
*/
vm_map_try_merge_entries(map, prev_entry, new_entry);
- vm_map_try_merge_entries(map, new_entry, new_entry->next);
+ vm_map_try_merge_entries(map, new_entry, next_entry);
if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -1636,7 +1725,7 @@
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
{
vm_map_entry_t llist, rlist, root, y;
- vm_size_t left_length;
+ vm_size_t left_length, max_free_left, max_free_right;
vm_offset_t gap_end;
/*
@@ -1662,24 +1751,31 @@
gap_end = rlist->start;
if (root != NULL) {
start = root->end;
- if (root->right != NULL)
+ if (root->right != rlist)
gap_end = start;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else if (rlist != &map->header) {
root = rlist;
rlist = root->left;
- root->left = NULL;
+ max_free_left = vm_map_splay_merge_pred(map, root, llist);
+ max_free_right = vm_map_splay_merge_right(map, root, rlist);
} else {
root = llist;
llist = root->right;
- root->right = NULL;
+ root->right = rlist;
+ rlist->left = root;
+ max_free_left = vm_map_splay_merge_left(map, root, llist);
+ max_free_right = rlist->start - root->end;
}
- vm_map_splay_merge(map, root, llist, rlist);
+ root->max_free = vm_size_max(max_free_left, max_free_right);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
if (length <= gap_end - start)
return (start);
/* With max_free, can immediately tell if no solution. */
- if (root->right == NULL || length > root->right->max_free)
+ if (root->right == &map->header || length > root->right->max_free)
return (vm_map_max(map) - length + 1);
/*
@@ -1689,28 +1785,33 @@
for (left_length = 0;;
left_length = vm_map_entry_max_free_left(root, llist)) {
if (length <= left_length)
- SPLAY_LEFT_STEP(root, y, rlist,
+ SPLAY_LEFT_STEP(root, y, llist, rlist,
length <= vm_map_entry_max_free_left(y, llist));
else
- SPLAY_RIGHT_STEP(root, y, llist,
+ SPLAY_RIGHT_STEP(root, y, llist, rlist,
length > vm_map_entry_max_free_left(y, root));
if (root == NULL)
break;
}
root = llist;
llist = root->right;
- root->right = NULL;
- if (rlist != &map->header) {
+ root->max_free = vm_map_splay_merge_left(map, root, llist);
+ if (rlist == &map->header) {
+ root->right = rlist;
+ root->max_free = vm_size_max(root->max_free,
+ rlist->start - root->end);
+ } else {
y = rlist;
rlist = y->left;
- y->left = NULL;
- vm_map_splay_merge(map, y, &map->header, rlist);
- y->max_free = MAX(
- vm_map_entry_max_free_left(y, root),
- vm_map_entry_max_free_right(y, &map->header));
+ y->left = root;
+ y->max_free = vm_size_max(
+ y->start - root->end,
+ vm_map_splay_merge_right(map, y, rlist));
root->right = y;
+ root->max_free = vm_size_max(root->max_free,
+ y->max_free);
}
- vm_map_splay_merge(map, root, llist, &map->header);
+ map->root = root;
VM_MAP_ASSERT_CONSISTENT(map);
return (root->end);
}
@@ -2303,7 +2404,7 @@
if (vm_map_lookup_entry(map, start, &entry)) {
vm_map_clip_start(map, entry, start);
} else
- entry = entry->next;
+ entry = vm_map_entry_succ(entry);
vm_map_clip_end(map, entry, end);
@@ -2460,12 +2561,13 @@
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry))
- entry = entry->next;
+ entry = vm_map_entry_succ(entry);
/*
* Make a first pass to check for protection violations.
*/
- for (current = entry; current->start < end; current = current->next) {
+ for (current = entry; current->start < end;
+ current = vm_map_entry_succ(current)) {
if ((current->eflags & MAP_ENTRY_GUARD) != 0)
continue;
if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
@@ -2503,7 +2605,8 @@
*/
rv = KERN_SUCCESS;
vm_map_clip_start(map, entry, start);
- for (current = entry; current->start < end; current = current->next) {
+ for (current = entry; current->start < end;
+ current = vm_map_entry_succ(current)) {
vm_map_clip_end(map, current, end);
@@ -2560,9 +2663,11 @@
* Otherwise, just simplify entries, since some may have been modified.
* [Note that clipping is not necessary the second time.]
*/
- for (current = entry; current->start < end;
- vm_map_try_merge_entries(map, current->prev, current),
- current = current->next) {
+ for (current = entry, entry = vm_map_entry_pred(current);
+ current->start < end;
+ (vm_map_try_merge_entries(map, entry, current),
+ entry = current,
+ current = vm_map_entry_succ(current))) {
if (rv != KERN_SUCCESS ||
(current->eflags & MAP_ENTRY_GUARD) != 0)
continue;
@@ -2600,7 +2705,7 @@
#undef MASK
}
}
- vm_map_try_merge_entries(map, current->prev, current);
+ vm_map_try_merge_entries(map, entry, current);
vm_map_unlock(map);
return (rv);
}
@@ -2660,10 +2765,14 @@
VM_MAP_RANGE_CHECK(map, start, end);
if (vm_map_lookup_entry(map, start, &entry)) {
- if (modify_map)
+ if (modify_map) {
vm_map_clip_start(map, entry, start);
+ current = entry;
+ entry = vm_map_entry_pred(current);
+ } else
+ current = entry;
} else {
- entry = entry->next;
+ current = vm_map_entry_succ(entry);
}
if (modify_map) {
@@ -2673,8 +2782,8 @@
* We clip the vm_map_entry so that behavioral changes are
* limited to the specified address range.
*/
- for (current = entry; current->start < end;
- current = current->next) {
+ for (; current->start < end;
+ entry = current, current = vm_map_entry_succ(current)) {
if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
continue;
@@ -2705,9 +2814,9 @@
default:
break;
}
- vm_map_try_merge_entries(map, current->prev, current);
+ vm_map_try_merge_entries(map, entry, current);
}
- vm_map_try_merge_entries(map, current->prev, current);
+ vm_map_try_merge_entries(map, entry, current);
vm_map_unlock(map);
} else {
vm_pindex_t pstart, pend;
@@ -2719,8 +2828,8 @@
* Since we don't clip the vm_map_entry, we have to clip
* the vm_object pindex and count.
*/
- for (current = entry; current->start < end;
- current = current->next) {
+ for (; current->start < end;
+ current = vm_map_entry_succ(current)) {
vm_offset_t useEnd, useStart;
if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
@@ -2826,17 +2935,19 @@
if (vm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
vm_map_clip_start(map, entry, start);
+ temp_entry = vm_map_entry_pred(entry);
} else
- entry = temp_entry->next;
+ entry = vm_map_entry_succ(temp_entry);
while (entry->start < end) {
vm_map_clip_end(map, entry, end);
if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
new_inheritance != VM_INHERIT_ZERO)
entry->inheritance = new_inheritance;
- vm_map_try_merge_entries(map, entry->prev, entry);
- entry = entry->next;
+ vm_map_try_merge_entries(map, temp_entry, entry);
+ temp_entry = entry;
+ entry = vm_map_entry_succ(entry);
}
- vm_map_try_merge_entries(map, entry->prev, entry);
+ vm_map_try_merge_entries(map, temp_entry, entry);
vm_map_unlock(map);
return (KERN_SUCCESS);
}
@@ -2885,7 +2996,7 @@
*io_end = start;
return (NULL);
}
- entry = entry->next;
+ entry = vm_map_entry_succ(entry);
}
return (entry);
}
@@ -2899,7 +3010,7 @@
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
int flags)
{
- vm_map_entry_t entry, first_entry;
+ vm_map_entry_t entry, first_entry, next_entry;
int rv;
bool first_iteration, holes_ok, need_wakeup, user_unwire;
@@ -2911,7 +3022,7 @@
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &first_entry)) {
if (holes_ok)
- first_entry = first_entry->next;
+ first_entry = vm_map_entry_succ(first_entry);
else {
vm_map_unlock(map);
return (KERN_INVALID_ADDRESS);
@@ -2950,12 +3061,13 @@
("owned map entry %p", entry));
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
entry->wiring_thread = curthread;
+ next_entry = vm_map_entry_succ(entry);
/*
* Check the map for holes in the specified region.
* If holes_ok, skip this check.
*/
if (!holes_ok &&
- (entry->end < end && entry->next->start > entry->end)) {
+ (entry->end < end && next_entry->start > entry->end)) {
end = entry->end;
rv = KERN_INVALID_ADDRESS;
break;
@@ -2969,15 +3081,19 @@
rv = KERN_INVALID_ARGUMENT;
break;
}
- entry = entry->next;
+ entry = next_entry;
}
need_wakeup = false;
if (first_entry == NULL &&
!vm_map_lookup_entry(map, start, &first_entry)) {
KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
- first_entry = first_entry->next;
+ entry = vm_map_entry_succ(first_entry);
+ } else {
+ entry = first_entry;
+ first_entry = vm_map_entry_pred(entry);
}
- for (entry = first_entry; entry->start < end; entry = entry->next) {
+ for (; entry->start < end;
+ first_entry = entry, entry = vm_map_entry_succ(entry)) {
/*
* If holes_ok was specified, an empty
* space in the unwired region could have been mapped
@@ -3013,9 +3129,9 @@
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = true;
}
- vm_map_try_merge_entries(map, entry->prev, entry);
+ vm_map_try_merge_entries(map, first_entry, entry);
}
- vm_map_try_merge_entries(map, entry->prev, entry);
+ vm_map_try_merge_entries(map, first_entry, entry);
vm_map_unlock(map);
if (need_wakeup)
vm_map_wakeup(map);
@@ -3121,7 +3237,7 @@
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &first_entry)) {
if (holes_ok)
- first_entry = first_entry->next;
+ first_entry = vm_map_entry_succ(first_entry);
else
return (KERN_INVALID_ADDRESS);
}
@@ -3225,7 +3341,7 @@
faddr < entry->end)
vm_map_wire_entry_failure(map,
entry, faddr);
- entry = entry->next;
+ entry = vm_map_entry_succ(entry);
}
}
if (rv != KERN_SUCCESS) {
@@ -3243,13 +3359,14 @@
* Check the map for holes in the specified region.
* If holes_ok was specified, skip this check.
*/
+ tmp_entry = vm_map_entry_succ(entry);
if (!holes_ok &&
- entry->end < end && entry->next->start > entry->end) {
+ entry->end < end && tmp_entry->start > entry->end) {
end = entry->end;
rv = KERN_INVALID_ADDRESS;
goto done;
}
- entry = entry->next;
+ entry = tmp_entry;
}
rv = KERN_SUCCESS;
done:
@@ -3257,9 +3374,13 @@
if (first_entry == NULL &&
!vm_map_lookup_entry(map, start, &first_entry)) {
KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
- first_entry = first_entry->next;
+ entry = vm_map_entry_succ(first_entry);
+ } else {
+ entry = first_entry;
+ first_entry = vm_map_entry_pred(entry);
}
- for (entry = first_entry; entry->start < end; entry = entry->next) {
+ for (; entry->start < end;
+ first_entry = entry, entry = vm_map_entry_succ(entry)) {
/*
* If holes_ok was specified, an empty
* space in the unwired region could have been mapped
@@ -3312,9 +3433,9 @@
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = true;
}
- vm_map_try_merge_entries(map, entry->prev, entry);
+ vm_map_try_merge_entries(map, first_entry, entry);
}
- vm_map_try_merge_entries(map, entry->prev, entry);
+ vm_map_try_merge_entries(map, first_entry, entry);
if (need_wakeup)
vm_map_wakeup(map);
return (rv);
@@ -3344,8 +3465,7 @@
boolean_t syncio,
boolean_t invalidate)
{
- vm_map_entry_t current;
- vm_map_entry_t entry;
+ vm_map_entry_t current, entry, next;
vm_size_t size;
vm_object_t object;
vm_ooffset_t offset;
@@ -3364,13 +3484,14 @@
/*
* Make a first pass to check for user-wired memory and holes.
*/
- for (current = entry; current->start < end; current = current->next) {
+ for (current = entry; current->start < end; current = next) {
if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
vm_map_unlock_read(map);
return (KERN_INVALID_ARGUMENT);
}
+ next = vm_map_entry_succ(current);
if (end > current->end &&
- current->end != current->next->start) {
+ current->end != next->start) {
vm_map_unlock_read(map);
return (KERN_INVALID_ADDRESS);
}
@@ -3414,7 +3535,7 @@
vm_map_lock_read(map);
if (last_timestamp == map->timestamp ||
!vm_map_lookup_entry(map, start, ¤t))
- current = current->next;
+ current = vm_map_entry_succ(current);
}
vm_map_unlock_read(map);
@@ -3532,7 +3653,7 @@
if (map->system_map)
vm_map_entry_deallocate(entry, TRUE);
else {
- entry->next = curthread->td_map_def_user;
+ entry->right = curthread->td_map_def_user;
curthread->td_map_def_user = entry;
}
}
@@ -3557,7 +3678,7 @@
* Find the start of the region, and clip it
*/
if (!vm_map_lookup_entry(map, start, &first_entry))
- entry = first_entry->next;
+ entry = vm_map_entry_succ(first_entry);
else {
entry = first_entry;
vm_map_clip_start(map, entry, start);
@@ -3595,7 +3716,7 @@
*/
if (!vm_map_lookup_entry(map, saved_start,
&tmp_entry))
- entry = tmp_entry->next;
+ entry = vm_map_entry_succ(tmp_entry);
else {
entry = tmp_entry;
vm_map_clip_start(map, entry,
@@ -3606,7 +3727,7 @@
}
vm_map_clip_end(map, entry, end);
- next = entry->next;
+ next = vm_map_entry_succ(entry);
/*
* Unwire before removing addresses from the pmap; otherwise,
@@ -3695,7 +3816,7 @@
return (FALSE);
/* go to next entry */
start = entry->end;
- entry = entry->next;
+ entry = vm_map_entry_succ(entry);
}
return (TRUE);
}
@@ -3804,7 +3925,7 @@
fake_entry->object.vm_object = src_object;
fake_entry->start = src_entry->start;
fake_entry->end = src_entry->end;
- fake_entry->next = curthread->td_map_def_user;
+ fake_entry->right = curthread->td_map_def_user;
curthread->td_map_def_user = fake_entry;
}
@@ -3912,7 +4033,7 @@
new_map->anon_loc = old_map->anon_loc;
- old_entry = old_map->header.next;
+ old_entry = old_map->header.right;
while (old_entry != &old_map->header) {
if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
@@ -4064,7 +4185,7 @@
break;
}
- old_entry = old_entry->next;
+ old_entry = vm_map_entry_succ(old_entry);
}
/*
* Use inlined vm_map_unlock() to postpone handling the deferred
@@ -4151,7 +4272,7 @@
/*
* If we can't accommodate max_ssize in the current mapping, no go.
*/
- if (prev_entry->next->start < addrbos + max_ssize)
+ if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
return (KERN_NO_SPACE);
/*
@@ -4178,7 +4299,7 @@
rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
if (rv != KERN_SUCCESS)
return (rv);
- new_entry = prev_entry->next;
+ new_entry = vm_map_entry_succ(prev_entry);
KASSERT(new_entry->end == top || new_entry->start == bot,
("Bad entry start/end for new stack entry"));
KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
@@ -4200,9 +4321,9 @@
* stack_guard_page for vm_map_growstack().
*/
if (orient == MAP_STACK_GROWS_DOWN)
- new_entry->prev->next_read = sgp;
+ vm_map_entry_pred(new_entry)->next_read = sgp;
else
- new_entry->next->next_read = sgp;
+ vm_map_entry_succ(new_entry)->next_read = sgp;
} else {
(void)vm_map_delete(map, bot, top);
}
@@ -4256,14 +4377,14 @@
if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
return (KERN_SUCCESS);
if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
- stack_entry = gap_entry->next;
+ stack_entry = vm_map_entry_succ(gap_entry);
if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
stack_entry->start != gap_entry->end)
return (KERN_FAILURE);
grow_amount = round_page(stack_entry->start - addr);
grow_down = true;
} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
- stack_entry = gap_entry->prev;
+ stack_entry = vm_map_entry_pred(gap_entry);
if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
stack_entry->end != gap_entry->start)
return (KERN_FAILURE);
@@ -4823,6 +4944,69 @@
return (map->pmap);
}
+#ifdef INVARIANTS
+static void
+_vm_map_assert_consistent(vm_map_t map)
+{
+ vm_map_entry_t entry, prev;
+ vm_map_entry_t cur, lbound, ubound;
+ vm_size_t max_left, max_right;
+
+ if (!enable_vmmap_check)
+ return;
+
+ prev = &map->header;
+ VM_MAP_ENTRY_FOREACH(entry, map) {
+ KASSERT(prev->end <= entry->start,
+ ("map %p prev->end = %jx, start = %jx", map,
+ (uintmax_t)prev->end, (uintmax_t)entry->start));
+ KASSERT(entry->start < entry->end,
+ ("map %p start = %jx, end = %jx", map,
+ (uintmax_t)entry->start, (uintmax_t)entry->end));
+ KASSERT(entry->left == &map->header ||
+ entry->left->start < entry->start,
+ ("map %p left->start = %jx, start = %jx", map,
+ (uintmax_t)entry->left->start, (uintmax_t)entry->start));
+ KASSERT(entry->right == &map->header ||
+ entry->start < entry->right->start,
+ ("map %p start = %jx, right->start = %jx", map,
+ (uintmax_t)entry->start, (uintmax_t)entry->right->start));
+ cur = map->root;
+ lbound = ubound = &map->header;
+ for (;;) {
+ if (entry->start < cur->start) {
+ ubound = cur;
+ cur = cur->left;
+ KASSERT(cur != lbound,
+ ("map %p cannot find %jx",
+ map, entry->start));
+ } else if (cur->end <= entry->start) {
+ lbound = cur;
+ cur = cur->right;
+ KASSERT(cur != ubound,
+ ("map %p cannot find %jx",
+ map, entry->start));
+ } else {
+ KASSERT(cur == entry,
+ ("map %p cannot find %jx",
+ map, entry->start));
+ break;
+ }
+ }
+ max_left = vm_map_entry_max_free_left(entry, lbound);
+ max_right = vm_map_entry_max_free_right(entry, ubound);
+ KASSERT(entry->max_free == vm_size_max(max_left, max_right),
+ ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
+ (uintmax_t)entry->max_free,
+ (uintmax_t)max_left, (uintmax_t)max_right));
+ prev = entry;
+ }
+ KASSERT(prev->end <= entry->start,
+ ("map %p prev->end = %jx, start = %jx", map,
+ (uintmax_t)prev->end, (uintmax_t)entry->start));
+}
+#endif
+
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
@@ -4839,8 +5023,8 @@
(void *)map->pmap, map->nentries, map->timestamp);
db_indent += 2;
- for (prev = &map->header; (entry = prev->next) != &map->header;
- prev = entry) {
+ prev = &map->header;
+ VM_MAP_ENTRY_FOREACH(entry, map) {
db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
(void *)entry, (void *)entry->start, (void *)entry->end,
entry->eflags);
@@ -4892,6 +5076,7 @@
db_indent -= 2;
}
}
+ prev = entry;
}
db_indent -= 2;
}
Index: sys/vm/vm_mmap.c
===================================================================
--- sys/vm/vm_mmap.c
+++ sys/vm/vm_mmap.c
@@ -581,7 +581,7 @@
pkm.pm_address = (uintptr_t) NULL;
if (vm_map_lookup_entry(map, addr, &entry)) {
for (; entry->start < addr + size;
- entry = entry->next) {
+ entry = vm_map_entry_succ(entry)) {
if (vm_map_check_protection(map, entry->start,
entry->end, VM_PROT_EXECUTE) == TRUE) {
pkm.pm_address = (uintptr_t) addr;
@@ -822,12 +822,15 @@
* up the pages elsewhere.
*/
lastvecindex = -1;
- for (current = entry; current->start < end; current = current->next) {
+ while (entry->start < end) {
/*
* check for contiguity
*/
- if (current->end < end && current->next->start > current->end) {
+ current = entry;
+ entry = vm_map_entry_succ(current);
+ if (current->end < end &&
+ entry->start > current->end) {
vm_map_unlock_read(map);
return (ENOMEM);
}