D13999.id51477.diff
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -798,7 +798,10 @@
map->header.end = min;
map->header.start = max;
map->flags = 0;
- map->root = NULL;
+ map->root = map->header.right = &map->header;
+ map->header.left = NULL;
+ map->header.adj_free = max - min;
+ map->header.max_free = map->header.adj_free;
map->timestamp = 0;
map->busy = 0;
}
@@ -873,91 +876,83 @@
}
/*
- * vm_map_entry_splay:
- *
- * The Sleator and Tarjan top-down splay algorithm with the
- * following variation. Max_free must be computed bottom-up, so
- * on the downward pass, maintain the left and right spines in
- * reverse order. Then, make a second pass up each side to fix
- * the pointers and compute max_free. The time bound is O(log n)
- * amortized.
- *
- * The new root is the vm_map_entry containing "addr", or else an
- * adjacent entry (lower or higher) if addr is not in the tree.
- *
- * The map must be locked, and leaves it so.
- *
- * Returns: the new root.
+ * Splay down the tree until we find addr or a NULL pointer where addr would
+ * go. llist and rlist are the two sides in reverse order (bottom-up), with
+ * llist linked by the right pointer and rlist linked by the left pointer in
+ * the vm_map_entry. Wait until Pass Two to set max_free on the two spines.
*/
static vm_map_entry_t
-vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+vm_map_entry_splay_split(vm_offset_t addr, vm_map_entry_t root,
+ vm_map_entry_t *pllist, vm_map_entry_t *prlist)
{
vm_map_entry_t llist, rlist;
- vm_map_entry_t ltree, rtree;
vm_map_entry_t y;
- /* Special case of empty tree. */
- if (root == NULL)
- return (root);
-
- /*
- * Pass One: Splay down the tree until we find addr or a NULL
- * pointer where addr would go. llist and rlist are the two
- * sides in reverse order (bottom-up), with llist linked by
- * the right pointer and rlist linked by the left pointer in
- * the vm_map_entry. Wait until Pass Two to set max_free on
- * the two spines.
- */
llist = NULL;
rlist = NULL;
for (;;) {
/* root is never NULL in here. */
- if (addr < root->start) {
- y = root->left;
- if (y == NULL)
- break;
- if (addr < y->start && y->left != NULL) {
- /* Rotate right and put y on rlist. */
- root->left = y->right;
- y->right = root;
+ while (addr >= root->end) {
+ y = root->right;
+ if (y != NULL && addr >= y->start) {
+ /* Rotate left. */
+ root->right = y->left;
+ y->left = root;
vm_map_entry_set_max_free(root);
- root = y->left;
- y->left = rlist;
- rlist = y;
- } else {
- /* Put root on rlist. */
- root->left = rlist;
- rlist = root;
root = y;
+ if (addr < root->end)
+ break;
+ y = root->right;
+
}
- } else if (addr >= root->end) {
- y = root->right;
- if (y == NULL)
+ /* Put root on llist. */
+ root->right = llist;
+ llist = root;
+ root = y;
+ if (root == NULL || addr < root->start)
break;
- if (addr >= y->end && y->right != NULL) {
- /* Rotate left and put y on llist. */
- root->right = y->left;
- y->left = root;
+ }
+ if (root == NULL || addr >= root->start)
+ break;
+
+ while (addr < root->start) {
+ y = root->left;
+ if (y != NULL && addr < y->end) {
+ /* Rotate right. */
+ root->left = y->right;
+ y->right = root;
vm_map_entry_set_max_free(root);
- root = y->right;
- y->right = llist;
- llist = y;
- } else {
- /* Put root on llist. */
- root->right = llist;
- llist = root;
root = y;
+ if (addr >= root->start)
+ break;
+ y = root->left;
}
- } else
+ /* Put root on rlist. */
+ root->left = rlist;
+ rlist = root;
+ root = y;
+ if (root == NULL || addr >= root->end)
+ break;
+ }
+ if (root == NULL || addr < root->end)
break;
}
+ *pllist = llist;
+ *prlist = rlist;
+ return (root);
+}
+
+/*
+ * Walk back up the two spines, flip the pointers and set max_free. The
+ * subtrees of the root go at the bottom of llist and rlist.
+ */
+static vm_map_entry_t
+vm_map_entry_splay_merge(vm_map_entry_t root,
+ vm_map_entry_t llist, vm_map_entry_t rlist,
+ vm_map_entry_t ltree, vm_map_entry_t rtree)
+{
+ vm_map_entry_t y;
- /*
- * Pass Two: Walk back up the two spines, flip the pointers
- * and set max_free. The subtrees of the root go at the
- * bottom of llist and rlist.
- */
- ltree = root->left;
while (llist != NULL) {
y = llist->right;
llist->right = ltree;
@@ -965,7 +960,7 @@
ltree = llist;
llist = y;
}
- rtree = root->right;
+
while (rlist != NULL) {
y = rlist->left;
rlist->left = rtree;
@@ -980,10 +975,47 @@
root->left = ltree;
root->right = rtree;
vm_map_entry_set_max_free(root);
-
return (root);
}
+/*
+ * vm_map_entry_splay:
+ *
+ * The Sleator and Tarjan top-down splay algorithm with the
+ * following variation. Max_free must be computed bottom-up, so
+ * on the downward pass, maintain the left and right spines in
+ * reverse order. Then, make a second pass up each side to fix
+ * the pointers and compute max_free. The time bound is O(log n)
+ * amortized.
+ *
+ * The new root is the vm_map_entry containing "addr", or else
+ * the next lower entry if addr is not in the tree.
+ *
+ * The map must be locked, and leaves it so.
+ *
+ */
+static vm_map_entry_t
+vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+{
+ vm_map_entry_t llist, rlist;
+
+ root = vm_map_entry_splay_split(addr, root, &llist, &rlist);
+ if (root == NULL) {
+ /*
+ * With no matching node found, and no new node to
+ * insert, recover the greatest node in the left
+ * subtree and make it the root. There is such a
+ * node, since map->header is in the tree and left of
+ * all addresses.
+ */
+ root = llist;
+ llist = root->right;
+ root->right = NULL;
+ }
+ return (vm_map_entry_splay_merge(root, llist, rlist,
+ root->left, root->right));
+}
+
/*
* vm_map_entry_{un,}link:
*
@@ -991,41 +1023,25 @@
*/
static void
vm_map_entry_link(vm_map_t map,
- vm_map_entry_t after_where,
vm_map_entry_t entry)
{
+ vm_map_entry_t llist, rlist, root;
- CTR4(KTR_VM,
- "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
- map->nentries, entry, after_where);
+ CTR3(KTR_VM,
+ "vm_map_entry_link: map %p, nentries %d, entry %p", map,
+ map->nentries, entry);
VM_MAP_ASSERT_LOCKED(map);
- KASSERT(after_where->end <= entry->start,
- ("vm_map_entry_link: prev end %jx new start %jx overlap",
- (uintmax_t)after_where->end, (uintmax_t)entry->start));
- KASSERT(entry->end <= after_where->next->start,
- ("vm_map_entry_link: new end %jx next start %jx overlap",
- (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
-
map->nentries++;
- entry->prev = after_where;
- entry->next = after_where->next;
- entry->next->prev = entry;
- after_where->next = entry;
-
- if (after_where != &map->header) {
- if (after_where != map->root)
- vm_map_entry_splay(after_where->start, map->root);
- entry->right = after_where->right;
- entry->left = after_where;
- after_where->right = NULL;
- after_where->adj_free = entry->start - after_where->end;
- vm_map_entry_set_max_free(after_where);
- } else {
- entry->right = map->root;
- entry->left = NULL;
- }
- entry->adj_free = entry->next->start - entry->end;
- vm_map_entry_set_max_free(entry);
+ root = map->root;
+ root = vm_map_entry_splay_split(entry->start, root, &llist, &rlist);
+ KASSERT(root == NULL,
+ ("vm_map_entry_link: link object already mapped"));
+ entry->prev = llist;
+ entry->next = rlist;
+ llist->next = rlist->prev = entry;
+ llist->adj_free = entry->start - llist->end;
+ entry->adj_free = rlist->start - entry->end;
+ root = vm_map_entry_splay_merge(entry, llist, rlist, NULL, NULL);
map->root = entry;
}
@@ -1033,25 +1049,43 @@
vm_map_entry_unlink(vm_map_t map,
vm_map_entry_t entry)
{
- vm_map_entry_t next, prev, root;
+ vm_map_entry_t llist, root, rlist, rtree, y;
VM_MAP_ASSERT_LOCKED(map);
- if (entry != map->root)
- vm_map_entry_splay(entry->start, map->root);
- if (entry->left == NULL)
- root = entry->right;
- else {
- root = vm_map_entry_splay(entry->start, entry->left);
- root->right = entry->right;
- root->adj_free = entry->next->start - root->end;
- vm_map_entry_set_max_free(root);
+ root = vm_map_entry_splay_split(entry->start, map->root, &llist, &rlist);
+ KASSERT(root != NULL,
+ ("vm_map_entry_unlink: unlink object not mapped"));
+ /*
+ * To delete the found item from the tree, walk the
+ * left spine of the found subtree, extending the left
+ * spine of the new subtree, so that the predecessor
+ * of the found node can be the new root.
+ */
+ rtree = root->right;
+ if (root->left == NULL) {
+ root = llist;
+ llist = llist->right;
+ } else {
+ root = root->left;
+ while ((y = root->right) != NULL) {
+ /* Rotate left. */
+ root->right = y->left;
+ y->left = root;
+ vm_map_entry_set_max_free(root);
+ if (y->right != NULL) {
+ /* Put y on llist. */
+ root = y->right;
+ y->right = llist;
+ llist = y;
+ } else
+ root = y;
+ }
}
- map->root = root;
-
- prev = entry->prev;
- next = entry->next;
- next->prev = prev;
- prev->next = next;
+ y = entry->next;
+ y->prev = root;
+ root->next = y;
+ root->adj_free = y->start - root->end;
+ map->root = vm_map_entry_splay_merge(root, llist, rlist, root->left, rtree);
map->nentries--;
CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
map->nentries, entry);
@@ -1099,21 +1133,31 @@
vm_offset_t address,
vm_map_entry_t *entry) /* OUT */
{
- vm_map_entry_t cur;
+ vm_map_entry_t cur, lbound;
boolean_t locked;
+ /*
+ * If the address is out of range, fail immediately.
+ */
+ if (address < vm_map_min(map)) {
+ *entry = &map->header;
+ return (FALSE);
+ }
+ if (address >= vm_map_max(map)) {
+ *entry = map->header.prev;
+ return (FALSE);
+ }
+
/*
* If the map is empty, then the map entry immediately preceding
* "address" is the map's header.
*/
cur = map->root;
- if (cur == NULL)
- *entry = &map->header;
- else if (address >= cur->start && cur->end > address) {
+ if (address >= cur->start && cur->end > address) {
*entry = cur;
return (TRUE);
- } else if ((locked = vm_map_locked(map)) ||
- sx_try_upgrade(&map->lock)) {
+ }
+ if ((locked = vm_map_locked(map)) || sx_try_upgrade(&map->lock)) {
/*
* Splay requires a write lock on the map. However, it only
* restructures the binary search tree; it does not otherwise
@@ -1123,42 +1167,34 @@
map->root = cur = vm_map_entry_splay(address, cur);
if (!locked)
sx_downgrade(&map->lock);
-
- /*
- * If "address" is contained within a map entry, the new root
- * is that map entry. Otherwise, the new root is a map entry
- * immediately before or after "address".
- */
- if (address >= cur->start) {
- *entry = cur;
- if (cur->end > address)
- return (TRUE);
- } else
- *entry = cur->prev;
- } else
+ } else {
/*
* Since the map is only locked for read access, perform a
* standard binary search tree lookup for "address".
*/
+ lbound = NULL;
for (;;) {
- if (address < cur->start) {
- if (cur->left == NULL) {
- *entry = cur->prev;
+ while (address >= cur->end) {
+ lbound = cur;
+ cur = cur->right;
+ if (cur == NULL || address < cur->start)
break;
- }
+ }
+ if (cur == NULL || address >= cur->start)
+ break;
+ while (address < cur->start) {
cur = cur->left;
- } else if (cur->end > address) {
- *entry = cur;
- return (TRUE);
- } else {
- if (cur->right == NULL) {
- *entry = cur;
+ if (cur == NULL || address >= cur->end)
break;
- }
- cur = cur->right;
}
+ if (cur == NULL || address < cur->end)
+ break;
}
- return (FALSE);
+ if (cur == NULL)
+ cur = lbound;
+ }
+ *entry = cur;
+ return (cur->end > address);
}
/*
@@ -1350,7 +1386,7 @@
/*
* Insert the new entry into the list
*/
- vm_map_entry_link(map, prev_entry, new_entry);
+ vm_map_entry_link(map, new_entry);
if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
map->size += new_entry->end - new_entry->start;
@@ -1402,27 +1438,12 @@
if (start + length > vm_map_max(map) || start + length < start)
return (1);
- /* Empty tree means wide open address space. */
- if (map->root == NULL) {
- *addr = start;
- return (0);
- }
-
- /*
- * After splay, if start comes before root node, then there
- * must be a gap from start to the root.
- */
- map->root = vm_map_entry_splay(start, map->root);
- if (start + length <= map->root->start) {
- *addr = start;
- return (0);
- }
-
/*
* Root is the last node that might begin its gap before
* start, and this is the last comparison where address
* wrap might be a problem.
*/
+ map->root = vm_map_entry_splay(start, map->root);
st = (start > map->root->end) ? start : map->root->end;
if (length <= map->root->end + map->root->adj_free - st) {
*addr = st;
@@ -1797,7 +1818,7 @@
if (new_entry->cred != NULL)
crhold(entry->cred);
- vm_map_entry_link(map, entry->prev, new_entry);
+ vm_map_entry_link(map, new_entry);
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
vm_object_reference(new_entry->object.vm_object);
@@ -1879,7 +1900,7 @@
if (new_entry->cred != NULL)
crhold(entry->cred);
- vm_map_entry_link(map, entry, new_entry);
+ vm_map_entry_link(map, new_entry);
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
vm_object_reference(new_entry->object.vm_object);
@@ -3532,8 +3553,7 @@
* Insert the entry into the new map -- we know we're
* inserting at the end of the new map.
*/
- vm_map_entry_link(new_map, new_map->header.prev,
- new_entry);
+ vm_map_entry_link(new_map, new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
/*
@@ -3560,8 +3580,7 @@
new_entry->wired_count = 0;
new_entry->object.vm_object = NULL;
new_entry->cred = NULL;
- vm_map_entry_link(new_map, new_map->header.prev,
- new_entry);
+ vm_map_entry_link(new_map, new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
vm_map_copy_entry(old_map, new_map, old_entry,
new_entry, fork_charge);
@@ -3584,8 +3603,7 @@
new_entry->max_protection = old_entry->max_protection;
new_entry->inheritance = VM_INHERIT_ZERO;
- vm_map_entry_link(new_map, new_map->header.prev,
- new_entry);
+ vm_map_entry_link(new_map, new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
new_entry->cred = curthread->td_ucred;
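
The core of this change is the factoring of vm_map_entry_splay() into a
downward pass (vm_map_entry_splay_split) and an upward pass
(vm_map_entry_splay_merge), so that link and unlink can reuse the two halves
directly. Below is a minimal standalone sketch of that split/merge shape on a
simplified node type; struct node, splay_split and splay_merge are
hypothetical names, and the sketch omits the rotations that give the amortized
O(log n) bound, the adj_free/max_free bookkeeping, and the &map->header
sentinel that the real functions also maintain.

#include <stddef.h>

struct node {
        unsigned long   start, end;     /* mapped range [start, end) */
        struct node     *left, *right;
};

/*
 * Downward pass: descend from root looking for addr.  Each node from which
 * we descend to the right, together with its left subtree, lies entirely
 * below addr and is pushed onto llist (chained through its right pointer,
 * nearest node first); symmetrically, nodes passed on the way left go onto
 * rlist (chained through their left pointers).  Returns the node whose range
 * contains addr, or NULL if there is none.
 */
static struct node *
splay_split(unsigned long addr, struct node *root, struct node **pllist,
    struct node **prlist)
{
        struct node *llist, *rlist, *y;

        llist = rlist = NULL;
        while (root != NULL) {
                if (addr >= root->end) {
                        /* Push root on llist, keep searching its right side. */
                        y = root->right;
                        root->right = llist;
                        llist = root;
                        root = y;
                } else if (addr < root->start) {
                        /* Push root on rlist, keep searching its left side. */
                        y = root->left;
                        root->left = rlist;
                        rlist = root;
                        root = y;
                } else
                        break;          /* root contains addr */
        }
        *pllist = llist;
        *prlist = rlist;
        return (root);
}

/*
 * Upward pass: rewind the two spines so that llist becomes the left subtree
 * of root and rlist the right subtree.  root must not be NULL; a caller that
 * found no matching node can, as vm_map_entry_splay() does above, pop the
 * nearest predecessor off llist and use that as the new root.
 */
static struct node *
splay_merge(struct node *root, struct node *llist, struct node *rlist)
{
        struct node *ltree, *rtree, *y;

        ltree = root->left;
        rtree = root->right;
        while (llist != NULL) {
                y = llist->right;
                llist->right = ltree;
                ltree = llist;
                llist = y;
        }
        while (rlist != NULL) {
                y = rlist->left;
                rlist->left = rtree;
                rtree = rlist;
                rlist = y;
        }
        root->left = ltree;
        root->right = rtree;
        return (root);
}

In the patch itself the merge takes ltree and rtree as explicit parameters, so
that vm_map_entry_link() can root the tree at a brand-new entry with empty
subtrees and vm_map_entry_unlink() can substitute the removed entry's
predecessor, and max_free is recomputed for each spine node as its pointer is
flipped.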

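The free-space caching that both passes must keep consistent is unchanged in
spirit: each entry stores adj_free, the gap between its end and the start of
the next entry, and max_free, the largest adj_free anywhere in its subtree,
which is why vm_map_entry_set_max_free() is called after each rotation on the
way down and again as the spines are rewound.  What follows is a rough sketch
of that invariant and of the kind of subtree-pruning gap search that
vm_map_findspace() builds on; gap_node, node_set_max_free and node_find_gap
are hypothetical names, and the sketch ignores the initial splay to "start"
and the address-wrap checks in the real function.

#include <stddef.h>

struct gap_node {
        unsigned long   start, end;     /* mapped range [start, end) */
        unsigned long   adj_free;       /* free space after this entry */
        unsigned long   max_free;       /* largest adj_free in this subtree */
        struct gap_node *left, *right;
};

/*
 * Recompute max_free for one node from its own adj_free and its children's
 * cached values; the analogue of vm_map_entry_set_max_free().
 */
static void
node_set_max_free(struct gap_node *n)
{
        unsigned long m;

        m = n->adj_free;
        if (n->left != NULL && n->left->max_free > m)
                m = n->left->max_free;
        if (n->right != NULL && n->right->max_free > m)
                m = n->right->max_free;
        n->max_free = m;
}

/*
 * Find the entry after which a gap of at least "length" bytes begins,
 * preferring the lowest-addressed gap.  max_free lets entire subtrees be
 * skipped without visiting them.  Returns NULL if no gap is large enough.
 */
static struct gap_node *
node_find_gap(struct gap_node *root, unsigned long length)
{
        if (root == NULL || root->max_free < length)
                return (NULL);
        for (;;) {
                if (root->left != NULL && root->left->max_free >= length)
                        root = root->left;
                else if (root->adj_free >= length)
                        return (root);
                else {
                        /*
                         * Neither the left subtree nor root itself has room,
                         * so by the max_free invariant the right subtree must.
                         */
                        root = root->right;
                }
        }
}

A successful node_find_gap(map_root, length) call names the entry at whose end
the lowest suitable gap begins; when the root's max_free already rules the
request out, no part of the tree needs to be walked at all.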