D13999.id49284.diff

Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -796,13 +796,16 @@
{
map->header.next = map->header.prev = &map->header;
+ map->header.left = map->header.right = NULL;
+ map->header.adj_free = max - min;
+ map->header.max_free = map->header.adj_free;
map->needs_wakeup = FALSE;
map->system_map = 0;
map->pmap = pmap;
map->header.end = min;
map->header.start = max;
map->flags = 0;
- map->root = NULL;
+ map->root = &map->header;
map->timestamp = 0;
map->busy = 0;
}
@@ -877,7 +880,7 @@
}
/*
- * vm_map_entry_splay:
+ * vm_map_splay:
*
* The Sleator and Tarjan top-down splay algorithm with the
* following variation. Max_free must be computed bottom-up, so
@@ -886,23 +889,23 @@
* the pointers and compute max_free. The time bound is O(log n)
* amortized.
*
- * The new root is the vm_map_entry containing "addr", or else an
- * adjacent entry (lower or higher) if addr is not in the tree.
+ * The new root is the vm_map_entry containing "addr", or else
+ * the next lower entry if addr is not in the tree.
*
* The map must be locked, and leaves it so.
*
- * Returns: the new root.
*/
-static vm_map_entry_t
-vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
+static void
+vm_map_splay(vm_offset_t addr, vm_map_t map, vm_map_entry_t *io)
{
vm_map_entry_t llist, rlist;
vm_map_entry_t ltree, rtree;
- vm_map_entry_t y;
+ vm_map_entry_t root, x, y;
- /* Special case of empty tree. */
- if (root == NULL)
- return (root);
+ KASSERT(addr >= map->header.end && addr < map->header.start,
+ ("vm_map_splay: addr %jx, offsets start %jx, end %jx",
+ (uintmax_t)addr,
+ (uintmax_t)map->header.end, (uintmax_t)map->header.start));
/*
* Pass One: Splay down the tree until we find addr or a NULL
@@ -914,46 +917,130 @@
*/
llist = NULL;
rlist = NULL;
- for (;;) {
- /* root is never NULL in here. */
- if (addr < root->start) {
- y = root->left;
- if (y == NULL)
- break;
- if (addr < y->start && y->left != NULL) {
- /* Rotate right and put y on rlist. */
- root->left = y->right;
- y->right = root;
- vm_map_entry_set_max_free(root);
+ root = map->root;
+ do {
+ x = root;
+ /* x is never NULL in here. */
+ if (addr >= x->end) {
+ y = x->right;
+ if (y != NULL && addr >= y->end) {
+ root = y->right;
+ /* Rotate left. */
+ x->right = y->left;
+ y->left = x;
+ vm_map_entry_set_max_free(x);
+ /* Put y on llist. */
+ y->right = llist;
+ llist = y;
+ } else if (y != NULL && addr < y->start) {
root = y->left;
+ /* Put x on llist. */
+ x->right = llist;
+ llist = x;
+ /* Put y on rlist. */
y->left = rlist;
rlist = y;
} else {
- /* Put root on rlist. */
- root->left = rlist;
- rlist = root;
root = y;
- }
- } else if (addr >= root->end) {
- y = root->right;
- if (y == NULL)
+ /* Put x on llist. */
+ x->right = llist;
+ llist = x;
break;
- if (addr >= y->end && y->right != NULL) {
- /* Rotate left and put y on llist. */
- root->right = y->left;
- y->left = root;
- vm_map_entry_set_max_free(root);
+ }
+ } else if (addr < x->start) {
+ y = x->left;
+ if (y != NULL && addr >= y->end) {
root = y->right;
+ /* Put x on rlist. */
+ x->left = rlist;
+ rlist = x;
+ /* Put y on llist. */
y->right = llist;
llist = y;
+ } else if (y != NULL && addr < y->start) {
+ root = y->left;
+ /* Rotate right. */
+ x->left = y->right;
+ y->right = x;
+ vm_map_entry_set_max_free(x);
+ /* Put y on rlist. */
+ y->left = rlist;
+ rlist = y;
} else {
- /* Put root on llist. */
- root->right = llist;
- llist = root;
root = y;
+ /* Put x on rlist. */
+ x->left = rlist;
+ rlist = x;
+ break;
}
} else
break;
+ } while (root != NULL);
+
+ if (root == NULL &&
+ (io == NULL || *io == NULL)) {
+ /*
+ * With no matching node found, and no new node to
+ * insert, recover the greatest node in the left
+ * subtree and make it the root. There is such a
+ * node, since map->header is in the tree and left of
+ * all addresses.
+ */
+ rtree = NULL;
+ root = llist;
+ llist = llist->right;
+ ltree = root->left;
+ } else if (root == NULL) {
+ /* Install the new item as tree root. */
+ rtree = NULL;
+ root = *io;
+ *io = NULL;
+ llist->next = rlist->prev = root;
+ root->prev = llist;
+ root->next = rlist;
+ llist->adj_free = root->start - llist->end;
+ root->adj_free = rlist->start - root->end;
+ ltree = NULL;
+ } else if (io == NULL || *io != NULL) {
+ rtree = root->right;
+ ltree = root->left;
+ } else {
+ /*
+ * To delete the found item from the tree, walk the
+ * left spine of the found subtree, extending the left
+ * spine of the new subtree, so that the predecessor
+ * of the found node can be the new root.
+ */
+ rtree = root->right;
+ *io = root;
+ root = root->left;
+ if (root == NULL) {
+ root = llist;
+ llist = llist->right;
+ } else {
+ for (;;) {
+ y = root->right;
+ if (y == NULL)
+ break;
+ x = root;
+ root = y->right;
+ /* Rotate left. */
+ x->right = y->left;
+ y->left = x;
+ vm_map_entry_set_max_free(x);
+ if (root == NULL) {
+ root = y;
+ break;
+ }
+ /* Put y on llist. */
+ y->right = llist;
+ llist = y;
+ }
+ }
+ ltree = root->left;
+ root->next = (*io)->next;
+ root->next->prev = root;
+ root->adj_free = root->next->start - root->end;
}
/*
@@ -961,7 +1048,6 @@
* and set max_free. The subtrees of the root go at the
* bottom of llist and rlist.
*/
- ltree = root->left;
while (llist != NULL) {
y = llist->right;
llist->right = ltree;
@@ -969,7 +1055,6 @@
ltree = llist;
llist = y;
}
- rtree = root->right;
while (rlist != NULL) {
y = rlist->left;
rlist->left = rtree;
@@ -985,7 +1070,7 @@
root->right = rtree;
vm_map_entry_set_max_free(root);
- return (root);
+ map->root = root;
}
/*
@@ -995,67 +1080,27 @@
*/
static void
vm_map_entry_link(vm_map_t map,
- vm_map_entry_t after_where,
vm_map_entry_t entry)
{
- CTR4(KTR_VM,
- "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
- map->nentries, entry, after_where);
+ CTR3(KTR_VM,
+ "vm_map_entry_link: map %p, nentries %d, entry %p", map,
+ map->nentries, entry);
VM_MAP_ASSERT_LOCKED(map);
- KASSERT(after_where->end <= entry->start,
- ("vm_map_entry_link: prev end %jx new start %jx overlap",
- (uintmax_t)after_where->end, (uintmax_t)entry->start));
- KASSERT(entry->end <= after_where->next->start,
- ("vm_map_entry_link: new end %jx next start %jx overlap",
- (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
map->nentries++;
- entry->prev = after_where;
- entry->next = after_where->next;
- entry->next->prev = entry;
- after_where->next = entry;
-
- if (after_where != &map->header) {
- if (after_where != map->root)
- vm_map_entry_splay(after_where->start, map->root);
- entry->right = after_where->right;
- entry->left = after_where;
- after_where->right = NULL;
- after_where->adj_free = entry->start - after_where->end;
- vm_map_entry_set_max_free(after_where);
- } else {
- entry->right = map->root;
- entry->left = NULL;
- }
- entry->adj_free = entry->next->start - entry->end;
- vm_map_entry_set_max_free(entry);
- map->root = entry;
+ vm_map_splay(entry->start, map, &entry);
}
static void
vm_map_entry_unlink(vm_map_t map,
vm_map_entry_t entry)
{
- vm_map_entry_t next, prev, root;
+ vm_map_entry_t dead;
VM_MAP_ASSERT_LOCKED(map);
- if (entry != map->root)
- vm_map_entry_splay(entry->start, map->root);
- if (entry->left == NULL)
- root = entry->right;
- else {
- root = vm_map_entry_splay(entry->start, entry->left);
- root->right = entry->right;
- root->adj_free = entry->next->start - root->end;
- vm_map_entry_set_max_free(root);
- }
- map->root = root;
-
- prev = entry->prev;
- next = entry->next;
- next->prev = prev;
- prev->next = next;
+ dead = NULL;
+ vm_map_splay(entry->start, map, &dead);
map->nentries--;
CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
map->nentries, entry);
@@ -1081,7 +1126,7 @@
* root and making the change there.
*/
if (entry != map->root)
- map->root = vm_map_entry_splay(entry->start, map->root);
+ vm_map_splay(entry->start, map, NULL);
entry->adj_free = entry->next->start - entry->end;
vm_map_entry_set_max_free(entry);
@@ -1106,62 +1151,54 @@
vm_map_entry_t cur;
boolean_t locked;
+ /*
+ * If the address is out of range, fail immediately.
+ */
+ if (address < map->header.end || address >= map->header.start) {
+ *entry = &map->header;
+ return (FALSE);
+ }
+
/*
* If the map is empty, then the map entry immediately preceding
* "address" is the map's header.
*/
cur = map->root;
- if (cur == NULL)
- *entry = &map->header;
- else if (address >= cur->start && cur->end > address) {
+ if (address >= cur->start && cur->end > address) {
*entry = cur;
return (TRUE);
- } else if ((locked = vm_map_locked(map)) ||
- sx_try_upgrade(&map->lock)) {
+ }
+ if ((locked = vm_map_locked(map)) || sx_try_upgrade(&map->lock)) {
/*
* Splay requires a write lock on the map. However, it only
* restructures the binary search tree; it does not otherwise
* change the map. Thus, the map's timestamp need not change
* on a temporary upgrade.
*/
- map->root = cur = vm_map_entry_splay(address, cur);
+ vm_map_splay(address, map, NULL);
+ cur = map->root;
if (!locked)
sx_downgrade(&map->lock);
- /*
- * If "address" is contained within a map entry, the new root
- * is that map entry. Otherwise, the new root is a map entry
- * immediately before or after "address".
- */
- if (address >= cur->start) {
- *entry = cur;
- if (cur->end > address)
- return (TRUE);
- } else
- *entry = cur->prev;
+ *entry = cur;
+ if (cur->end > address)
+ return (TRUE);
} else
/*
* Since the map is only locked for read access, perform a
* standard binary search tree lookup for "address".
*/
- for (;;) {
- if (address < cur->start) {
- if (cur->left == NULL) {
- *entry = cur->prev;
- break;
- }
+ do {
+ if (cur->end <= address) {
+ *entry = cur;
+ cur = cur->right;
+ } else if (address < cur->start) {
cur = cur->left;
- } else if (cur->end > address) {
+ } else {
*entry = cur;
return (TRUE);
- } else {
- if (cur->right == NULL) {
- *entry = cur;
- break;
- }
- cur = cur->right;
}
- }
+ } while (cur != NULL);
return (FALSE);
}
@@ -1354,7 +1391,7 @@
/*
* Insert the new entry into the list
*/
- vm_map_entry_link(map, prev_entry, new_entry);
+ vm_map_entry_link(map, new_entry);
if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
map->size += new_entry->end - new_entry->start;
@@ -1396,7 +1433,6 @@
vm_offset_t *addr) /* OUT */
{
vm_map_entry_t entry;
- vm_offset_t st;
/*
* Request must fit within min/max VM address and must avoid
@@ -1406,35 +1442,21 @@
if (start + length > vm_map_max(map) || start + length < start)
return (1);
- /* Empty tree means wide open address space. */
- if (map->root == NULL) {
- *addr = start;
- return (0);
- }
-
- /*
- * After splay, if start comes before root node, then there
- * must be a gap from start to the root.
- */
- map->root = vm_map_entry_splay(start, map->root);
- if (start + length <= map->root->start) {
- *addr = start;
- return (0);
- }
-
/*
* Root is the last node that might begin its gap before
* start, and this is the last comparison where address
* wrap might be a problem.
*/
- st = (start > map->root->end) ? start : map->root->end;
- if (length <= map->root->end + map->root->adj_free - st) {
- *addr = st;
+ vm_map_splay(start, map, NULL);
+ entry = map->root;
+ start = MAX(start, entry->end);
+ if (length <= entry->end + entry->adj_free - start) {
+ *addr = start;
return (0);
}
/* With max_free, can immediately tell if no solution. */
- entry = map->root->right;
+ entry = entry->right;
if (entry == NULL || length > entry->max_free)
return (1);
@@ -1443,18 +1465,19 @@
* right subtree (first fit). The previous splay implies that
* all regions in the right subtree have addresses > start.
*/
- while (entry != NULL) {
+ for (;;) {
if (entry->left != NULL && entry->left->max_free >= length)
entry = entry->left;
else if (entry->adj_free >= length) {
*addr = entry->end;
return (0);
- } else
+ } else if (entry->right != NULL)
entry = entry->right;
+ else {
+ /* Can't get here, so panic if we do. */
+ panic("vm_map_findspace: max_free corrupt");
+ }
}
-
- /* Can't get here, so panic if we do. */
- panic("vm_map_findspace: max_free corrupt");
}
int
@@ -1643,6 +1666,50 @@
}
}
+static boolean_t
+vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
+{
+ vm_size_t prevsize;
+
+ prevsize = prev->end - prev->start;
+ return ( (prev->end == entry->start) &&
+ (prev->object.vm_object == entry->object.vm_object) &&
+ (!prev->object.vm_object ||
+ (prev->offset + prevsize == entry->offset)) &&
+ (prev->eflags == entry->eflags) &&
+ (prev->protection == entry->protection) &&
+ (prev->max_protection == entry->max_protection) &&
+ (prev->inheritance == entry->inheritance) &&
+ (prev->wired_count == entry->wired_count) &&
+ (prev->cred == entry->cred));
+}
+
+static void
+vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
+{
+
+ vm_map_entry_unlink(map, entry);
+ /*
+ * If the backing object is a vnode object,
+ * vm_object_deallocate() calls vrele().
+ * However, vrele() does not lock the vnode
+ * because the vnode has additional
+ * references. Thus, the map lock can be kept
+ * without causing a lock-order reversal with
+ * the vnode lock.
+ *
+ * Since we count the number of virtual page
+ * mappings in object->un_pager.vnp.writemappings,
+ * the writemappings value should not be adjusted
+ * when the entry is disposed of.
+ */
+ if (entry->object.vm_object)
+ vm_object_deallocate(entry->object.vm_object);
+ if (entry->cred != NULL)
+ crfree(entry->cred);
+ vm_map_entry_dispose(map, entry);
+}
+
/*
* vm_map_simplify_entry:
*
@@ -1659,7 +1726,6 @@
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
vm_map_entry_t next, prev;
- vm_size_t prevsize, esize;
if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
@@ -1667,70 +1733,18 @@
prev = entry->prev;
if (prev != &map->header) {
- prevsize = prev->end - prev->start;
- if ( (prev->end == entry->start) &&
- (prev->object.vm_object == entry->object.vm_object) &&
- (!prev->object.vm_object ||
- (prev->offset + prevsize == entry->offset)) &&
- (prev->eflags == entry->eflags) &&
- (prev->protection == entry->protection) &&
- (prev->max_protection == entry->max_protection) &&
- (prev->inheritance == entry->inheritance) &&
- (prev->wired_count == entry->wired_count) &&
- (prev->cred == entry->cred)) {
- vm_map_entry_unlink(map, prev);
+ if (vm_map_mergeable_neighbors(prev, entry)) {
entry->start = prev->start;
entry->offset = prev->offset;
- if (entry->prev != &map->header)
- vm_map_entry_resize_free(map, entry->prev);
-
- /*
- * If the backing object is a vnode object,
- * vm_object_deallocate() calls vrele().
- * However, vrele() does not lock the vnode
- * because the vnode has additional
- * references. Thus, the map lock can be kept
- * without causing a lock-order reversal with
- * the vnode lock.
- *
- * Since we count the number of virtual page
- * mappings in object->un_pager.vnp.writemappings,
- * the writemappings value should not be adjusted
- * when the entry is disposed of.
- */
- if (prev->object.vm_object)
- vm_object_deallocate(prev->object.vm_object);
- if (prev->cred != NULL)
- crfree(prev->cred);
- vm_map_entry_dispose(map, prev);
+ vm_map_merged_neighbor_dispose(map, prev);
}
}
next = entry->next;
if (next != &map->header) {
- esize = entry->end - entry->start;
- if ((entry->end == next->start) &&
- (next->object.vm_object == entry->object.vm_object) &&
- (!entry->object.vm_object ||
- (entry->offset + esize == next->offset)) &&
- (next->eflags == entry->eflags) &&
- (next->protection == entry->protection) &&
- (next->max_protection == entry->max_protection) &&
- (next->inheritance == entry->inheritance) &&
- (next->wired_count == entry->wired_count) &&
- (next->cred == entry->cred)) {
- vm_map_entry_unlink(map, next);
+ if (vm_map_mergeable_neighbors(entry, next)) {
entry->end = next->end;
- vm_map_entry_resize_free(map, entry);
-
- /*
- * See comment above.
- */
- if (next->object.vm_object)
- vm_object_deallocate(next->object.vm_object);
- if (next->cred != NULL)
- crfree(next->cred);
- vm_map_entry_dispose(map, next);
+ vm_map_merged_neighbor_dispose(map, next);
}
}
}
@@ -1807,7 +1821,7 @@
if (new_entry->cred != NULL)
crhold(entry->cred);
- vm_map_entry_link(map, entry->prev, new_entry);
+ vm_map_entry_link(map, new_entry);
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
vm_object_reference(new_entry->object.vm_object);
@@ -1889,7 +1903,7 @@
if (new_entry->cred != NULL)
crhold(entry->cred);
- vm_map_entry_link(map, entry, new_entry);
+ vm_map_entry_link(map, new_entry);
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
vm_object_reference(new_entry->object.vm_object);
@@ -3541,8 +3555,7 @@
* Insert the entry into the new map -- we know we're
* inserting at the end of the new map.
*/
- vm_map_entry_link(new_map, new_map->header.prev,
- new_entry);
+ vm_map_entry_link(new_map, new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
/*
@@ -3569,8 +3582,7 @@
new_entry->wired_count = 0;
new_entry->object.vm_object = NULL;
new_entry->cred = NULL;
- vm_map_entry_link(new_map, new_map->header.prev,
- new_entry);
+ vm_map_entry_link(new_map, new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
vm_map_copy_entry(old_map, new_map, old_entry,
new_entry, fork_charge);
@@ -3593,8 +3605,7 @@
new_entry->max_protection = old_entry->max_protection;
new_entry->inheritance = VM_INHERIT_ZERO;
- vm_map_entry_link(new_map, new_map->header.prev,
- new_entry);
+ vm_map_entry_link(new_map, new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
new_entry->cred = curthread->td_ucred;
@@ -4355,15 +4366,15 @@
static void
vm_map_print(vm_map_t map)
{
- vm_map_entry_t entry;
+ vm_map_entry_t entry, prev;
db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
(void *)map,
(void *)map->pmap, map->nentries, map->timestamp);
db_indent += 2;
- for (entry = map->header.next; entry != &map->header;
- entry = entry->next) {
+ for (prev = &map->header; (entry = prev->next) != &map->header;
+ prev = entry) {
db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
(void *)entry, (void *)entry->start, (void *)entry->end,
entry->eflags);
@@ -4382,8 +4393,8 @@
db_printf(", share=%p, offset=0x%jx\n",
(void *)entry->object.sub_map,
(uintmax_t)entry->offset);
- if ((entry->prev == &map->header) ||
- (entry->prev->object.sub_map !=
+ if ((prev == &map->header) ||
+ (prev->object.sub_map !=
entry->object.sub_map)) {
db_indent += 2;
vm_map_print((vm_map_t)entry->object.sub_map);
@@ -4404,8 +4415,8 @@
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
db_printf("\n");
- if ((entry->prev == &map->header) ||
- (entry->prev->object.vm_object !=
+ if ((prev == &map->header) ||
+ (prev->object.vm_object !=
entry->object.vm_object)) {
db_indent += 2;
vm_object_print((db_expr_t)(intptr_t)
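
Note on the invariant the patch relies on (not part of the diff): each map entry carries adj_free, the gap between its own end and the start of its in-order successor, and max_free, the largest adj_free anywhere in its subtree. The first-fit loop in the patched vm_map_findspace() uses max_free to prune whole subtrees. The following is a minimal standalone sketch of that bookkeeping and search, for illustration only; the struct and function names are made up and the layout is simplified from the real vm_map_entry.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct node {
	uintptr_t	 start, end;	/* allocated range [start, end) */
	uintptr_t	 adj_free;	/* gap to the in-order successor */
	uintptr_t	 max_free;	/* largest adj_free in this subtree */
	struct node	*left, *right;
};

/* Recompute max_free from a node's own gap and its children's summaries. */
static void
set_max_free(struct node *n)
{
	n->max_free = n->adj_free;
	if (n->left != NULL && n->left->max_free > n->max_free)
		n->max_free = n->left->max_free;
	if (n->right != NULL && n->right->max_free > n->max_free)
		n->max_free = n->right->max_free;
}

/*
 * First-fit search for a gap of at least "length" bytes, mirroring the
 * loop in the patched vm_map_findspace(): prefer the left subtree (lower
 * addresses), then the node's own gap, then the right subtree.  Returns
 * the start of a suitable gap, or 0 if max_free proves none exists.
 */
static uintptr_t
find_space(struct node *root, uintptr_t length)
{
	struct node *n;

	if (root == NULL || root->max_free < length)
		return (0);
	for (n = root;;) {
		if (n->left != NULL && n->left->max_free >= length)
			n = n->left;
		else if (n->adj_free >= length)
			return (n->end);
		else if (n->right != NULL)
			n = n->right;
		else
			return (0);	/* max_free invariant violated */
	}
}

int
main(void)
{
	/* Two entries: [0x1000, 0x2000) and [0x5000, 0x6000). */
	struct node hi = { 0x5000, 0x6000, 0xa000, 0, NULL, NULL };
	struct node lo = { 0x1000, 0x2000, 0x3000, 0, NULL, &hi };

	set_max_free(&hi);
	set_max_free(&lo);
	printf("0x2000-byte gap found at %#lx\n",
	    (unsigned long)find_space(&lo, 0x2000));
	return (0);
}

In the real map, vm_map_splay() first brings the last entry beginning at or before the requested start address to the root, so (as the diff's comment notes) every entry in the right subtree lies above start and the search only ever descends that subtree.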
