Page Menu · Home · FreeBSD

D47036.id145187.diff
Assigned To: No One · Visibility: Temporary

D47036.id145187.diff

Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c
+++ sys/vm/vm_glue.c
@@ -97,6 +97,7 @@
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_object.h>
+#include <vm/vm_radix.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
@@ -611,25 +612,27 @@
vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
int domain)
{
+ struct pctrie_iter pages;
vm_object_t obj = vm_thread_kstack_size_to_obj(npages);
vm_pindex_t pindex;
- vm_page_t m;
+ vm_page_t m, mpred;
int n;
pindex = vm_kstack_pindex(ks, npages);
VM_OBJECT_WLOCK(obj);
+ vm_page_iter_init(&pages, obj);
+ mpred = vm_page_iter_lookup_le(&pages, pindex);
for (n = 0; n < npages;) {
m = vm_page_grab(obj, pindex + n,
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED);
if (m == NULL) {
- m = n > 0 ? ma[n - 1] : vm_page_mpred(obj, pindex);
- m = vm_page_alloc_domain_after(obj, pindex + n, domain,
- req_class | VM_ALLOC_WIRED, m);
+ m = vm_page_alloc_domain(obj, pindex + n, domain,
+ req_class | VM_ALLOC_WIRED, mpred, &pages, true);
}
if (m == NULL)
break;
- ma[n++] = m;
+ ma[n++] = mpred = m;
}
if (n < npages)
goto cleanup;
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -530,6 +530,7 @@
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
vm_size_t size, int flags)
{
+ struct pctrie_iter pages;
vm_offset_t offset, i;
vm_page_t m, mpred;
vm_prot_t prot;
@@ -547,11 +548,13 @@
i = 0;
VM_OBJECT_WLOCK(object);
+ vm_page_iter_init(&pages, object);
retry:
- mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
+ pctrie_iter_reset(&pages);
+ mpred = vm_radix_iter_lookup_le(&pages, atop(offset + i));
for (; i < size; i += PAGE_SIZE, mpred = m) {
- m = vm_page_alloc_domain_after(object, atop(offset + i),
- domain, pflags, mpred);
+ m = vm_page_alloc_domain(object, atop(offset + i),
+ domain, pflags, mpred, &pages, true);
/*
* Ran out of space, free everything up and return. Don't need
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -606,11 +606,9 @@
void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
-vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
-vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
-vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
- vm_page_t);
+vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int,
+ vm_page_t, struct pctrie_iter *, bool);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr);
@@ -644,6 +642,7 @@
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_iter_lookup_ge(struct pctrie_iter *, vm_pindex_t);
+vm_page_t vm_page_iter_lookup_le(struct pctrie_iter *, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -171,8 +171,6 @@
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
-static int vm_page_insert_after(vm_page_t m, vm_object_t object,
- vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
vm_page_t mpred);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
@@ -1475,16 +1473,14 @@
/*
* Insert the given page into the given object at the given pindex. mpred is
* used for memq linkage. From vm_page_insert, lookup is true, mpred is
- * initially NULL, and this procedure looks it up. From vm_page_insert_after,
- * lookup is false and mpred is known to the caller to be valid, and may be
- * NULL if this will be the page with the lowest pindex.
+ * initially NULL, and this procedure looks it up.
*
* The procedure is marked __always_inline to suggest to the compiler to
* eliminate the lookup parameter and the associated alternate branch.
*/
static __always_inline int
vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
- vm_page_t mpred, bool lookup)
+ vm_page_t mpred, bool lookup, struct pctrie_iter *pages, bool hasIter)
{
int error;
@@ -1505,6 +1501,8 @@
*/
if (lookup)
error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
+ else if (hasIter)
+ error = vm_radix_iter_insert(pages, m);
else
error = vm_radix_insert(&object->rtree, m);
if (__predict_false(error != 0)) {
@@ -1523,33 +1521,28 @@
}
/*
- * vm_page_insert: [ internal use only ]
- *
- * Inserts the given mem entry into the object and object list.
- *
- * The object must be locked.
+ * Try to insert the given page at the given pindex, using an iterator.
*/
-int
-vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
+static int
+vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
+ vm_pindex_t pindex, vm_page_t mpred)
{
- return (vm_page_insert_lookup(m, object, pindex, NULL, true));
+ return (vm_page_insert_lookup(m, object, pindex,
+ mpred, false, pages, true));
}
/*
- * vm_page_insert_after:
- *
- * Inserts the page "m" into the specified object at offset "pindex".
+ * vm_page_insert: [ internal use only ]
*
- * The page "mpred" must immediately precede the offset "pindex" within
- * the specified object.
+ * Inserts the given mem entry into the object and object list.
*
* The object must be locked.
*/
-static int
-vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
- vm_page_t mpred)
+int
+vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
- return (vm_page_insert_lookup(m, object, pindex, mpred, false));
+ return (vm_page_insert_lookup(m, object, pindex, NULL, true,
+ NULL, false));
}
/*
@@ -1848,6 +1841,22 @@
return (vm_radix_iter_lookup_ge(pages, pindex));
}
+/*
+ * vm_page_iter_lookup_le:
+ *
+ * Returns the page associated with the object with greatest pindex
+ * less than or equal to the parameter pindex, or NULL. Initializes the
+ * iterator to point to that page.
+ *
+ * The iter pctrie must be locked.
+ */
+vm_page_t
+vm_page_iter_lookup_le(struct pctrie_iter *pages, vm_pindex_t pindex)
+{
+
+ return (vm_radix_iter_lookup_le(pages, pindex));
+}
+
/*
* Returns the given page's successor (by pindex) within the object if it is
* resident; if none is found, NULL is returned.
@@ -2014,15 +2023,28 @@
}
/*
- * vm_page_mpred:
- *
- * Return the greatest page of the object with index <= pindex,
- * or NULL, if there is none. Assumes object lock is held.
+ * Allocate a page in the specified object with the given page index. To
+ * optimize insertion of the page into the object, the caller must also specify
+ * the resident page in the object with largest index smaller than the given
+ * page index, or NULL if no such page exists.
*/
-vm_page_t
-vm_page_mpred(vm_object_t object, vm_pindex_t pindex)
+static vm_page_t
+vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
+ int req, vm_page_t mpred, struct pctrie_iter *pages, bool hasIter)
{
- return (vm_radix_lookup_le(&object->rtree, pindex));
+ struct vm_domainset_iter di;
+ vm_page_t m;
+ int domain;
+
+ vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
+ do {
+ m = vm_page_alloc_domain(object, pindex, domain, req,
+ mpred, pages, hasIter);
+ if (m != NULL)
+ break;
+ } while (vm_domainset_iter_page(&di, object, &domain) == 0);
+
+ return (m);
}
/*
@@ -2052,32 +2074,7 @@
{
return (vm_page_alloc_after(object, pindex, req,
- vm_page_mpred(object, pindex)));
-}
-
-/*
- * Allocate a page in the specified object with the given page index. To
- * optimize insertion of the page into the object, the caller must also specify
- * the resident page in the object with largest index smaller than the given
- * page index, or NULL if no such page exists.
- */
-vm_page_t
-vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
- int req, vm_page_t mpred)
-{
- struct vm_domainset_iter di;
- vm_page_t m;
- int domain;
-
- vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
- do {
- m = vm_page_alloc_domain_after(object, pindex, domain, req,
- mpred);
- if (m != NULL)
- break;
- } while (vm_domainset_iter_page(&di, object, &domain) == 0);
-
- return (m);
+ vm_radix_lookup_le(&object->rtree, pindex), NULL, false));
}
/*
@@ -2134,8 +2131,8 @@
}
vm_page_t
-vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
- int req, vm_page_t mpred)
+vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
+ int req, vm_page_t mpred, struct pctrie_iter *pages, bool hasIter)
{
struct vm_domain *vmd;
vm_page_t m;
@@ -2239,7 +2236,8 @@
}
m->a.act_count = 0;
- if (vm_page_insert_after(m, object, pindex, mpred)) {
+ if (vm_page_insert_lookup(m, object, pindex, mpred, false,
+ pages, hasIter)) {
if (req & VM_ALLOC_WIRED) {
vm_wire_sub(1);
m->ref_count = 0;
@@ -2375,6 +2373,7 @@
int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr)
{
+ struct pctrie_iter pages;
vm_page_t m, m_ret, mpred;
u_int busy_lock, flags, oflags;
@@ -2393,7 +2392,8 @@
object));
KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
- mpred = vm_page_mpred(object, pindex);
+ vm_page_iter_init(&pages, object);
+ mpred = vm_page_iter_lookup_le(&pages, pindex);
KASSERT(mpred == NULL || mpred->pindex != pindex,
("vm_page_alloc_contig: pindex already allocated"));
for (;;) {
@@ -2442,7 +2442,7 @@
m->ref_count = 1;
m->a.act_count = 0;
m->oflags = oflags;
- if (vm_page_insert_after(m, object, pindex, mpred)) {
+ if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
if ((req & VM_ALLOC_WIRED) != 0)
vm_wire_sub(npages);
KASSERT(m->object == NULL,
@@ -5062,6 +5062,7 @@
vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count)
{
+ struct pctrie_iter pages;
vm_page_t m, mpred;
int pflags;
int i;
@@ -5075,8 +5076,10 @@
pflags = vm_page_grab_pflags(allocflags);
i = 0;
+ vm_page_iter_init(&pages, object);
retrylookup:
- m = vm_page_mpred(object, pindex + i);
+ pctrie_iter_reset(&pages);
+ m = vm_radix_iter_lookup_le(&pages, pindex + i);
if (m == NULL || m->pindex != pindex + i) {
mpred = m;
m = NULL;
@@ -5094,7 +5097,8 @@
if ((allocflags & VM_ALLOC_NOCREAT) != 0)
break;
m = vm_page_alloc_after(object, pindex + i,
- pflags | VM_ALLOC_COUNT(count - i), mpred);
+ pflags | VM_ALLOC_COUNT(count - i), mpred,
+ &pages, true);
if (m == NULL) {
if ((allocflags & (VM_ALLOC_NOWAIT |
VM_ALLOC_WAITFAIL)) != 0)
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -257,6 +257,19 @@
return (VM_RADIX_PCTRIE_ITER_STEP_GE(pages));
}
+/*
+ * Initialize an iterator pointing to the page with the greatest pindex that is
+ * less than or equal to the specified pindex, or NULL if there are no such
+ * pages. Return the page.
+ *
+ * Requires that access be externally synchronized by a lock.
+ */
+static __inline vm_page_t
+vm_radix_iter_lookup_le(struct pctrie_iter *pages, vm_pindex_t index)
+{
+ return (VM_RADIX_PCTRIE_ITER_LOOKUP_LE(pages, index));
+}
+
/*
* Update the iterator to point to the page with the pindex that is one greater
* than the current pindex, or NULL if there is no such page. Return the page.

File Metadata

Mime Type
text/plain
Expires
Fri, Oct 10, 9:20 AM (17 h, 33 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
23526349
Default Alt Text
D47036.id145187.diff (11 KB)

Event Timeline