Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -172,7 +172,7 @@
 static void vm_page_free_toq(vm_page_t m);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
-    vm_pindex_t pindex, vm_page_t mpred);
+    vm_pindex_t pindex, vm_page_t mpred, struct pctrie_iter *, bool);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
     vm_page_t mpred);
 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
@@ -1484,7 +1484,7 @@
  */
 static __always_inline int
 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    vm_page_t mpred, bool lookup)
+    vm_page_t mpred, bool lookup, struct pctrie_iter *pages, bool has_iter)
 {
 	int error;
 
@@ -1505,6 +1505,8 @@
 	 */
 	if (lookup)
 		error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
+	else if (has_iter)
+		error = vm_radix_iter_insert(pages, m);
 	else
 		error = vm_radix_insert(&object->rtree, m);
 	if (__predict_false(error != 0)) {
@@ -1532,7 +1534,8 @@
 int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
-	return (vm_page_insert_lookup(m, object, pindex, NULL, true));
+	return (vm_page_insert_lookup(m, object, pindex, NULL, true,
+	    NULL, false));
 }
 
 /*
@@ -1547,9 +1550,10 @@
  */
 static int
 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    vm_page_t mpred)
+    vm_page_t mpred, struct pctrie_iter *pages, bool has_iter)
 {
-	return (vm_page_insert_lookup(m, object, pindex, mpred, false));
+	return (vm_page_insert_lookup(m, object, pindex, mpred, false,
+	    pages, has_iter));
 }
 
 /*
@@ -2236,7 +2240,7 @@
 	}
 	m->a.act_count = 0;
 
-	if (vm_page_insert_after(m, object, pindex, mpred)) {
+	if (vm_page_insert_after(m, object, pindex, mpred, NULL, false)) {
 		if (req & VM_ALLOC_WIRED) {
 			vm_wire_sub(1);
 			m->ref_count = 0;
@@ -2372,6 +2376,7 @@
     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
+	struct pctrie_iter pages;
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
 
@@ -2390,7 +2395,8 @@
 	    object));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
-	mpred = vm_radix_lookup_le(&object->rtree, pindex);
+	vm_page_iter_init(&pages, object);
+	mpred = vm_radix_iter_lookup_le(&pages, pindex);
 	KASSERT(mpred == NULL || mpred->pindex != pindex,
 	    ("vm_page_alloc_contig: pindex already allocated"));
 	for (;;) {
@@ -2439,7 +2445,8 @@
 		m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = oflags;
-		if (vm_page_insert_after(m, object, pindex, mpred)) {
+		if (vm_page_insert_after(m, object, pindex, mpred,
+		    &pages, true)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
 			KASSERT(m->object == NULL,
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -257,6 +257,19 @@
 	return (VM_RADIX_PCTRIE_ITER_STEP_GE(pages));
 }
 
+/*
+ * Initialize an iterator pointing to the page with the greatest pindex that is
+ * less than or equal to the specified pindex, or NULL if there are no such
+ * pages.  Return the page.
+ *
+ * Requires that access be externally synchronized by a lock.
+ */
+static __inline vm_page_t
+vm_radix_iter_lookup_le(struct pctrie_iter *pages, vm_pindex_t index)
+{
+	return (VM_RADIX_PCTRIE_ITER_LOOKUP_LE(pages, index));
+}
+
 /*
  * Update the iterator to point to the page with the pindex that is one greater
  * than the current pindex, or NULL if there is no such page.  Return the page.