Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -171,8 +171,6 @@
 static bool vm_page_free_prep(vm_page_t m);
 static void vm_page_free_toq(vm_page_t m);
 static void vm_page_init(void *dummy);
-static int vm_page_insert_after(vm_page_t m, vm_object_t object,
-    vm_pindex_t pindex, vm_page_t mpred);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
     vm_page_t mpred);
 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
@@ -1473,53 +1471,25 @@
 }
 
 /*
- * Insert the given page into the given object at the given pindex. mpred is
- * used for memq linkage. From vm_page_insert, lookup is true, mpred is
- * initially NULL, and this procedure looks it up. From vm_page_insert_after,
- * lookup is false and mpred is known to the caller to be valid, and may be
- * NULL if this will be the page with the lowest pindex.
- *
- * The procedure is marked __always_inline to suggest to the compiler to
- * eliminate the lookup parameter and the associated alternate branch.
+ * Finish inserting the given page into the given object, after the page has
+ * been inserted into the object's radix tree. mpred is used for memq linkage;
+ * it is known to the caller to be valid, and may be NULL if this will be the
+ * page with the lowest pindex.
  */
-static __always_inline int
-vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    vm_page_t mpred, bool lookup)
+static void
+vm_page_insert_finish(vm_page_t m, vm_object_t object, vm_page_t mpred)
 {
-	int error;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	KASSERT(m->object == NULL,
-	    ("vm_page_insert: page %p already inserted", m));
-
 	/*
 	 * Record the object/offset pair in this page.
 	 */
 	m->object = object;
-	m->pindex = pindex;
 	m->ref_count |= VPRC_OBJREF;
 
-	/*
-	 * Add this page to the object's radix tree, and look up mpred if
-	 * needed.
-	 */
-	if (lookup)
-		error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
-	else
-		error = vm_radix_insert(&object->rtree, m);
-	if (__predict_false(error != 0)) {
-		m->object = NULL;
-		m->pindex = 0;
-		m->ref_count &= ~VPRC_OBJREF;
-		return (1);
-	}
-
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
 	vm_page_insert_radixdone(m, object, mpred);
 	vm_pager_page_inserted(object, m);
-	return (0);
 }
 
 /*
@@ -1532,24 +1502,22 @@
 int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
-	return (vm_page_insert_lookup(m, object, pindex, NULL, true));
-}
+	vm_page_t mpred;
+	int error;
 
-/*
- * vm_page_insert_after:
- *
- *	Inserts the page "m" into the specified object at offset "pindex".
- *
- *	The page "mpred" must immediately precede the offset "pindex" within
- *	the specified object.
- *
- *	The object must be locked.
- */
-static int
-vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    vm_page_t mpred)
-{
-	return (vm_page_insert_lookup(m, object, pindex, mpred, false));
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(m->object == NULL,
+	    ("vm_page_insert: page %p already inserted", m));
+
+	/*
+	 * Add this page to the object's radix tree, and look up mpred.
+	 */
+	m->pindex = pindex;
+	error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
+	if (__predict_false(error != 0))
+		return (1);
+	vm_page_insert_finish(m, object, mpred);
+	return (0);
 }
 
 /*
@@ -2235,8 +2203,8 @@
 		m->ref_count = 1;
 	}
 	m->a.act_count = 0;
-
-	if (vm_page_insert_after(m, object, pindex, mpred)) {
+	m->pindex = pindex;
+	if (__predict_false(vm_radix_insert(&object->rtree, m) != 0)) {
 		if (req & VM_ALLOC_WIRED) {
 			vm_wire_sub(1);
 			m->ref_count = 0;
@@ -2253,6 +2221,7 @@
 		}
 		return (NULL);
 	}
+	vm_page_insert_finish(m, object, mpred);
 
 	/* Ignore device objects; the pager sets "memattr" for them. */
 	if (object->memattr != VM_MEMATTR_DEFAULT &&
@@ -2372,6 +2341,7 @@
     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
+	struct pctrie_iter pages;
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
 
@@ -2390,7 +2360,8 @@
 	    object));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
-	mpred = vm_radix_lookup_le(&object->rtree, pindex);
+	vm_page_iter_init(&pages, object);
+	mpred = vm_radix_iter_lookup_le(&pages, pindex);
 	KASSERT(mpred == NULL || mpred->pindex != pindex,
 	    ("vm_page_alloc_contig: pindex already allocated"));
 	for (;;) {
@@ -2439,7 +2410,8 @@
 		m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = oflags;
-		if (vm_page_insert_after(m, object, pindex, mpred)) {
+		m->pindex = pindex;
+		if (__predict_false(vm_radix_iter_insert(&pages, m) != 0)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
 			KASSERT(m->object == NULL,
@@ -2461,6 +2433,7 @@
 			}
 			return (NULL);
 		}
+		vm_page_insert_finish(m, object, mpred);
 		mpred = m;
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -257,6 +257,19 @@
 	return (VM_RADIX_PCTRIE_ITER_STEP_GE(pages));
 }
 
+/*
+ * Initialize the iterator to point to the page with the greatest pindex that
+ * is less than or equal to the specified pindex, and return that page, or
+ * NULL if there is no such page.
+ *
+ * Requires that access be externally synchronized by a lock.
+ */
+static __inline vm_page_t
+vm_radix_iter_lookup_le(struct pctrie_iter *pages, vm_pindex_t index)
+{
+	return (VM_RADIX_PCTRIE_ITER_LOOKUP_LE(pages, index));
+}
+
 /*
  * Update the iterator to point to the page with the pindex that is one greater
  * than the current pindex, or NULL if there is no such page.  Return the page.
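
Note for reviewers: the patch splits page insertion into two steps, a fallible
radix-tree insertion followed by the infallible vm_page_insert_finish(), which
takes the object reference and links the page into the memq. A minimal sketch
of the resulting caller protocol, assuming the object write lock is held; the
helper name example_page_insert is hypothetical and not part of the patch:

static int
example_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred)
{
	VM_OBJECT_ASSERT_WLOCKED(object);

	/* Set the pindex before the tree insertion; the tree keys on it. */
	m->pindex = pindex;

	/*
	 * The radix insertion can fail (e.g., on node allocation failure).
	 * At this point the page does not yet hold an object reference, so
	 * on failure the caller can free or reuse it without any unwinding.
	 */
	if (__predict_false(vm_radix_insert(&object->rtree, m) != 0))
		return (1);

	/*
	 * Once the page is in the tree, the remaining work cannot fail:
	 * take the object reference, link into the memq after mpred, and
	 * notify the pager.
	 */
	vm_page_insert_finish(m, object, mpred);
	return (0);
}

This ordering is what lets the patch delete the unwind code that
vm_page_insert_lookup() needed when the tree insertion failed after m->object
had already been set.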
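Similarly, vm_page_alloc_contig_domain() now keeps a pctrie iterator alive
across the whole allocation, so the initial predecessor lookup and each
per-page insertion reuse the iterator's cached position instead of walking the
tree from the root each time. A condensed sketch of that pattern under the
same lock assumption; example_contig_insert is hypothetical, and the
allocation and error paths of the real function are elided:

static void
example_contig_insert(vm_object_t object, vm_page_t m_ret, vm_pindex_t pindex,
    u_long npages)
{
	struct pctrie_iter pages;
	vm_page_t m, mpred;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/* Bind the iterator to the object's radix tree. */
	vm_page_iter_init(&pages, object);

	/* Find the predecessor of the first new page, for memq linkage. */
	mpred = vm_radix_iter_lookup_le(&pages, pindex);

	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->pindex = pindex++;
		/* Insertion-failure cleanup elided; see the patch. */
		if (__predict_false(vm_radix_iter_insert(&pages, m) != 0))
			return;
		vm_page_insert_finish(m, object, mpred);
		/* Each inserted page is the predecessor of the next. */
		mpred = m;
	}
}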