Index: sys/amd64/sgx/sgx.c
===================================================================
--- sys/amd64/sgx/sgx.c
+++ sys/amd64/sgx/sgx.c
@@ -193,18 +193,18 @@
 static void
 sgx_insert_epc_page_by_index(vm_page_t page, vm_object_t object,
-    vm_pindex_t pidx, struct pctrie_iter *pages)
+    vm_pindex_t pidx)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	page->valid = VM_PAGE_BITS_ALL;
-	vm_page_iter_insert(page, object, pidx, pages);
+	vm_page_insert(page, object, pidx);
 }
 
 static int
 sgx_va_slot_init_by_index(struct sgx_softc *sc, vm_object_t object,
-    uint64_t idx, struct pctrie_iter *pages)
+    uint64_t idx)
 {
 	struct epc_page *epc;
 	vm_page_t page;
@@ -213,7 +213,7 @@
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
-	p = vm_radix_iter_lookup(pages, idx);
+	p = vm_page_lookup(object, idx);
 	if (p == NULL) {
 		ret = sgx_get_epc_page(sc, &epc);
 		if (ret) {
@@ -227,7 +227,7 @@
 		mtx_unlock(&sc->mtx_encls);
 
 		page = PHYS_TO_VM_PAGE(epc->phys);
-		sgx_insert_epc_page_by_index(page, object, idx, pages);
+		sgx_insert_epc_page_by_index(page, object, idx);
 	}
 
 	return (0);
@@ -235,7 +235,7 @@
 
 static int
 sgx_va_slot_init(struct sgx_softc *sc, struct sgx_enclave *enclave,
-    vm_pindex_t pidx, struct pctrie_iter *pages)
+    vm_pindex_t pidx)
 {
 	uint64_t va_page_idx;
 	uint64_t idx;
@@ -249,7 +249,7 @@
 	va_page_idx = pidx / SGX_VA_PAGE_SLOTS;
 	idx = - SGX_VA_PAGES_OFFS - va_page_idx;
 
-	ret = sgx_va_slot_init_by_index(sc, object, idx, pages);
+	ret = sgx_va_slot_init_by_index(sc, object, idx);
 
 	return (ret);
 }
@@ -351,18 +351,13 @@
 }
 
 static void
-sgx_page_remove(struct sgx_softc *sc, vm_page_t p,
-    struct pctrie_iter *pages)
+sgx_page_remove(struct sgx_softc *sc, vm_page_t p)
 {
 	struct epc_page *epc;
 	vm_paddr_t pa;
 	uint64_t offs;
 
-	if (pages != NULL)
-		(void)vm_page_iter_remove(pages, p);
-	else
-		(void) vm_page_remove(p);
-
+	(void)vm_page_remove(p);
 	dprintf("%s: p->pidx %ld\n", __func__, p->pindex);
 
 	pa = VM_PAGE_TO_PHYS(p);
@@ -378,7 +373,6 @@
 sgx_enclave_remove(struct sgx_softc *sc,
     struct sgx_enclave *enclave)
 {
-	struct pctrie_iter pages;
 	vm_object_t object;
 	vm_page_t p, p_secs;
 
@@ -388,7 +382,6 @@
 
 	object = enclave->object;
 
-	vm_page_iter_init(&pages, object);
 	VM_OBJECT_WLOCK(object);
 
 	/*
@@ -396,20 +389,19 @@
 	 * First remove all the pages except SECS,
 	 * then remove SECS page.
 	 */
restart:
-	VM_RADIX_FOREACH(p, &pages) {
+	for (p = vm_radix_lookup_ge(&object->rtree, 0);
+	    p != NULL; p = vm_radix_lookup_ge_step(&object->rtree, p)) {
 		if (p->pindex == SGX_SECS_VM_OBJECT_INDEX)
 			continue;
-		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
-			pctrie_iter_reset(&pages);
+		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0)
 			goto restart;
-		}
-		sgx_page_remove(sc, p, &pages);
+		sgx_page_remove(sc, p);
 	}
 	p_secs = vm_page_grab(object, SGX_SECS_VM_OBJECT_INDEX,
 	    VM_ALLOC_NOCREAT);
 	/* Now remove SECS page */
 	if (p_secs != NULL)
-		sgx_page_remove(sc, p_secs, NULL);
+		sgx_page_remove(sc, p_secs);
 
 	KASSERT(object->resident_page_count == 0, ("count"));
 
@@ -620,7 +612,7 @@
 static void
 sgx_insert_epc_page(struct sgx_enclave *enclave, struct epc_page *epc,
-    uint64_t addr, struct pctrie_iter *pages)
+    uint64_t addr)
 {
 	vm_pindex_t pidx;
 	vm_page_t page;
@@ -630,13 +622,12 @@
 	pidx = OFF_TO_IDX(addr);
 
 	page = PHYS_TO_VM_PAGE(epc->phys);
-	sgx_insert_epc_page_by_index(page, enclave->object, pidx, pages);
+	sgx_insert_epc_page_by_index(page, enclave->object, pidx);
 }
 
 static int
 sgx_ioctl_create(struct sgx_softc *sc, struct sgx_enclave_create *param)
 {
-	struct pctrie_iter pages;
 	struct sgx_vm_handle *vmh;
 	vm_map_entry_t entry;
 	vm_page_t p;
@@ -707,9 +698,8 @@
 	}
 
 	enclave->secs_epc_page = epc;
 
-	vm_page_iter_init(&pages, object);
 	VM_OBJECT_WLOCK(object);
-	p = vm_radix_iter_lookup(&pages, SGX_SECS_VM_OBJECT_INDEX);
+	p = vm_page_lookup(object, SGX_SECS_VM_OBJECT_INDEX);
 	if (p) {
 		VM_OBJECT_WUNLOCK(object);
 		/* SECS page already added. */
@@ -718,7 +708,7 @@
 	}
 
 	ret = sgx_va_slot_init_by_index(sc, object,
-	    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX, &pages);
+	    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
 	if (ret) {
 		VM_OBJECT_WUNLOCK(object);
 		dprintf("%s: Can't init va slot.\n", __func__);
@@ -732,7 +722,7 @@
 		p = vm_page_grab(enclave->object,
 		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX,
 		    VM_ALLOC_NOCREAT);
-		sgx_page_remove(sc, p, NULL);
+		sgx_page_remove(sc, p);
 		VM_OBJECT_WUNLOCK(object);
 		goto error;
 	}
@@ -746,7 +736,7 @@
 		p = vm_page_grab(enclave->object,
 		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX,
 		    VM_ALLOC_NOCREAT);
-		sgx_page_remove(sc, p, NULL);
+		sgx_page_remove(sc, p);
 		VM_OBJECT_WUNLOCK(object);
 		goto error;
 	}
@@ -758,7 +748,7 @@
 
 	page = PHYS_TO_VM_PAGE(epc->phys);
 	sgx_insert_epc_page_by_index(page, enclave->object,
-	    SGX_SECS_VM_OBJECT_INDEX, &pages);
+	    SGX_SECS_VM_OBJECT_INDEX);
 
 	VM_OBJECT_WUNLOCK(object);
 
@@ -782,7 +772,6 @@
 sgx_ioctl_add_page(struct sgx_softc *sc,
     struct sgx_enclave_add_page *addp)
 {
-	struct pctrie_iter pages;
 	struct epc_page *secs_epc_page;
 	struct sgx_enclave *enclave;
 	struct sgx_vm_handle *vmh;
@@ -854,9 +843,8 @@
 	addr = (addp->addr - vmh->base);
 	pidx = OFF_TO_IDX(addr);
 
-	vm_page_iter_init(&pages, object);
 	VM_OBJECT_WLOCK(object);
-	p = vm_radix_iter_lookup(&pages, pidx);
+	p = vm_page_lookup(object, pidx);
 	if (p) {
 		VM_OBJECT_WUNLOCK(object);
 		/* Page already added. */
@@ -864,7 +852,7 @@
 		goto error;
 	}
 
-	ret = sgx_va_slot_init(sc, enclave, pidx, &pages);
+	ret = sgx_va_slot_init(sc, enclave, pidx);
 	if (ret) {
 		VM_OBJECT_WUNLOCK(object);
 		dprintf("%s: Can't init va slot.\n", __func__);
@@ -896,7 +884,7 @@
 		goto error;
 	}
 
-	sgx_insert_epc_page(enclave, epc, addr, &pages);
+	sgx_insert_epc_page(enclave, epc, addr);
 
 	VM_OBJECT_WUNLOCK(object);
 
Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -323,27 +323,23 @@
 lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
     unsigned long pfn, pgprot_t prot)
 {
-	struct pctrie_iter pages;
 	vm_object_t vm_obj = vma->vm_obj;
 	vm_object_t tmp_obj;
 	vm_page_t page;
 	vm_pindex_t pindex;
 
 	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
-	vm_page_iter_init(&pages, vm_obj);
 	pindex = OFF_TO_IDX(addr - vma->vm_start);
 	if (vma->vm_pfn_count == 0)
 		vma->vm_pfn_first = pindex;
 	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));
retry:
-	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
+	page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
 	if (page == NULL) {
 		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
-		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
-			pctrie_iter_reset(&pages);
+		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
 			goto retry;
-		}
 		if (page->object != NULL) {
 			tmp_obj = page->object;
 			vm_page_xunbusy(page);
@@ -367,11 +363,10 @@
 			vm_page_remove(page);
 		}
 		VM_OBJECT_WUNLOCK(tmp_obj);
-		pctrie_iter_reset(&pages);
 		VM_OBJECT_WLOCK(vm_obj);
 		goto retry;
 	}
-	if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
+	if (vm_page_insert(page, vm_obj, pindex) != 0) {
 		vm_page_xunbusy(page);
 		return (VM_FAULT_OOM);
 	}
Index: sys/kern/uipc_shm.c
===================================================================
--- sys/kern/uipc_shm.c
+++ sys/kern/uipc_shm.c
@@ -196,7 +196,6 @@
 static int
 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 {
-	struct pctrie_iter pages;
 	vm_page_t m;
 	vm_pindex_t idx;
 	size_t tlen;
@@ -216,9 +215,8 @@
 	 * page: use zero_region.  This is intended to avoid instantiating
 	 * pages on read from a sparse region.
 	 */
-	vm_page_iter_init(&pages, obj);
 	VM_OBJECT_WLOCK(obj);
-	m = vm_radix_iter_lookup(&pages, idx);
+	m = vm_radix_lookup(&obj->rtree, idx);
 	if (uio->uio_rw == UIO_READ && m == NULL &&
 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
 		VM_OBJECT_WUNLOCK(obj);
@@ -232,8 +230,8 @@
 	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
 	 * type object.
 	 */
-	rv = vm_page_grab_valid_iter(&m, obj, idx,
-	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY, &pages);
+	rv = vm_page_grab_valid(&m, obj, idx,
+	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
 	if (rv != VM_PAGER_OK) {
 		VM_OBJECT_WUNLOCK(obj);
 		if (bootverbose) {
Index: sys/vm/phys_pager.c
===================================================================
--- sys/vm/phys_pager.c
+++ sys/vm/phys_pager.c
@@ -231,7 +231,6 @@
     int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
     vm_pindex_t *last)
 {
-	struct pctrie_iter pages;
 	vm_page_t m;
 	vm_pindex_t base, end, i;
 	int ahead;
@@ -247,12 +246,11 @@
 	end = *last;
 	*first = base;
 	*last = end;
-	vm_page_iter_init(&pages, object);
 
 	for (i = base; i <= end; i++) {
 		ahead = MIN(end - i, PHYSALLOC);
-		m = vm_page_grab_iter(object, i,
-		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead), &pages);
+		m = vm_page_grab(object, i,
+		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
 		if (!vm_page_all_valid(m))
 			vm_page_zero_invalid(m, TRUE);
 		KASSERT(m->dirty == 0,
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1935,9 +1935,8 @@
 			if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
 				break;
 		} else {
-			m = vm_page_alloc_iter(object, blks.index + i,
-			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL,
-			    &pages);
+			m = vm_page_alloc(object, blks.index + i,
+			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
 			if (m == NULL)
 				break;
 		}
Index: sys/vm/vm_domainset.h
===================================================================
--- sys/vm/vm_domainset.h
+++ sys/vm/vm_domainset.h
@@ -28,8 +28,6 @@
 #ifndef __VM_DOMAINSET_H__
 #define	__VM_DOMAINSET_H__
 
-struct pctrie_iter;
-
 struct vm_domainset_iter {
 	struct domainset	*di_domain;
 	unsigned int		*di_iter;
@@ -42,10 +40,9 @@
 };
 
 int	vm_domainset_iter_page(struct vm_domainset_iter *, struct vm_object *,
-	    int *, struct pctrie_iter *);
+	    int *);
 void	vm_domainset_iter_page_init(struct vm_domainset_iter *,
-	    struct vm_object *, vm_pindex_t, int *, int *,
-	    struct pctrie_iter *);
+	    struct vm_object *, vm_pindex_t, int *, int *);
 int	vm_domainset_iter_policy(struct vm_domainset_iter *, int *);
 void	vm_domainset_iter_policy_init(struct vm_domainset_iter *,
 	    struct domainset *, int *, int *);
Index: sys/vm/vm_domainset.c
===================================================================
--- sys/vm/vm_domainset.c
+++ sys/vm/vm_domainset.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include <sys/pctrie.h>
 #include
 #include
@@ -200,7 +199,7 @@
 
 void
 vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
-    vm_pindex_t pindex, int *domain, int *req, struct pctrie_iter *pages)
+    vm_pindex_t pindex, int *domain, int *req)
 {
 	struct domainset_ref *dr;
 
@@ -219,12 +218,12 @@
 		    VM_ALLOC_NOWAIT;
 	vm_domainset_iter_first(di, domain);
 	if (vm_page_count_min_domain(*domain))
-		vm_domainset_iter_page(di, obj, domain, pages);
+		vm_domainset_iter_page(di, obj, domain);
 }
 
 int
 vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
-    int *domain, struct pctrie_iter *pages)
+    int *domain)
 {
 	if (__predict_false(DOMAINSET_EMPTY(&di->di_valid_mask)))
 		return (ENOMEM);
@@ -251,8 +250,6 @@
 	/* Wait for one of the domains to accumulate some free pages. */
 	if (obj != NULL) {
 		VM_OBJECT_WUNLOCK(obj);
-		if (pages != NULL)
-			pctrie_iter_reset(pages);
 	}
 	vm_wait_doms(&di->di_valid_mask, 0);
 	if (obj != NULL)
@@ -343,7 +340,7 @@
 
 int
 vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
-    int *domain, struct pctrie_iter *pages)
+    int *domain)
 {
 
 	return (EJUSTRETURN);
@@ -351,7 +348,7 @@
 
 void
 vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
-    vm_pindex_t pindex, int *domain, int *flags, struct pctrie_iter *pages)
+    vm_pindex_t pindex, int *domain, int *flags)
 {
 
 	*domain = 0;
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1229,7 +1229,7 @@
 * Allocate a page directly or via the object populate method.
 */
 static enum fault_status
-vm_fault_allocate(struct faultstate *fs, struct pctrie_iter *pages)
+vm_fault_allocate(struct faultstate *fs)
 {
 	struct domainset *dset;
 	enum fault_status res;
@@ -1257,7 +1257,6 @@
 			vm_fault_unlock_and_deallocate(fs);
 			return (res);
 		case FAULT_CONTINUE:
-			pctrie_iter_reset(pages);
 			/*
 			 * Pager's populate() method
 			 * returned VM_PAGER_BAD.
@@ -1291,8 +1290,8 @@
 			vm_fault_unlock_and_deallocate(fs);
 			return (FAULT_FAILURE);
 		}
-		fs->m = vm_page_alloc_iter(fs->object, fs->pindex,
-		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0, pages);
+		fs->m = vm_page_alloc(fs->object, fs->pindex,
+		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0);
 	}
 	if (fs->m == NULL) {
 		if (vm_fault_allocate_oom(fs))
@@ -1458,7 +1457,7 @@
 static enum fault_status
 vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
 {
-	struct pctrie_iter pages;
+	struct vm_radix copy;
 	enum fault_status res;
 	bool dead;
 
@@ -1484,8 +1483,8 @@
 	/*
 	 * See if the page is resident.
 	 */
-	vm_page_iter_init(&pages, fs->object);
-	fs->m = vm_radix_iter_lookup(&pages, fs->pindex);
+	copy = fs->object->rtree;
+	fs->m = vm_radix_lookup(&copy, fs->pindex);
 	if (fs->m != NULL) {
 		if (!vm_page_tryxbusy(fs->m)) {
 			vm_fault_busy_sleep(fs);
@@ -1515,7 +1514,8 @@
 			vm_fault_unlock_and_deallocate(fs);
 			return (FAULT_RESTART);
 		}
-		res = vm_fault_allocate(fs, &pages);
+		fs->object->rtree = copy;
+		res = vm_fault_allocate(fs);
 		if (res != FAULT_CONTINUE)
 			return (res);
 	}
@@ -1550,7 +1550,6 @@
 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     int fault_flags, vm_page_t *m_hold)
 {
-	struct pctrie_iter pages;
 	struct faultstate fs;
 	int ahead, behind, faultcount, rv;
 	enum fault_status res;
@@ -1604,10 +1603,8 @@
 			return (KERN_SUCCESS);
 		}
 		VM_OBJECT_ASSERT_WLOCKED(fs.first_object);
-	} else {
-		vm_page_iter_init(&pages, fs.first_object);
+	} else
 		VM_OBJECT_WLOCK(fs.first_object);
-	}
 
 	/*
 	 * Make a reference to this object to prevent its disposal while we
@@ -1630,7 +1627,7 @@
 	fs.pindex = fs.first_pindex;
 
 	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
-		res = vm_fault_allocate(&fs, &pages);
+		res = vm_fault_allocate(&fs);
 		switch (res) {
 		case FAULT_RESTART:
 			goto RetryFault;
@@ -2096,7 +2093,6 @@
     vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
     vm_ooffset_t *fork_charge)
 {
-	struct pctrie_iter pages;
 	vm_object_t backing_object, dst_object, object, src_object;
 	vm_pindex_t dst_pindex, pindex, src_pindex;
 	vm_prot_t access, prot;
@@ -2173,7 +2169,6 @@
 	 * with the source object, all of its pages must be dirtied,
 	 * regardless of whether they can be written.
 	 */
-	vm_page_iter_init(&pages, dst_object);
 	for (vaddr = dst_entry->start, dst_pindex = 0; vaddr < dst_entry->end;
 	    vaddr += PAGE_SIZE, dst_pindex++) {
@@ -2217,14 +2212,13 @@
 		 */
 		pindex = (src_object == dst_object ? src_pindex : 0) +
 		    dst_pindex;
-		dst_m = vm_page_alloc_iter(dst_object, pindex,
-		    VM_ALLOC_NORMAL, &pages);
+		dst_m = vm_page_alloc(dst_object, pindex,
+		    VM_ALLOC_NORMAL);
 		if (dst_m == NULL) {
 			VM_OBJECT_WUNLOCK(dst_object);
 			VM_OBJECT_RUNLOCK(object);
 			vm_wait(dst_object);
 			VM_OBJECT_WLOCK(dst_object);
-			pctrie_iter_reset(&pages);
 			goto again;
 		}
 
@@ -2247,10 +2241,8 @@
 		} else {
 			dst_m = src_m;
 			if (vm_page_busy_acquire(
-			    dst_m, VM_ALLOC_WAITFAIL) == 0) {
-				pctrie_iter_reset(&pages);
+			    dst_m, VM_ALLOC_WAITFAIL) == 0)
 				goto again;
-			}
 			if (dst_m->pindex >= dst_object->size) {
 				/*
 				 * We are upgrading.  Index can occur
Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c
+++ sys/vm/vm_glue.c
@@ -453,7 +453,7 @@
 	obj = vm_thread_kstack_size_to_obj(pages);
 	if (vm_ndomains > 1)
 		obj->domain.dr_policy = ds;
-	vm_domainset_iter_page_init(&di, obj, 0, &domain, &req, NULL);
+	vm_domainset_iter_page_init(&di, obj, 0, &domain, &req);
 	do {
 		/*
 		 * Get a kernel virtual address for this thread's kstack.
@@ -480,7 +480,7 @@
 			vm_page_valid(ma[i]);
 		pmap_qenter(ks, ma, pages);
 		return (ks);
-	} while (vm_domainset_iter_page(&di, obj, &domain, NULL) == 0);
+	} while (vm_domainset_iter_page(&di, obj, &domain) == 0);
 
 	return (0);
 }
@@ -612,7 +612,6 @@
 vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
     int domain)
 {
-	struct pctrie_iter pages;
 	vm_object_t obj = vm_thread_kstack_size_to_obj(npages);
 	vm_pindex_t pindex;
 	vm_page_t m;
@@ -620,15 +619,14 @@
 
 	pindex = vm_kstack_pindex(ks, npages);
 
-	vm_page_iter_init(&pages, obj);
 	VM_OBJECT_WLOCK(obj);
 	for (n = 0; n < npages; ma[n++] = m) {
-		m = vm_page_grab_iter(obj, pindex + n,
-		    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED, &pages);
+		m = vm_page_grab(obj, pindex + n,
+		    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED);
 		if (m != NULL)
 			continue;
-		m = vm_page_alloc_domain_iter(obj, pindex + n,
-		    domain, req_class | VM_ALLOC_WIRED, &pages);
+		m = vm_page_alloc_domain(obj, pindex + n, domain,
+		    req_class | VM_ALLOC_WIRED);
 		if (m != NULL)
 			continue;
 		for (int i = 0; i < n; i++) {
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -530,7 +530,6 @@
 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
     vm_size_t size, int flags)
 {
-	struct pctrie_iter pages;
 	vm_offset_t offset, i;
 	vm_page_t m;
 	vm_prot_t prot;
@@ -547,12 +546,11 @@
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 
 	i = 0;
-	vm_page_iter_init(&pages, object);
 	VM_OBJECT_WLOCK(object);
retry:
 	for (; i < size; i += PAGE_SIZE) {
-		m = vm_page_alloc_domain_iter(object, atop(offset + i),
-		    domain, pflags, &pages);
+		m = vm_page_alloc_domain(object, atop(offset + i),
+		    domain, pflags);
 
 		/*
 		 * Ran out of space, free everything up and return. Don't need
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -2119,8 +2119,7 @@
 	vm_page_iter_init(&pages, object);
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	for (pindex = start; pindex < end; pindex++) {
-		rv = vm_page_grab_valid_iter(&m, object, pindex,
-		    VM_ALLOC_NORMAL, &pages);
+		rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
 		if (rv != VM_PAGER_OK)
 			break;
 
@@ -2259,21 +2258,19 @@
 vm_object_prepare_buf_pages(vm_object_t object, vm_page_t *ma_dst, int count,
     int *rbehind, int *rahead, vm_page_t *ma_src)
 {
-	struct pctrie_iter pages;
 	vm_pindex_t pindex;
 	vm_page_t m, mpred, msucc;
 
-	vm_page_iter_init(&pages, object);
 	VM_OBJECT_ASSERT_LOCKED(object);
 	if (*rbehind != 0) {
 		m = ma_src[0];
 		pindex = m->pindex;
-		mpred = vm_radix_iter_lookup_lt(&pages, pindex);
+		mpred = vm_radix_lookup_lt(&object->rtree, pindex);
 		*rbehind = MIN(*rbehind,
 		    pindex - (mpred != NULL ? mpred->pindex + 1 : 0));
 		for (int i = 0; i < *rbehind; i++) {
-			m = vm_page_alloc_iter(object, pindex - i - 1,
-			    VM_ALLOC_NORMAL, &pages);
+			m = vm_page_alloc(object, pindex - i - 1,
+			    VM_ALLOC_NORMAL);
 			if (m == NULL) {
 				/* Shift the array. */
 				for (int j = 0; j < i; j++)
@@ -2290,12 +2287,11 @@
 	if (*rahead != 0) {
 		m = ma_src[count - 1];
 		pindex = m->pindex + 1;
-		msucc = vm_radix_iter_lookup_ge(&pages, pindex);
+		msucc = vm_radix_lookup_ge(&object->rtree, pindex);
 		*rahead = MIN(*rahead,
 		    (msucc != NULL ? msucc->pindex : object->size) - pindex);
 		for (int i = 0; i < *rahead; i++) {
-			m = vm_page_alloc_iter(object, pindex + i,
-			    VM_ALLOC_NORMAL, &pages);
+			m = vm_page_alloc(object, pindex + i, VM_ALLOC_NORMAL);
 			if (m == NULL) {
 				*rahead = i;
 				break;
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -558,10 +558,8 @@
     vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
-vm_page_t vm_page_alloc_domain_iter(vm_object_t object, vm_pindex_t pindex,
-    int domain, int req, struct pctrie_iter *pages);
-vm_page_t vm_page_alloc_iter(vm_object_t object, vm_pindex_t pindex, int req,
-    struct pctrie_iter *pages);
+vm_page_t vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex,
+    int domain, int req);
 vm_page_t vm_page_alloc_noobj(int);
 vm_page_t vm_page_alloc_noobj_domain(int, int);
 vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
@@ -590,8 +588,6 @@
 int vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
     int end);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
-vm_page_t vm_page_grab_iter(vm_object_t object, vm_pindex_t pindex,
-    int allocflags, struct pctrie_iter *pages);
 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);
@@ -599,8 +595,6 @@
     int allocflags, vm_page_t *ma, int count);
 int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
     int allocflags);
-int vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object,
-    vm_pindex_t pindex, int allocflags, struct pctrie_iter *pages);
 int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
     vm_pindex_t pindex, int allocflags);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
@@ -610,8 +604,6 @@
 void vm_page_invalid(vm_page_t m);
 void vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m);
 void vm_page_iter_init(struct pctrie_iter *, vm_object_t);
-int vm_page_iter_insert(vm_page_t m, vm_object_t, vm_pindex_t,
-    struct pctrie_iter *);
 void vm_page_iter_limit_init(struct pctrie_iter *, vm_object_t, vm_pindex_t);
 bool vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m);
 bool vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1474,14 +1474,14 @@
 }
 
 /*
- * Insert the given page into the given object at the given pindex.
+ * vm_page_insert:		[ internal use only ]
+ *
+ *	Inserts the given mem entry into the object.
 *
- * The procedure is marked __always_inline to suggest to the compiler to
- * eliminate the iter parameter and the associated alternate branch.
+ *	The object must be locked.
 */
-static __always_inline int
-vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    bool iter, struct pctrie_iter *pages)
+int
+vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	int error;
@@ -1499,10 +1499,7 @@
 	/*
 	 * Add this page to the object's radix tree.
 	 */
-	if (iter)
-		error = vm_radix_iter_insert(pages, m);
-	else
-		error = vm_radix_insert(&object->rtree, m);
+	error = vm_radix_insert(&object->rtree, m);
 	if (__predict_false(error != 0)) {
 		m->object = NULL;
 		m->pindex = 0;
@@ -1515,35 +1512,6 @@
 	return (0);
 }
 
-/*
- * vm_page_insert:		[ internal use only ]
- *
- *	Inserts the given mem entry into the object and object list.
- *
- *	The object must be locked.
- */
-int
-vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
-{
-	return (vm_page_insert_lookup(m, object, pindex, false, NULL));
-}
-
-/*
- * vm_page_iter_insert:
- *
- *	Tries to insert the page "m" into the specified object at offset
- *	"pindex" using the iterator "pages".  Returns 0 if the insertion was
- *	successful.
- *
- *	The object must be locked.
- */
-int
-vm_page_iter_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    struct pctrie_iter *pages)
-{
-	return (vm_page_insert_lookup(m, object, pindex, true, pages));
-}
-
 /*
 * vm_page_insert_radixdone:
 *
@@ -2010,33 +1978,17 @@
 */
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
-{
-	struct pctrie_iter pages;
-
-	vm_page_iter_init(&pages, object);
-	return (vm_page_alloc_iter(object, pindex, req, &pages));
-}
-
-/*
- * Allocate a page in the specified object with the given page index.  If the
- * object lock is dropped and regained, the pages iter is reset.
- */
-vm_page_t
-vm_page_alloc_iter(vm_object_t object, vm_pindex_t pindex, int req,
-    struct pctrie_iter *pages)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
-	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req,
-	    pages);
+	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
-		m = vm_page_alloc_domain_iter(object, pindex, domain, req,
-		    pages);
+		m = vm_page_alloc_domain(object, pindex, domain, req);
 		if (m != NULL)
 			break;
-	} while (vm_domainset_iter_page(&di, object, &domain, pages) == 0);
+	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
 	return (m);
 }
@@ -2095,8 +2047,8 @@
 }
 
 vm_page_t
-vm_page_alloc_domain_iter(vm_object_t object, vm_pindex_t pindex, int domain,
-    int req, struct pctrie_iter *pages)
+vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
+    int req)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
@@ -2132,8 +2084,7 @@
 	 * Can we allocate the page from a reservation?
 	 */
 	if (vm_object_reserv(object) &&
-	    (m = vm_reserv_alloc_page(object, pindex, domain, req, pages)) !=
-	    NULL) {
+	    (m = vm_reserv_alloc_page(object, pindex, domain, req)) != NULL) {
 		goto found;
 	}
#endif
@@ -2166,8 +2117,6 @@
 		 * Not allocatable, give up.
 		 */
 		(void)vm_domain_alloc_fail(vmd, object, req);
-		if ((req & VM_ALLOC_WAITFAIL) != 0)
-			pctrie_iter_reset(pages);
 		return (NULL);
 	}
 
@@ -2202,7 +2151,7 @@
 	}
 	m->a.act_count = 0;
 
-	if (vm_page_iter_insert(m, object, pindex, pages)) {
+	if (vm_page_insert(m, object, pindex)) {
 		if (req & VM_ALLOC_WIRED) {
 			vm_wire_sub(1);
 			m->ref_count = 0;
@@ -2215,7 +2164,6 @@
 		if (req & VM_ALLOC_WAITFAIL) {
 			VM_OBJECT_WUNLOCK(object);
 			vm_radix_wait();
-			pctrie_iter_reset(pages);
 			VM_OBJECT_WLOCK(object);
 		}
 		return (NULL);
@@ -2282,7 +2230,7 @@
 
 	start_segind = -1;
 
-	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req, NULL);
+	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
 		m = vm_page_alloc_contig_domain(object, pindex, domain, req,
 		    npages, low, high, alignment, boundary, memattr);
@@ -2294,7 +2242,7 @@
 		    npages, low, high) == -1) {
 			vm_domainset_iter_ignore(&di, domain);
 		}
-	} while (vm_domainset_iter_page(&di, object, &domain, NULL) == 0);
+	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
 	return (m);
 }
@@ -2342,7 +2290,6 @@
     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
-	struct pctrie_iter pages;
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
 
@@ -2360,7 +2307,6 @@
 	    object));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
-	vm_page_iter_init(&pages, object);
 	m_ret = NULL;
#if VM_NRESERVLEVEL > 0
 	/*
@@ -2368,7 +2314,7 @@
 	 */
 	if (vm_object_reserv(object)) {
 		m_ret = vm_reserv_alloc_contig(object, pindex, domain,
-		    req, npages, low, high, alignment, boundary, &pages);
+		    req, npages, low, high, alignment, boundary);
 	}
#endif
 	if (m_ret == NULL) {
@@ -2409,7 +2355,7 @@
 		m->a.act_count = 0;
 		m->oflags = oflags;
 		m->pool = VM_FREEPOOL_DEFAULT;
-		if (vm_page_iter_insert(m, object, pindex, &pages)) {
+		if (vm_page_insert(m, object, pindex)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
 			KASSERT(m->object == NULL,
@@ -2610,12 +2556,12 @@
 	vm_page_t m;
 	int domain;
 
-	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req, NULL);
+	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_noobj_domain(domain, req);
 		if (m != NULL)
 			break;
-	} while (vm_domainset_iter_page(&di, NULL, &domain, NULL) == 0);
+	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
@@ -2629,13 +2575,13 @@
 	vm_page_t m;
 	int domain;
 
-	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req, NULL);
+	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
 		    high, alignment, boundary, memattr);
 		if (m != NULL)
 			break;
-	} while (vm_domainset_iter_page(&di, NULL, &domain, NULL) == 0);
+	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
@@ -3348,7 +3294,7 @@
 
 	ret = ERANGE;
 
-	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req, NULL);
+	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		status = vm_page_reclaim_contig_domain(domain, req, npages,
 		    low, high, alignment, boundary);
@@ -3361,7 +3307,7 @@
 		    "from vm_page_reclaim_contig_domain()", status));
 			ret = ENOMEM;
 		}
-	} while (vm_domainset_iter_page(&di, NULL, &domain, NULL) == 0);
+	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (ret);
 }
@@ -4753,31 +4699,29 @@
 */
 static inline vm_page_t
 vm_page_grab_lookup(vm_object_t object, vm_pindex_t pindex, int allocflags,
-    bool *found, struct pctrie_iter *pages)
+    bool *found)
 {
 	vm_page_t m;
 
-	while ((*found = (m = vm_radix_iter_lookup(pages, pindex)) != NULL) &&
+	while ((*found =
+	    (m = vm_page_lookup(object, pindex)) != NULL) &&
 	    !vm_page_tryacquire(m, allocflags)) {
 		if (!vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 		    allocflags, true))
 			return (NULL);
-		pctrie_iter_reset(pages);
 	}
 	return (m);
 }
 
 /*
- * Grab a page.  Use an iterator parameter.  Keep on waiting, as long as the page
- * exists in the object.  If the page doesn't exist, first allocate it and then
- * conditionally zero it.
+ * Grab a page.  Keep on waiting, as long as the page exists in the object.  If
+ * the page doesn't exist, first allocate it and then conditionally zero it.
 *
 * The object must be locked on entry.  This routine may sleep.  The lock will,
 * however, be released and reacquired if the routine sleeps.
 */
 vm_page_t
-vm_page_grab_iter(vm_object_t object, vm_pindex_t pindex, int allocflags,
-    struct pctrie_iter *pages)
+vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 	bool found;
@@ -4786,14 +4730,14 @@
 	vm_page_grab_check(allocflags);
 
 	while ((m = vm_page_grab_lookup(
-	    object, pindex, allocflags, &found, pages)) == NULL) {
+	    object, pindex, allocflags, &found)) == NULL) {
 		if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 			return (NULL);
 		if (found &&
 		    (allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
 			return (NULL);
-		m = vm_page_alloc_iter(object, pindex,
-		    vm_page_grab_pflags(allocflags), pages);
+		m = vm_page_alloc(object, pindex,
+		    vm_page_grab_pflags(allocflags));
 		if (m != NULL) {
 			if ((allocflags & VM_ALLOC_ZERO) != 0 &&
 			    (m->flags & PG_ZERO) == 0)
@@ -4809,23 +4753,6 @@
 	return (m);
 }
 
-/*
- * Grab a page.  Keep on waiting, as long as the page exists in the object.  If
- * the page doesn't exist, first allocate it and then conditionally zero it.
- *
- * The object must be locked on entry.  This routine may sleep.  The lock will,
- * however, be released and reacquired if the routine sleeps.
- */
-vm_page_t
-vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
-{
-	struct pctrie_iter pages;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	vm_page_iter_init(&pages, object);
-	return (vm_page_grab_iter(object, pindex, allocflags, &pages));
-}
-
 /*
 * Attempt to validate a page, locklessly acquiring it if necessary, given a
 * (object, pindex) tuple and either an invalided page or NULL.  The resulting
@@ -4899,16 +4826,15 @@
 }
 
 /*
- * Grab a page and make it valid, paging in if necessary.  Use an iterator
- * parameter.  Pages missing from their pager are zero filled and validated.  If
- * a VM_ALLOC_COUNT is supplied and the page is not valid as many as
- * VM_INITIAL_PAGEIN pages can be brought in simultaneously.  Additional pages
- * will be left on a paging queue but will neither be wired nor busy regardless
- * of allocflags.
+ * Grab a page and make it valid, paging in if necessary.  Pages missing from
+ * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
+ * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
+ * in simultaneously.  Additional pages will be left on a paging queue but
+ * will neither be wired nor busy regardless of allocflags.
 */
 int
-vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
-    int allocflags, struct pctrie_iter *pages)
+vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
+    int allocflags)
 {
 	vm_page_t m;
 	vm_page_t ma[VM_INITIAL_PAGEIN];
@@ -4926,7 +4852,7 @@
 		pflags |= VM_ALLOC_WAITFAIL;
 
retrylookup:
-	if ((m = vm_radix_iter_lookup(pages, pindex)) != NULL) {
+	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		/*
 		 * If the page is fully valid it can only become invalid
 		 * with the object lock held.  If it is not valid it can
@@ -4940,7 +4866,6 @@
 		    vm_page_all_valid(m) ? allocflags : 0)) {
 			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true);
-			pctrie_iter_reset(pages);
 			goto retrylookup;
 		}
 		if (vm_page_all_valid(m))
@@ -4954,7 +4879,7 @@
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
 	} else {
-		m = vm_page_alloc_iter(object, pindex, pflags, pages);
+		m = vm_page_alloc(object, pindex, pflags);
 		if (m == NULL) {
 			if (!vm_pager_can_alloc_page(object, pindex)) {
 				*mp = NULL;
@@ -4970,15 +4895,14 @@
 		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
 		after = MAX(after, 1);
 		ma[0] = m;
-		pctrie_iter_reset(pages);
 		for (i = 1; i < after; i++) {
-			m = vm_radix_iter_lookup_ge(pages, pindex + i);
+			m = vm_radix_lookup_ge(&object->rtree, pindex + i);
 			ahead = after;
 			if (m != NULL)
 				ahead = MIN(ahead, m->pindex - pindex);
 			for (; i < ahead; i++) {
-				ma[i] = vm_page_alloc_iter(object, pindex + i,
-				    VM_ALLOC_NORMAL, pages);
+				ma[i] = vm_page_alloc(object, pindex + i,
+				    VM_ALLOC_NORMAL);
 				if (ma[i] == NULL)
 					break;
 			}
@@ -4991,7 +4915,6 @@
 		vm_object_pip_add(object, after);
 		VM_OBJECT_WUNLOCK(object);
 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
-		pctrie_iter_reset(pages);
 		VM_OBJECT_WLOCK(object);
 		vm_object_pip_wakeupn(object, after);
 		/* Pager may have replaced a page. */
@@ -5009,10 +4932,8 @@
 		for (i = 1; i < after; i++)
 			vm_page_readahead_finish(ma[i]);
 		MPASS(vm_page_all_valid(m));
-	} else {
+	} else
 		vm_page_zero_invalid(m, TRUE);
-		pctrie_iter_reset(pages);
-	}
out:
 	if ((allocflags & VM_ALLOC_WIRED) != 0)
 		vm_page_wire(m);
@@ -5024,25 +4945,6 @@
 	return (VM_PAGER_OK);
 }
 
-/*
- * Grab a page and make it valid, paging in if necessary.  Pages missing from
- * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
- * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
- * in simultaneously.  Additional pages will be left on a paging queue but
- * will neither be wired nor busy regardless of allocflags.
- */
-int
-vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
-    int allocflags)
-{
-	struct pctrie_iter pages;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	vm_page_iter_init(&pages, object);
-	return (vm_page_grab_valid_iter(mp, object, pindex, allocflags,
-	    &pages));
-}
-
 /*
 * Grab a page.  Keep on waiting, as long as the page exists in the object.  If
 * the page doesn't exist, and the pager has it, allocate it and zero part of
@@ -5055,7 +4957,6 @@
 vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
     int end)
 {
-	struct pctrie_iter pages;
 	vm_page_t m;
 	int allocflags, rv;
 	bool found;
@@ -5066,13 +4967,12 @@
 	    end));
 
 	allocflags = VM_ALLOC_NOCREAT | VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL;
-	vm_page_iter_init(&pages, object);
 	while ((m = vm_page_grab_lookup(
-	    object, pindex, allocflags, &found, &pages)) == NULL) {
+	    object, pindex, allocflags, &found)) == NULL) {
 		if (!vm_pager_has_page(object, pindex, NULL, NULL))
 			return (0);
-		m = vm_page_alloc_iter(object, pindex,
-		    vm_page_grab_pflags(allocflags), &pages);
+		m = vm_page_alloc(object, pindex,
+		    vm_page_grab_pflags(allocflags));
 		if (m != NULL) {
 			vm_object_pip_add(object, 1);
 			VM_OBJECT_WUNLOCK(object);
@@ -5187,7 +5087,6 @@
 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count)
 {
-	struct pctrie_iter pages;
 	vm_page_t m;
 	int pflags;
 	int ahead, i;
@@ -5201,30 +5100,26 @@
 	pflags = vm_page_grab_pflags(allocflags);
 
 	i = 0;
-	vm_page_iter_init(&pages, object);
retrylookup:
 	ahead = -1;
 	for (; i < count; i++) {
 		if (ahead < 0) {
-			ahead = vm_radix_iter_lookup_range(
-			    &pages, pindex + i, &ma[i], count - i);
+			ahead = vm_radix_lookup_range(
+			    &object->rtree, pindex + i, &ma[i], count - i);
 		}
 		if (ahead-- > 0) {
 			m = ma[i];
 			if (!vm_page_tryacquire(m, allocflags)) {
 				if (vm_page_grab_sleep(object, m, pindex + i,
-				    "grbmaw", allocflags, true)) {
-					pctrie_iter_reset(&pages);
+				    "grbmaw", allocflags, true))
 					goto retrylookup;
-				}
 				break;
 			}
 		} else {
 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 				break;
-			m = vm_page_alloc_iter(object, pindex + i,
-			    pflags | VM_ALLOC_COUNT(count - i), &pages);
-			/* pages was reset if alloc_iter lost the lock. */
+			m = vm_page_alloc(object, pindex + i,
+			    pflags | VM_ALLOC_COUNT(count - i));
 			if (m == NULL) {
 				if ((allocflags & (VM_ALLOC_NOWAIT |
 				    VM_ALLOC_WAITFAIL)) != 0)
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -113,6 +113,18 @@
 	    ma, count));
 }
 
+/*
+ * Returns the number of contiguous, non-NULL pages read into the ma[]
+ * array.
+ *
+ * Requires that access be externally synchronized by a lock.
+ */
+static __inline int
+vm_radix_lookup_range(struct vm_radix *rtree, vm_pindex_t index,
+    vm_page_t ma[], int count)
+{
+	return (VM_RADIX_PCTRIE_LOOKUP_RANGE(&rtree->rt_trie, index, ma,
+	    count));
+}
+
 /*
 * Returns the number of contiguous, non-NULL pages read into the ma[]
 * array, without requiring an external lock.
 */
@@ -179,6 +191,18 @@
 	return (VM_RADIX_PCTRIE_LOOKUP_GE(&rtree->rt_trie, index));
 }
 
+/*
+ * Returns the page with the least pindex that is greater than the pindex of
+ * the given page, or NULL if there are no such pages.
+ *
+ * Requires that access be externally synchronized by a lock.
+ */
+static __inline vm_page_t
+vm_radix_lookup_ge_step(struct vm_radix *rtree, vm_page_t m)
+{
+	return (VM_RADIX_PCTRIE_LOOKUP_GE_STEP(&rtree->rt_trie, m));
+}
+
 /*
 * Returns the page with the greatest pindex that is less than or equal to the
 * specified pindex, or NULL if there are no such pages.
@@ -286,16 +310,16 @@
 }
 
 /*
- * Initialize an iterator pointing to the page with the greatest pindex that is
+ * Find the page with the greatest pindex that is
 * less than to the specified pindex, or NULL if there are no such
 * pages.  Return the page.
 *
 * Requires that access be externally synchronized by a lock.
 */
 static __inline vm_page_t
-vm_radix_iter_lookup_lt(struct pctrie_iter *pages, vm_pindex_t index)
+vm_radix_lookup_lt(struct vm_radix *rtree, vm_pindex_t index)
 {
-	return (index == 0 ? NULL : vm_radix_iter_lookup_le(pages, index - 1));
+	return (index == 0 ? NULL : vm_radix_lookup_le(rtree, index - 1));
 }
 
 /*
Index: sys/vm/vm_reserv.h
===================================================================
--- sys/vm/vm_reserv.h
+++ sys/vm/vm_reserv.h
@@ -47,10 +47,9 @@
 */
 vm_page_t	vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
 		    int domain, int req, u_long npages, vm_paddr_t low,
-		    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
-		    struct pctrie_iter *pages);
+		    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
-		    int domain, int req, struct pctrie_iter *pages);
+		    int domain, int req);
 void		vm_reserv_break_all(vm_object_t object);
 boolean_t	vm_reserv_free_page(vm_page_t m);
 void		vm_reserv_init(void);
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -511,12 +511,12 @@
 */
 static vm_reserv_t
 vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
-    vm_page_t *mpredp, vm_page_t *msuccp, struct pctrie_iter *pages)
+    vm_page_t *mpredp, vm_page_t *msuccp)
 {
 	vm_reserv_t rv;
 	vm_page_t mpred, msucc;
 
-	mpred = vm_radix_iter_lookup_lt(pages, pindex);
+	mpred = vm_radix_lookup_le(&object->rtree, pindex);
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object,
 		    ("vm_reserv_from_object: object doesn't contain mpred"));
@@ -527,7 +527,7 @@
 			return (rv);
 	}
 
-	msucc = vm_radix_iter_lookup_ge(pages, pindex);
+	msucc = vm_radix_lookup_ge(&object->rtree, pindex);
 	if (msucc != NULL) {
 		KASSERT(msucc->pindex > pindex,
 		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
@@ -687,7 +687,7 @@
 vm_page_t
 vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
-    vm_paddr_t boundary, struct pctrie_iter *pages)
+    vm_paddr_t boundary)
 {
 	struct vm_domain *vmd;
 	vm_paddr_t pa, size;
@@ -725,7 +725,7 @@
 	/*
 	 * Look for an existing reservation.
 	 */
-	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc, pages);
+	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc);
 	if (rv != NULL) {
 		KASSERT(object != kernel_object || rv->domain == domain,
 		    ("vm_reserv_alloc_contig: domain mismatch"));
@@ -833,7 +833,7 @@
 */
 vm_page_t
 vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
-    int req, struct pctrie_iter *pages)
+    int req)
 {
 	struct vm_domain *vmd;
 	vm_page_t m, mpred, msucc;
@@ -853,7 +853,7 @@
 	/*
 	 * Look for an existing reservation.
 	 */
-	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc, pages);
+	rv = vm_reserv_from_object(object, pindex, &mpred, &msucc);
 	if (rv != NULL) {
 		KASSERT(object != kernel_object || rv->domain == domain,
 		    ("vm_reserv_alloc_page: domain mismatch"));
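
Note on the traversal pattern: sgx_enclave_remove() above replaces the
VM_RADIX_FOREACH() iterator loop with a vm_radix_lookup_ge()/
vm_radix_lookup_ge_step() walk.  A minimal sketch of that pattern, with the
hypothetical helper object_remove_all_pages() standing in for the sgx-specific
removal code:

static void
object_remove_all_pages(vm_object_t object)
{
	vm_page_t p;

	VM_OBJECT_ASSERT_WLOCKED(object);
restart:
	for (p = vm_radix_lookup_ge(&object->rtree, 0); p != NULL;
	    p = vm_radix_lookup_ge_step(&object->rtree, p)) {
		/*
		 * Busy-acquire can drop the object lock and sleep; with no
		 * iterator state left to go stale, recovery is simply a
		 * restart from the lowest pindex.
		 */
		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0)
			goto restart;
		/* vm_page_remove() also unbusies the page. */
		(void)vm_page_remove(p);
	}
}

As in the patched sgx_enclave_remove(), the step call assumes the just-removed
page's pindex remains usable to locate its successor in the tree.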
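Note on the dropped pctrie_iter_reset() calls: a plain lookup starts from the
radix root on every pass, so after a sleep that drops the object lock there is
no cached iterator path to invalidate.  A sketch of the resulting sleep/retry
shape, mirroring vm_page_grab_lookup() above (lookup_and_acquire is a
hypothetical name; vm_page_tryacquire() and vm_page_grab_sleep() are the
vm_page.c internals visible in the hunks):

static vm_page_t
lookup_and_acquire(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	while ((m = vm_page_lookup(object, pindex)) != NULL &&
	    !vm_page_tryacquire(m, allocflags)) {
		/* May drop and reacquire the object lock. */
		if (!vm_page_grab_sleep(object, m, pindex, "pgrbwt",
		    allocflags, true))
			return (NULL);
		/* Nothing to reset: the next lookup restarts at the root. */
	}
	return (m);
}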
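Note on the new vm_radix_lookup_range() wrapper: it is the externally-locked
counterpart of the unlocked variant declared next to it in vm_radix.h.  A
usage sketch with hypothetical local names (collect_resident_run, run[],
want), following the way vm_page_grab_pages() consumes it above:

static int
collect_resident_run(vm_object_t object, vm_pindex_t pindex,
    vm_page_t run[], int want)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * Fill run[] with up to "want" consecutive resident pages starting
	 * at "pindex"; the returned count stops at the first non-resident
	 * index, per the wrapper's comment ("contiguous, non-NULL pages").
	 */
	return (vm_radix_lookup_range(&object->rtree, pindex, run, want));
}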