Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -951,7 +951,7 @@
 	vmd = vm_pagequeue_domain(m);
 	vm_domain_free_lock(vmd);
-	vm_phys_free_pages(m, 0);
+	vm_phys_free_pages(m, m->pool, 0);
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, 1);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -569,6 +569,7 @@
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
 	long ii;
 #endif
+	int pool;
 #ifdef VM_FREEPOOL_LAZYINIT
 	int lazyinit;
 #endif
@@ -752,9 +753,12 @@
 	 */
 	vm_phys_init();
 
+	pool = VM_FREEPOOL_DEFAULT;
 #ifdef VM_FREEPOOL_LAZYINIT
 	lazyinit = 1;
 	TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
+	if (lazyinit)
+		pool = VM_FREEPOOL_LAZYINIT;
 #endif
 
 	/*
@@ -774,48 +778,27 @@
 		seg = &vm_phys_segs[segind];
 
 		/*
-		 * If lazy vm_page initialization is not enabled, simply
-		 * initialize all of the pages in the segment.  Otherwise, we
-		 * only initialize:
-		 * 1. Pages not covered by phys_avail[], since they might be
-		 *    freed to the allocator at some future point, e.g., by
-		 *    kmem_bootstrap_free().
-		 * 2. The first page of each run of free pages handed to the
-		 *    vm_phys allocator, which in turn defers initialization
-		 *    of pages until they are needed.
-		 * This avoids blocking the boot process for long periods, which
-		 * may be relevant for VMs (which ought to boot as quickly as
-		 * possible) and/or systems with large amounts of physical
-		 * memory.
+		 * Initialize pages not covered by phys_avail[], since they
+		 * might be freed to the allocator at some future point, e.g.,
+		 * by kmem_bootstrap_free().
 		 */
-#ifdef VM_FREEPOOL_LAZYINIT
-		if (lazyinit) {
-			startp = seg->start;
-			for (i = 0; phys_avail[i + 1] != 0; i += 2) {
-				if (startp >= seg->end)
-					break;
-
-				if (phys_avail[i + 1] < startp)
-					continue;
-				if (phys_avail[i] <= startp) {
-					startp = phys_avail[i + 1];
-					continue;
-				}
-
-				m = vm_phys_seg_paddr_to_vm_page(seg, startp);
-				for (endp = MIN(phys_avail[i], seg->end);
-				    startp < endp; startp += PAGE_SIZE, m++) {
-					vm_page_init_page(m, startp, segind,
-					    VM_FREEPOOL_DEFAULT);
-				}
+		startp = seg->start;
+		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+			if (startp >= seg->end)
+				break;
+			if (phys_avail[i + 1] < startp)
+				continue;
+			if (phys_avail[i] <= startp) {
+				startp = phys_avail[i + 1];
+				continue;
 			}
-		} else
-#endif
-			for (m = seg->first_page, pa = seg->start;
-			    pa < seg->end; m++, pa += PAGE_SIZE) {
-				vm_page_init_page(m, pa, segind,
+			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
+			for (endp = MIN(phys_avail[i], seg->end);
+			    startp < endp; startp += PAGE_SIZE, m++) {
+				vm_page_init_page(m, startp, segind,
 				    VM_FREEPOOL_DEFAULT);
 			}
+		}
 
 		/*
 		 * Add the segment's pages that are covered by one of
@@ -832,16 +815,30 @@
 			if (pagecount == 0)
 				continue;
 
+			/*
+			 * If lazy vm_page initialization is not enabled, simply
+			 * initialize all of the pages in the segment covered by
+			 * phys_avail.  Otherwise, initialize only the first
+			 * page of each run of free pages handed to the vm_phys
+			 * allocator, which in turn defers initialization of
+			 * pages until they are needed.
+			 *
+			 * This avoids blocking the boot process for long
+			 * periods, which may be relevant for VMs (which ought
+			 * to boot as quickly as possible) and/or systems with
+			 * large amounts of physical memory.
+			 */
 			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
-#ifdef VM_FREEPOOL_LAZYINIT
-			if (lazyinit) {
-				vm_page_init_page(m, startp, segind,
-				    VM_FREEPOOL_LAZYINIT);
+			vm_page_init_page(m, startp, segind, pool);
+			if (pool == VM_FREEPOOL_DEFAULT) {
+				for (int j = 1; j < pagecount; j++) {
+					vm_page_init_page(&m[j],
+					    startp + ptoa(j), segind, pool);
+				}
 			}
-#endif
 			vmd = VM_DOMAIN(seg->domain);
 			vm_domain_free_lock(vmd);
-			vm_phys_enqueue_contig(m, pagecount);
+			vm_phys_enqueue_contig(m, pool, pagecount);
 			vm_domain_free_unlock(vmd);
 			vm_domain_freecnt_inc(vmd, pagecount);
 			vm_cnt.v_page_count += (u_int)pagecount;
@@ -2166,6 +2163,7 @@
 	m->flags = flags;
 	m->a.flags = 0;
 	m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
+	m->pool = VM_FREEPOOL_DEFAULT;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 		m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	else if ((req & VM_ALLOC_SBUSY) != 0)
@@ -2383,6 +2381,7 @@
 	m->ref_count = 1;
 	m->a.act_count = 0;
 	m->oflags = oflags;
+	m->pool = VM_FREEPOOL_DEFAULT;
 	if (vm_page_insert_after(m, object, pindex, mpred)) {
 		if ((req & VM_ALLOC_WIRED) != 0)
 			vm_wire_sub(npages);
@@ -2480,6 +2479,7 @@
 	m->flags = (m->flags & PG_ZERO) | flags;
 	m->a.flags = 0;
 	m->oflags = VPO_UNMANAGED;
+	m->pool = VM_FREEPOOL_DIRECT;
 	m->busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0) {
 		vm_wire_add(1);
@@ -2627,6 +2627,7 @@
 	m->ref_count = 1;
 	m->a.act_count = 0;
 	m->oflags = VPO_UNMANAGED;
+	m->pool = VM_FREEPOOL_DIRECT;
 
 	/*
 	 * Zero the page before updating any mappings since the page is
@@ -2705,7 +2706,7 @@
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
 		m = (vm_page_t)store[i];
-		vm_phys_free_pages(m, 0);
+		vm_phys_free_pages(m, pgcache->pool, 0);
 	}
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, cnt);
@@ -3090,7 +3091,7 @@
 	do {
 		MPASS(vm_page_domain(m) == domain);
 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
-		vm_phys_free_pages(m, 0);
+		vm_phys_free_pages(m, m->pool, 0);
 		cnt++;
 	} while ((m = SLIST_FIRST(&free)) != NULL);
 	vm_domain_free_unlock(vmd);
@@ -4109,7 +4110,7 @@
 		return;
 	}
 	vm_domain_free_lock(vmd);
-	vm_phys_free_pages(m, 0);
+	vm_phys_free_pages(m, m->pool, 0);
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, 1);
 }
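Review note on the vm_page.c hunks above: with the lazy pool enabled, vm_page_startup() now initializes only the first vm_page of each free run and tags the run VM_FREEPOOL_LAZYINIT; otherwise it initializes every page up front. The sketch below models just that decision in userspace to make the boot-time cost difference concrete. It is illustrative code only, not kernel code; struct page, init_page(), and the run size are invented stand-ins.

```c
#include <stdio.h>

enum { VM_FREEPOOL_DEFAULT, VM_FREEPOOL_LAZYINIT };

struct page {
	int pool;
	int initialized;
};

static unsigned touched;

static void
init_page(struct page *m, int pool)	/* stands in for vm_page_init_page() */
{
	m->pool = pool;
	m->initialized = 1;
	touched++;
}

int
main(void)
{
	static struct page run[1 << 16];	/* one free run of 64Ki pages */
	unsigned npages = 1 << 16;
	int lazyinit = 1;			/* models debug.vm.lazy_page_init */
	int pool;

	pool = VM_FREEPOOL_DEFAULT;
	if (lazyinit)
		pool = VM_FREEPOOL_LAZYINIT;

	/* The first page of the run always carries valid state. */
	init_page(&run[0], pool);
	/* The rest are touched at boot only for the default pool. */
	if (pool == VM_FREEPOOL_DEFAULT)
		for (unsigned j = 1; j < npages; j++)
			init_page(&run[j], pool);

	printf("pages touched at boot: %u of %u\n", touched, npages);
	return (0);
}
```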
Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -64,15 +64,15 @@
 int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
+void vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
 void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
 vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa);
 int vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
     u_long npages, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_free_contig(vm_page_t m, u_long npages);
-void vm_phys_free_pages(vm_page_t m, int order);
+void vm_phys_free_contig(vm_page_t m, int pool, u_long npages);
+void vm_phys_free_pages(vm_page_t m, int pool, int order);
 void vm_phys_init(void);
 vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 vm_page_t vm_phys_seg_paddr_to_vm_page(struct vm_phys_seg *seg, vm_paddr_t pa);
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -187,7 +187,7 @@
 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
-    int order, int tail);
+    int order, int pool, int tail);
 
 static bool __diagused
 vm_phys_pool_valid(int pool)
@@ -391,10 +391,12 @@
 #endif
 
 static void
-vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
+vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int pool,
+    int tail)
 {
 
 	m->order = order;
+	m->pool = pool;
 	if (tail)
 		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
 	else
@@ -687,7 +689,7 @@
  */
 static __inline void
 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
-    int tail)
+    int pool, int tail)
 {
 	vm_page_t m_buddy;
 
@@ -697,19 +699,20 @@
 		KASSERT(m_buddy->order == VM_NFREEORDER,
 		    ("vm_phys_split_pages: page %p has unexpected order %d",
 		    m_buddy, m_buddy->order));
-		vm_freelist_add(fl, m_buddy, oind, tail);
+		vm_freelist_add(fl, m_buddy, oind, pool, tail);
 	}
 }
 
 static void
-vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int tail)
+vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int pool,
+    int tail)
 {
 	KASSERT(order >= 0 && order < VM_NFREEORDER,
 	    ("%s: invalid order %d", __func__, order));
 
-	vm_freelist_add(fl, m, order, tail);
+	vm_freelist_add(fl, m, order, pool, tail);
 #ifdef VM_FREEPOOL_LAZYINIT
-	if (__predict_false(m->pool == VM_FREEPOOL_LAZYINIT)) {
+	if (__predict_false(pool == VM_FREEPOOL_LAZYINIT)) {
 		vm_page_t m_next;
 		vm_paddr_t pa;
 		int npages;
@@ -738,7 +741,8 @@
  * The physical page m's buddy must not be free.
  */
 static void
-vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+    int tail)
 {
 	int order;
 
@@ -754,7 +758,7 @@
 		order = ilog2(npages);
 		KASSERT(order < VM_NFREEORDER,
 		    ("%s: order %d is out of range", __func__, order));
-		vm_phys_enq_chunk(fl, m, order, tail);
+		vm_phys_enq_chunk(fl, m, order, pool, tail);
 		m += 1 << order;
 		npages -= 1 << order;
 	}
@@ -774,7 +778,8 @@
  * parameter m.  Otherwise, the physical page m's buddy must not be free.
 */
 static vm_page_t
-vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+    int tail)
 {
 	int order;
 
@@ -788,7 +793,7 @@
 		    ("vm_phys_enq_range: page %p has unexpected order %d",
 		    m, m->order));
 		order = ffs(npages) - 1;
-		vm_phys_enq_chunk(fl, m, order, tail);
+		vm_phys_enq_chunk(fl, m, order, pool, tail);
 		m += 1 << order;
 		npages -= 1 << order;
 	}
	return (m);
 }
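Review note: vm_phys_enq_range() and vm_phys_enq_beg(), which now take the pool explicitly, both carve an arbitrary run into the power-of-two blocks the buddy queues can hold, but from opposite ends of the size spectrum: enq_range picks order = ffs(npages) - 1, so block sizes ascend from a buddy-aligned front, while enq_beg picks order = ilog2(npages), so they descend. A minimal userspace sketch of the two decompositions, with printf standing in for the enqueue:

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int
ilog2(unsigned x)
{
	return (8 * (int)sizeof(x) - 1 - __builtin_clz(x));
}

static void
enq_range(unsigned npages)	/* ascending sizes, buddy-aligned front run */
{
	while (npages > 0) {
		int order = ffs(npages) - 1;
		printf("%u ", 1U << order);
		npages -= 1U << order;
	}
	printf("\n");
}

static void
enq_beg(unsigned npages)	/* descending sizes, arbitrary run */
{
	while (npages > 0) {
		int order = ilog2(npages);
		printf("%u ", 1U << order);
		npages -= 1U << order;
	}
	printf("\n");
}

int
main(void)
{
	enq_range(13);	/* prints: 1 4 8 */
	enq_beg(13);	/* prints: 8 4 1 */
	return (0);
}
```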
@@ -796,33 +801,30 @@
 }
 
 /*
- * Set the pool for a contiguous, power of two-sized set of physical pages.
+ * Complete initialization of a contiguous, power of two-sized set of
+ * physical pages.
  *
  * If the pages currently belong to the lazy init pool, then the corresponding
 * page structures must be initialized.  In this case it is assumed that the
 * first page in the run has already been initialized.
 */
 static void
-vm_phys_set_pool(int pool, vm_page_t m, int order)
+vm_phys_finish_init(vm_page_t m, int order)
 {
 #ifdef VM_FREEPOOL_LAZYINIT
 	if (__predict_false(m->pool == VM_FREEPOOL_LAZYINIT)) {
 		vm_paddr_t pa;
 		int segind;
 
-		m->pool = pool;
-
 		TSENTER();
 		pa = m->phys_addr + PAGE_SIZE;
 		segind = m->segind;
 		for (vm_page_t m_tmp = m + 1; m_tmp < &m[1 << order];
 		    m_tmp++, pa += PAGE_SIZE)
-			vm_page_init_page(m_tmp, pa, segind, pool);
+			vm_page_init_page(m_tmp, pa, segind, VM_NFREEPOOL);
 		TSEXIT();
-	} else
+	}
 #endif
-		for (vm_page_t m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
-			m_tmp->pool = pool;
 }
 
 /*
@@ -833,7 +835,8 @@
  * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
- * avoid wasted time in vm_phys_split_pages().
+ * avoid wasted time in vm_phys_split_pages().  The allocated pages have no
+ * valid pool field set.
 *
 * The free page queues for the specified domain must be locked.
 */
@@ -869,7 +872,8 @@
 				/*
 				 * Return excess pages to fl.  Its order
 				 * [0, oind) queues are empty.
 				 */
-				vm_phys_enq_range(m, avail - i, fl, 1);
+				vm_phys_enq_range(m, avail - i, fl,
+				    pool, 1);
 				return (npages);
 			}
 		}
@@ -881,7 +885,7 @@
 			while ((m = TAILQ_FIRST(&alt[oind].pl)) != NULL) {
 				vm_freelist_rem(alt, m, oind);
-				vm_phys_set_pool(pool, m, oind);
+				vm_phys_finish_init(m, oind);
 				avail = i + (1 << oind);
 				end = imin(npages, avail);
 				while (i < end)
@@ -893,7 +897,7 @@
 					 * are empty.
 					 */
 					vm_phys_enq_range(m, avail - i,
-					    fl, 1);
+					    fl, pool, 1);
 					return (npages);
 				}
 			}
@@ -940,7 +944,7 @@
 		if (m != NULL) {
 			vm_freelist_rem(fl, m, oind);
 			/* The order [order, oind) queues are empty. */
-			vm_phys_split_pages(m, oind, fl, order, 1);
+			vm_phys_split_pages(m, oind, fl, order, pool, 1);
 			return (m);
 		}
 	}
@@ -957,9 +961,9 @@
 		m = TAILQ_FIRST(&alt[oind].pl);
 		if (m != NULL) {
 			vm_freelist_rem(alt, m, oind);
-			vm_phys_set_pool(pool, m, oind);
+			vm_phys_finish_init(m, oind);
 			/* The order [order, oind) queues are empty. */
-			vm_phys_split_pages(m, oind, fl, order, 1);
+			vm_phys_split_pages(m, oind, fl, order, pool, 1);
 			return (m);
 		}
 	}
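Review note: vm_phys_split_pages() above now threads the caller's pool through to vm_freelist_add() while it peels the upper buddy off a larger block until only the requested order remains. A sketch of that loop using page frame numbers instead of vm_page pointers; split_pages() and its output format are invented for illustration:

```c
#include <stdio.h>

/*
 * Split a free block of order 'oind' starting at 'pfn' down to 'order',
 * returning each upper-half buddy to a (printed) freelist, as
 * vm_phys_split_pages() does with vm_freelist_add().
 */
static void
split_pages(unsigned pfn, int oind, int order)
{
	while (oind > order) {
		oind--;
		/* The upper half of the block becomes a free buddy. */
		printf("free buddy: pfn %u, order %d (%u pages)\n",
		    pfn + (1U << oind), oind, 1U << oind);
	}
	printf("allocated: pfn %u, order %d\n", pfn, order);
}

int
main(void)
{
	split_pages(0, 4, 1);	/* 16-page block -> buddies of 8, 4, 2 */
	return (0);
}
```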
@@ -1198,11 +1202,12 @@
 /*
  * Free a contiguous, power of two-sized set of physical pages.
+ * The pages are freed to the specified pool.
  *
 * The free page queues must be locked.
 */
 void
-vm_phys_free_pages(vm_page_t m, int order)
+vm_phys_free_pages(vm_page_t m, int pool, int order)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
@@ -1210,13 +1215,12 @@
 	vm_paddr_t pa;
 	vm_page_t m_buddy;
 
 	KASSERT(m->order == VM_NFREEORDER,
-	    ("vm_phys_free_pages: page %p has unexpected order %d",
-	    m, m->order));
-	KASSERT(vm_phys_pool_valid(m->pool),
-	    ("vm_phys_free_pages: page %p has unexpected pool %d",
-	    m, m->pool));
+	    ("%s: page %p has unexpected order %d",
+	    __func__, m, m->order));
+	KASSERT(vm_phys_pool_valid(pool),
+	    ("%s: unexpected pool param %d", __func__, pool));
 	KASSERT(order < VM_NFREEORDER,
-	    ("vm_phys_free_pages: order %d is out of range", order));
+	    ("%s: order %d is out of range", __func__, order));
 	seg = &vm_phys_segs[m->segind];
 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	if (order < VM_NFREEORDER - 1) {
@@ -1230,15 +1234,14 @@
 				break;
 			fl = (*seg->free_queues)[m_buddy->pool];
 			vm_freelist_rem(fl, m_buddy, order);
-			if (m_buddy->pool != m->pool)
-				vm_phys_set_pool(m->pool, m_buddy, order);
+			vm_phys_finish_init(m_buddy, order);
 			order++;
 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
 			m = vm_phys_seg_paddr_to_vm_page(seg, pa);
 		} while (order < VM_NFREEORDER - 1);
 	}
-	fl = (*seg->free_queues)[m->pool];
-	vm_freelist_add(fl, m, order, 1);
+	fl = (*seg->free_queues)[pool];
+	vm_freelist_add(fl, m, order, pool, 1);
 }
 
 #ifdef VM_FREEPOOL_LAZYINIT
@@ -1290,12 +1293,13 @@
 			    VM_ALLOC_NORMAL, 1 << oind);
 			if (unlocked)
 				vm_domain_free_unlock(vmd);
-			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
+			vm_phys_finish_init(m, oind);
 			if (unlocked) {
 				vm_domain_freecnt_inc(vmd, 1 << oind);
 				vm_domain_free_lock(vmd);
 			}
-			vm_phys_free_pages(m, oind);
+			vm_phys_free_pages(m, VM_FREEPOOL_DEFAULT,
+			    oind);
 		}
 	}
 }
@@ -1344,12 +1348,12 @@
 /*
  * Free a contiguous, arbitrarily sized set of physical pages, without
- * merging across set boundaries.
+ * merging across set boundaries.  Assumes no pages have a valid pool field.
 *
 * The free page queues must be locked.
 */
 void
-vm_phys_enqueue_contig(vm_page_t m, u_long npages)
+vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
@@ -1363,14 +1367,15 @@
 	 */
 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
 	seg = &vm_phys_segs[m->segind];
-	fl = (*seg->free_queues)[m->pool];
+	fl = (*seg->free_queues)[pool];
 	m_end = m + npages;
 	/* Free blocks of increasing size. */
 	lo = atop(VM_PAGE_TO_PHYS(m));
 	if (m < m_end && (diff = lo ^ (lo + npages - 1)) != 0) {
 		order = min(ilog2(diff), VM_NFREEORDER - 1);
-		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
+		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl,
+		    pool, 1);
 	}
@@ -1379,20 +1384,22 @@
 		KASSERT(seg == &vm_phys_segs[m->segind],
 		    ("%s: page range [%p,%p) spans multiple segments",
 		    __func__, m_end - npages, m));
-		vm_phys_enq_chunk(fl, m, order, 1);
+		vm_phys_enq_chunk(fl, m, order, pool, 1);
 		m += 1 << order;
 	}
 	/* Free blocks of diminishing size. */
-	vm_phys_enq_beg(m, m_end - m, fl, 1);
+	vm_phys_enq_beg(m, m_end - m, fl, pool, 1);
 }
 
 /*
  * Free a contiguous, arbitrarily sized set of physical pages.
+ * Assumes that every page but the first has no valid pool field; the pages
+ * are freed to the specified pool.
 *
 * The free page queues must be locked.
 */
 void
-vm_phys_free_contig(vm_page_t m, u_long npages)
+vm_phys_free_contig(vm_page_t m, int pool, u_long npages)
 {
 	vm_paddr_t lo;
 	vm_page_t m_start, m_end;
@@ -1416,11 +1423,11 @@
 	 * end of the range last.
 	 */
 	if (m_start < m_end)
-		vm_phys_enqueue_contig(m_start, m_end - m_start);
+		vm_phys_enqueue_contig(m_start, pool, m_end - m_start);
 	if (order_start < max_order)
-		vm_phys_free_pages(m, order_start);
+		vm_phys_free_pages(m, pool, order_start);
 	if (order_end < max_order)
-		vm_phys_free_pages(m_end, order_end);
+		vm_phys_free_pages(m_end, pool, order_end);
 }
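Review note: the coalescing loop in vm_phys_free_pages() finds a block's buddy by flipping a single physical-address bit (pa ^= 1 << (PAGE_SHIFT + order)) and merges upward while the buddy is free at the matching order; under this patch the merged block is finally queued in the caller-supplied pool rather than m->pool. A userspace model of that loop, with pfns standing in for physical addresses and an array standing in for the freelists; free_order[] and free_page_run() are invented names:

```c
#include <stdio.h>

#define NPFN	 16
#define MAXORDER 3

/* free_order[pfn] = order of a free block starting at pfn, or -1. */
static int free_order[NPFN];

static void
free_page_run(unsigned pfn, int order)
{
	while (order < MAXORDER) {
		unsigned buddy = pfn ^ (1U << order);
		if (free_order[buddy] != order)	/* buddy not free at this order */
			break;
		free_order[buddy] = -1;		/* remove buddy from "freelist" */
		order++;
		pfn &= ~((1U << order) - 1);	/* start of the merged block */
	}
	free_order[pfn] = order;
	printf("inserted block: pfn %u, order %d\n", pfn, order);
}

int
main(void)
{
	for (unsigned i = 0; i < NPFN; i++)
		free_order[i] = -1;
	/* Free pages 0..7 one by one; watch them merge up to order 3. */
	for (unsigned i = 0; i < 8; i++)
		free_page_run(i, 0);
	return (0);
}
```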
@@ -1474,15 +1481,15 @@
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
 	vm_paddr_t pa_half;
 	vm_page_t m, m_set, m_tmp;
-	int order;
+	int order, pool;
 
 	seg = vm_phys_paddr_to_seg(pa);
 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 
+#ifdef VM_FREEPOOL_LAZYINIT
 	/*
 	 * The pages on the free lists must be initialized.
 	 */
-#ifdef VM_FREEPOOL_LAZYINIT
 	vm_phys_lazy_init_domain(seg->domain, true);
 #endif
 
@@ -1515,7 +1522,8 @@
 	 * is larger than a page, shrink "m_set" by returning the half
 	 * of "m_set" that does not contain "m" to the free lists.
 	 */
-	fl = (*seg->free_queues)[m_set->pool];
+	pool = m_set->pool;
+	fl = (*seg->free_queues)[pool];
 	order = m_set->order;
 	vm_freelist_rem(fl, m_set, order);
 	while (order > 0) {
@@ -1527,7 +1535,7 @@
 			m_tmp = m_set;
 			m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
 		}
-		vm_freelist_add(fl, m_tmp, order, 0);
+		vm_freelist_add(fl, m_tmp, order, pool, 0);
 	}
 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
 	return (true);
@@ -1668,7 +1676,8 @@
  * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
- * "alignment" and "boundary" must be a power of two.
+ * "alignment" and "boundary" must be a power of two.  Sets the pool
+ * field to VM_FREEPOOL_DEFAULT in the first allocated page.
 */
 vm_page_t
 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -1727,12 +1736,12 @@
 		fl = (*queues)[m->pool];
 		oind = m->order;
 		vm_freelist_rem(fl, m, oind);
-		if (m->pool != VM_FREEPOOL_DEFAULT)
-			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
+		vm_phys_finish_init(m, oind);
 	}
 	/* Return excess pages to the free lists. */
 	fl = (*queues)[VM_FREEPOOL_DEFAULT];
-	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
+	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl,
+	    VM_FREEPOOL_DEFAULT, 0);
 
 	/* Return page verified to satisfy conditions of request. */
 	pa_start = VM_PAGE_TO_PHYS(m_run);
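Review note: vm_phys_unfree_page() runs the buddy machinery in reverse, repeatedly returning the half of the containing free block that does not hold the target page (pa_half is the block address XORed with the half-size bit) until only the target remains; the patch caches m_set->pool once so every returned half lands in the same pool. A sketch of the halving walk, under the same pfn simplification as above:

```c
#include <stdio.h>

/* Carve one target page out of a free block [pfn, pfn + 2^order). */
static void
unfree_page(unsigned pfn, int order, unsigned target)
{
	while (order > 0) {
		order--;
		/* Start of the upper half of the current block. */
		unsigned half = pfn ^ (1U << order);
		if (target < half) {
			/* Target is in the lower half; free the upper. */
			printf("free [%u,%u), order %d\n",
			    half, half + (1U << order), order);
		} else {
			/* Target is in the upper half; free the lower. */
			printf("free [%u,%u), order %d\n",
			    pfn, pfn + (1U << order), order);
			pfn = half;
		}
	}
	printf("extracted page %u\n", pfn);	/* pfn == target here */
}

int
main(void)
{
	unfree_page(0, 3, 5);	/* pull page 5 out of the block [0,8) */
	return (0);
}
```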
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -480,7 +480,7 @@
 	if (rv->popcnt == 0) {
 		vm_reserv_remove(rv);
 		vm_domain_free_lock(vmd);
-		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
+		vm_phys_free_pages(rv->pages, rv->pages->pool, VM_LEVEL_0_ORDER);
 		vm_domain_free_unlock(vmd);
 		counter_u64_add(vm_reserv_freed, 1);
 	}
@@ -943,7 +943,7 @@
 vm_reserv_break(vm_reserv_t rv)
 {
 	vm_page_t m;
-	int hi, lo, pos;
+	int pool, pos, pos0, pos1;
 
 	vm_reserv_assert_locked(rv);
 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
@@ -954,23 +954,27 @@
 	for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
 #endif
 		m->psind = 0;
-	hi = lo = -1;
-	pos = 0;
-	for (;;) {
-		bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
-		if (lo == hi) {
-			if (pos == -1)
-				break;
-			lo = pos;
-			continue;
-		}
+	pool = rv->pages->pool;
+	pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
+	pos1 = -1 - pos0;
+	for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
+		/* Find the first different bit after pos. */
+		bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
+		    pos1 < pos0, &pos);
 		if (pos == -1)
 			pos = VM_LEVEL_0_NPAGES;
-		hi = pos;
+		if (pos0 < pos1) {
+			/* Set pool for pages from pos1 to pos. */
+			pos0 = pos1;
+			while (pos0 < pos)
+				rv->pages[pos0++].pool = pool;
+			continue;
+		}
+		/* Free unused pages from pos0 to pos. */
+		pos1 = pos;
 		vm_domain_free_lock(VM_DOMAIN(rv->domain));
-		vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
+		vm_phys_enqueue_contig(&rv->pages[pos0], pool, pos1 - pos0);
 		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
-		lo = hi;
 	}
 	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
 	rv->popcnt = 0;
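Review note: the rewritten vm_reserv_break() makes a single pass over popmap, alternating between runs of populated pages, which get their pool field stamped from the reservation's first page, and runs of free pages, which are handed to vm_phys_enqueue_contig() as whole chunks. The pos0/pos1 bookkeeping is compact but dense; the userspace scan below is an equivalent, more explicit formulation (the inner while loop models what bit_ff_at() computes), with the bitmap contents invented for the demo:

```c
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

int
main(void)
{
	/* true = page still populated, false = free (models popmap). */
	bool popmap[NPAGES] = { false };
	for (int i = 4; i <= 7; i++)
		popmap[i] = true;
	popmap[12] = true;

	int pos = 0;
	while (pos < NPAGES) {
		int start = pos;
		bool populated = popmap[pos];
		/* Advance to the first bit that differs, like bit_ff_at(). */
		while (pos < NPAGES && popmap[pos] == populated)
			pos++;
		if (populated)
			printf("stamp pool on pages [%d,%d)\n", start, pos);
		else
			printf("enqueue_contig pages [%d,%d)\n", start, pos);
	}
	return (0);
}
```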