diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 22776e2196b0..6343fb66cfa3 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -1,1056 +1,1058 @@
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  *	Kernel memory management.
  */
 
 #include <sys/cdefs.h>
 #include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/asan.h>
 #include <sys/domainset.h>
 #include <sys/eventhandler.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/msan.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmem.h>
 #include <sys/vmmeter.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/vm_domainset.h>
 #include <vm/vm_kern.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_pagequeue.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_radix.h>
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
 struct vm_map kernel_map_store;
 struct vm_map exec_map_store;
 struct vm_map pipe_map_store;
 
 const void *zero_region;
 CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
 
 /* NB: Used by kernel debuggers. */
 const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;
 
 u_int exec_map_entry_size;
 u_int exec_map_entries;
 
 SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
     SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");
 
 SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
 #if defined(__arm__)
     &vm_max_kernel_address, 0,
 #else
     SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
 #endif
     "Max kernel address");
 
 #if VM_NRESERVLEVEL > 1
 #define	KVA_QUANTUM_SHIFT	(VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER + \
     PAGE_SHIFT)
 #elif VM_NRESERVLEVEL > 0
 #define	KVA_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
 #else
 /* On non-superpage architectures we want large import sizes. */
 #define	KVA_QUANTUM_SHIFT	(8 + PAGE_SHIFT)
 #endif
 #define	KVA_QUANTUM		(1ul << KVA_QUANTUM_SHIFT)
 #define	KVA_NUMA_IMPORT_QUANTUM	(KVA_QUANTUM * 128)
 
 extern void     uma_startup2(void);
 
 /*
  *	kva_alloc:
  *
  *	Allocate a virtual address range with no underlying object and
  *	no initial mapping to physical memory.  Any mapping from this
  *	range to physical memory must be explicitly created prior to
  *	its use, typically with pmap_qenter().  Any attempt to create
  *	a mapping on demand through vm_fault() will result in a panic. 
  */
 vm_offset_t
 kva_alloc(vm_size_t size)
 {
 	vm_offset_t addr;
 
 	TSENTER();
 	size = round_page(size);
 	if (vmem_xalloc(kernel_arena, size, 0, 0, 0, VMEM_ADDR_MIN,
 	    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr))
 		return (0);
 	TSEXIT();
 
 	return (addr);
 }
 
 /*
  *	kva_alloc_aligned:
  *
  *	Allocate a virtual address range as in kva_alloc where the base
  *	address is aligned to align.
  */
 vm_offset_t
 kva_alloc_aligned(vm_size_t size, vm_size_t align)
 {
 	vm_offset_t addr;
 
 	TSENTER();
 	size = round_page(size);
 	if (vmem_xalloc(kernel_arena, size, align, 0, 0, VMEM_ADDR_MIN,
 	    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr))
 		return (0);
 	TSEXIT();
 
 	return (addr);
 }
 
 /*
  *	kva_free:
  *
  *	Release a region of kernel virtual memory allocated
  *	with kva_alloc.  This routine does not free any physical
  *	pages that may still be mapped into the region.
  *
  *	This routine may not block on kernel maps.
  */
 void
 kva_free(vm_offset_t addr, vm_size_t size)
 {
 
 	size = round_page(size);
 	vmem_xfree(kernel_arena, addr, size);
 }
 
 /*
  * Update sanitizer shadow state to reflect a new allocation.  Force inlining to
  * help make KMSAN origin tracking more precise.
  */
 static __always_inline void
 kmem_alloc_san(vm_offset_t addr, vm_size_t size, vm_size_t asize, int flags)
 {
 	if ((flags & M_ZERO) == 0) {
 		kmsan_mark((void *)addr, asize, KMSAN_STATE_UNINIT);
 		kmsan_orig((void *)addr, asize, KMSAN_TYPE_KMEM,
 		    KMSAN_RET_ADDR);
 	} else {
 		kmsan_mark((void *)addr, asize, KMSAN_STATE_INITED);
 	}
 	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
 }
 
 static vm_page_t
 kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
     int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	vm_page_t m;
 	int tries;
 	bool wait, reclaim;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	wait = (pflags & VM_ALLOC_WAITOK) != 0;
 	reclaim = (pflags & VM_ALLOC_NORECLAIM) == 0;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	for (tries = wait ? 3 : 1;; tries--) {
 		m = vm_page_alloc_contig_domain(object, pindex, domain, pflags,
 		    npages, low, high, alignment, boundary, memattr);
 		if (m != NULL || tries == 0 || !reclaim)
 			break;
 
 		VM_OBJECT_WUNLOCK(object);
 		if (vm_page_reclaim_contig_domain(domain, pflags, npages,
 		    low, high, alignment, boundary) == ENOMEM && wait)
 			vm_wait_domain(domain);
 		VM_OBJECT_WLOCK(object);
 	}
 	return (m);
 }
 
 /*
  *	Allocates a region from the kernel address map and physical pages
  *	within the specified address range to the kernel object.  Creates a
  *	wired mapping from this region to these pages, and returns the
  *	region's starting virtual address.  The allocated pages are not
  *	necessarily physically contiguous.  If M_ZERO is specified through the
  *	given flags, then the pages are zeroed before they are mapped.
  */
 static void *
 kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
     vm_paddr_t high, vm_memattr_t memattr)
 {
 	vmem_t *vmem;
 	vm_object_t object;
 	vm_offset_t addr, i, offset;
 	vm_page_t m;
 	vm_size_t asize;
 	int pflags;
 	vm_prot_t prot;
 
 	object = kernel_object;
 	asize = round_page(size);
 	vmem = vm_dom[domain].vmd_kernel_arena;
 	if (vmem_alloc(vmem, asize, M_BESTFIT | flags, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 	VM_OBJECT_WLOCK(object);
 	for (i = 0; i < asize; i += PAGE_SIZE) {
 		m = kmem_alloc_contig_pages(object, atop(offset + i),
 		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
 		if (m == NULL) {
 			VM_OBJECT_WUNLOCK(object);
 			kmem_unback(object, addr, i);
 			vmem_free(vmem, addr, asize);
 			return (0);
 		}
 		KASSERT(vm_page_domain(m) == domain,
 		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
 		    vm_page_domain(m), domain));
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
 	VM_OBJECT_WUNLOCK(object);
 	kmem_alloc_san(addr, size, asize, flags);
 	return ((void *)addr);
 }
 
 void *
 kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
     vm_memattr_t memattr)
 {
 
 	return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low,
 	    high, memattr));
 }
 
 void *
 kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_page_t bounds[2];
 	void *addr;
 	int domain;
 	int start_segind;
 
 	start_segind = -1;
 
 	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 	do {
 		addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
 		    memattr);
 		if (addr != NULL)
 			break;
 		if (start_segind == -1)
 			start_segind = vm_phys_lookup_segind(low);
 		if (vm_phys_find_range(bounds, start_segind, domain,
 		    atop(round_page(size)), low, high) == -1) {
 			vm_domainset_iter_ignore(&di, domain);
 		}
 	} while (vm_domainset_iter_policy(&di, &domain) == 0);
 
 	return (addr);
 }
 
 /*
  *	Allocates a region from the kernel address map and physically
  *	contiguous pages within the specified address range to the kernel
  *	object.  Creates a wired mapping from this region to these pages, and
  *	returns the region's starting virtual address.  If M_ZERO is specified
  *	through the given flags, then the pages are zeroed before they are
  *	mapped.
  */
 static void *
 kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 	vmem_t *vmem;
 	vm_object_t object;
 	vm_offset_t addr, offset, tmp;
 	vm_page_t end_m, m;
 	vm_size_t asize;
 	u_long npages;
 	int pflags;
 
 	object = kernel_object;
 	asize = round_page(size);
 	vmem = vm_dom[domain].vmd_kernel_arena;
 	if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
 		return (NULL);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	npages = atop(asize);
 	VM_OBJECT_WLOCK(object);
 	m = kmem_alloc_contig_pages(object, atop(offset), domain,
 	    pflags, npages, low, high, alignment, boundary, memattr);
 	if (m == NULL) {
 		VM_OBJECT_WUNLOCK(object);
 		vmem_free(vmem, addr, asize);
 		return (NULL);
 	}
 	KASSERT(vm_page_domain(m) == domain,
 	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
 	    vm_page_domain(m), domain));
 	end_m = m + npages;
 	tmp = addr;
 	for (; m < end_m; m++) {
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
 	}
 	VM_OBJECT_WUNLOCK(object);
 	kmem_alloc_san(addr, size, asize, flags);
 	return ((void *)addr);
 }
 
 void *
 kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
 {
 
 	return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low,
 	    high, alignment, boundary, memattr));
 }
 
 void *
 kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_page_t bounds[2];
 	void *addr;
 	int domain;
 	int start_segind;
 
 	start_segind = -1;
 
 	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 	do {
 		addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
 		    alignment, boundary, memattr);
 		if (addr != NULL)
 			break;
 		if (start_segind == -1)
 			start_segind = vm_phys_lookup_segind(low);
 		if (vm_phys_find_range(bounds, start_segind, domain,
 		    atop(round_page(size)), low, high) == -1) {
 			vm_domainset_iter_ignore(&di, domain);
 		}
 	} while (vm_domainset_iter_policy(&di, &domain) == 0);
 
 	return (addr);
 }
 
 /*
  *	kmem_subinit:
  *
  *	Initializes a map to manage a subrange
  *	of the kernel virtual address space.
  *
  *	Arguments are as follows:
  *
  *	parent		Map to take range from
  *	min, max	Returned endpoints of map
  *	size		Size of range to find
  *	superpage_align	Request that min is superpage aligned
  */
 void
 kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
     vm_size_t size, bool superpage_align)
 {
 	int ret;
 
 	size = round_page(size);
 
 	*min = vm_map_min(parent);
 	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
 	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
 	    MAP_ACC_NO_CHARGE);
 	if (ret != KERN_SUCCESS)
 		panic("kmem_subinit: bad status return of %d", ret);
 	*max = *min + size;
 	vm_map_init(map, vm_map_pmap(parent), *min, *max);
 	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
 		panic("kmem_subinit: unable to change range to submap");
 }
 
 /*
  *	kmem_malloc_domain:
  *
  *	Allocate wired-down pages in the kernel's address space.
  */
 static void *
 kmem_malloc_domain(int domain, vm_size_t size, int flags)
 {
 	vmem_t *arena;
 	vm_offset_t addr;
 	vm_size_t asize;
 	int rv;
 
 	if (__predict_true((flags & (M_EXEC | M_NEVERFREED)) == 0))
 		arena = vm_dom[domain].vmd_kernel_arena;
 	else if ((flags & M_EXEC) != 0)
 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
 	else
 		arena = vm_dom[domain].vmd_kernel_nofree_arena;
 	asize = round_page(size);
 	if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
 		return (0);
 
 	rv = kmem_back_domain(domain, kernel_object, addr, asize, flags);
 	if (rv != KERN_SUCCESS) {
 		vmem_free(arena, addr, asize);
 		return (0);
 	}
 	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
 	return ((void *)addr);
 }
 
 void *
 kmem_malloc(vm_size_t size, int flags)
 {
 	void * p;
 
 	TSENTER();
 	p = kmem_malloc_domainset(DOMAINSET_RR(), size, flags);
 	TSEXIT();
 	return (p);
 }
 
 void *
 kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
 {
 	struct vm_domainset_iter di;
 	void *addr;
 	int domain;
 
 	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 	do {
 		addr = kmem_malloc_domain(domain, size, flags);
 		if (addr != NULL)
 			break;
 	} while (vm_domainset_iter_policy(&di, &domain) == 0);
 
 	return (addr);
 }
 
 /*
  *	kmem_back_domain:
  *
  *	Allocate physical pages from the specified domain for the specified
  *	virtual address range.
  */
 int
 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
     vm_size_t size, int flags)
 {
 	vm_offset_t offset, i;
 	vm_page_t m, mpred;
 	vm_prot_t prot;
 	int pflags;
 
 	KASSERT(object == kernel_object,
 	    ("kmem_back_domain: only supports kernel object."));
 
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	if (flags & M_WAITOK)
 		pflags |= VM_ALLOC_WAITFAIL;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 
 	i = 0;
 	VM_OBJECT_WLOCK(object);
 retry:
 	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
 	for (; i < size; i += PAGE_SIZE, mpred = m) {
 		m = vm_page_alloc_domain_after(object, atop(offset + i),
 		    domain, pflags, mpred);
 
 		/*
 		 * Ran out of space, free everything up and return. Don't need
 		 * to lock page queues here as we know that the pages we got
 		 * aren't on any queues.
 		 */
 		if (m == NULL) {
 			if ((flags & M_NOWAIT) == 0)
 				goto retry;
 			VM_OBJECT_WUNLOCK(object);
 			kmem_unback(object, addr, i);
 			return (KERN_NO_SPACE);
 		}
 		KASSERT(vm_page_domain(m) == domain,
 		    ("kmem_back_domain: Domain mismatch %d != %d",
 		    vm_page_domain(m), domain));
 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 		if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
 			m->oflags |= VPO_KMEM_EXEC;
 	}
 	VM_OBJECT_WUNLOCK(object);
 	kmem_alloc_san(addr, size, size, flags);
 	return (KERN_SUCCESS);
 }
 
 /*
  *	kmem_back:
  *
  *	Allocate physical pages for the specified virtual address range.
  */
 int
 kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
 {
 	vm_offset_t end, next, start;
 	int domain, rv;
 
 	KASSERT(object == kernel_object,
 	    ("kmem_back: only supports kernel object."));
 
 	for (start = addr, end = addr + size; addr < end; addr = next) {
 		/*
 		 * We must ensure that pages backing a given large virtual page
 		 * all come from the same physical domain.
 		 */
 		if (vm_ndomains > 1) {
 			domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
 			while (VM_DOMAIN_EMPTY(domain))
 				domain++;
 			next = roundup2(addr + 1, KVA_QUANTUM);
 			if (next > end || next < start)
 				next = end;
 		} else {
 			domain = 0;
 			next = end;
 		}
 		rv = kmem_back_domain(domain, object, addr, next - addr, flags);
 		if (rv != KERN_SUCCESS) {
 			kmem_unback(object, start, addr - start);
 			break;
 		}
 	}
 	return (rv);
 }
 
 /*
  *	kmem_unback:
  *
  *	Unmap and free the physical pages underlying the specified virtual
  *	address range.
  *
  *	A physical page must exist within the specified object at each index
  *	that is being unmapped.
  */
 static struct vmem *
 _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 {
+	struct pctrie_iter pages;
 	struct vmem *arena;
-	vm_page_t m, next;
+	vm_page_t m;
 	vm_offset_t end, offset;
 	int domain;
 
 	KASSERT(object == kernel_object,
 	    ("kmem_unback: only supports kernel object."));
 
 	if (size == 0)
 		return (NULL);
 	pmap_remove(kernel_pmap, addr, addr + size);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	end = offset + size;
 	VM_OBJECT_WLOCK(object);
-	m = vm_page_lookup(object, atop(offset)); 
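+	/* Look up the first page of the range via a pctrie iterator. */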
+	vm_page_iter_init(&pages, object);
+	m = vm_page_iter_lookup(&pages, atop(offset));
 	domain = vm_page_domain(m);
 	if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
 		arena = vm_dom[domain].vmd_kernel_arena;
 	else
 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
-	for (; offset < end; offset += PAGE_SIZE, m = next) {
-		next = vm_page_next(m);
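+	/*
+	 * Unwire and free each page, using the iterator to look up the page
+	 * at each successive offset.
+	 */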
+	for (; offset < end; offset += PAGE_SIZE,
+	    m = vm_page_iter_lookup(&pages, atop(offset))) {
 		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
-		vm_page_free(m);
+		vm_page_iter_free(&pages);
 	}
 	VM_OBJECT_WUNLOCK(object);
 
 	return (arena);
 }
 
 void
 kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 {
 
 	(void)_kmem_unback(object, addr, size);
 }
 
 /*
  *	kmem_free:
  *
  *	Free memory allocated with kmem_malloc.  The size must match the
  *	original allocation.
  */
 void
 kmem_free(void *addr, vm_size_t size)
 {
 	struct vmem *arena;
 
 	size = round_page(size);
 	kasan_mark(addr, size, size, 0);
 	arena = _kmem_unback(kernel_object, (uintptr_t)addr, size);
 	if (arena != NULL)
 		vmem_free(arena, (uintptr_t)addr, size);
 }
 
 /*
  *	kmap_alloc_wait:
  *
  *	Allocates pageable memory from a sub-map of the kernel.  If the submap
  *	has no room, the caller sleeps waiting for more memory in the submap.
  *
  *	This routine may block.
  */
 vm_offset_t
 kmap_alloc_wait(vm_map_t map, vm_size_t size)
 {
 	vm_offset_t addr;
 
 	size = round_page(size);
 	if (!swap_reserve(size))
 		return (0);
 
 	for (;;) {
 		/*
 		 * To make this work for more than one map, use the map's lock
 		 * to lock out sleepers/wakers.
 		 */
 		vm_map_lock(map);
 		addr = vm_map_findspace(map, vm_map_min(map), size);
 		if (addr + size <= vm_map_max(map))
 			break;
 		/* no space now; see if we can ever get space */
 		if (vm_map_max(map) - vm_map_min(map) < size) {
 			vm_map_unlock(map);
 			swap_release(size);
 			return (0);
 		}
 		map->needs_wakeup = TRUE;
 		vm_map_unlock_and_wait(map, 0);
 	}
 	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_RW, VM_PROT_RW,
 	    MAP_ACC_CHARGED);
 	vm_map_unlock(map);
 	return (addr);
 }
 
 /*
  *	kmap_free_wakeup:
  *
  *	Returns memory to a submap of the kernel, and wakes up any processes
  *	waiting for memory in that map.
  */
 void
 kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
 {
 
 	vm_map_lock(map);
 	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
 	if (map->needs_wakeup) {
 		map->needs_wakeup = FALSE;
 		vm_map_wakeup(map);
 	}
 	vm_map_unlock(map);
 }
 
 void
 kmem_init_zero_region(void)
 {
 	vm_offset_t addr, i;
 	vm_page_t m;
 
 	/*
 	 * Map a single physical page of zeros to a larger virtual range.
 	 * This requires less looping in places that want large amounts of
 	 * zeros, while not using much more physical resources.
 	 */
 	addr = kva_alloc(ZERO_REGION_SIZE);
 	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
 	    VM_ALLOC_NOFREE);
 	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
 		pmap_qenter(addr + i, &m, 1);
 	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);
 
 	zero_region = (const void *)addr;
 }
 
 /*
  * Import KVA from the kernel map into the kernel arena.
  */
 static int
 kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
 {
 	vm_offset_t addr;
 	int result;
 
 	TSENTER();
 	KASSERT((size % KVA_QUANTUM) == 0,
 	    ("kva_import: Size %jd is not a multiple of %d",
 	    (intmax_t)size, (int)KVA_QUANTUM));
 	addr = vm_map_min(kernel_map);
 	result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
 	    VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 	if (result != KERN_SUCCESS) {
 		TSEXIT();
 		return (ENOMEM);
 	}
 
 	*addrp = addr;
 
 	TSEXIT();
 	return (0);
 }
 
 /*
  * Import KVA from a parent arena into a per-domain arena.  Imports must be
  * KVA_QUANTUM-aligned and a multiple of KVA_QUANTUM in size.
  */
 static int
 kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
 {
 
 	KASSERT((size % KVA_QUANTUM) == 0,
 	    ("kva_import_domain: Size %jd is not a multiple of %d",
 	    (intmax_t)size, (int)KVA_QUANTUM));
 	return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
 	    VMEM_ADDR_MAX, flags, addrp));
 }
 
 /*
  * 	kmem_init:
  *
  *	Create the kernel map; insert a mapping covering kernel text, 
  *	data, bss, and all space allocated thus far (`bootstrap' data).  The
  *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and 
  *	`start' as allocated, and the range between `start' and `end' as free.
  *	Create the kernel vmem arena and its per-domain children.
  */
 void
 kmem_init(vm_offset_t start, vm_offset_t end)
 {
 	vm_size_t quantum;
 	int domain;
 
 	vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
 	kernel_map->system_map = 1;
 	vm_map_lock(kernel_map);
 	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
 	(void)vm_map_insert(kernel_map, NULL, 0,
 #ifdef __amd64__
 	    KERNBASE,
 #else		     
 	    VM_MIN_KERNEL_ADDRESS,
 #endif
 	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 	/* ... and ending with the completion of the above `insert' */
 
 #ifdef __amd64__
 	/*
 	 * Mark KVA used for the page array as allocated.  Other platforms
 	 * that handle vm_page_array allocation can simply adjust virtual_avail
 	 * instead.
 	 */
 	(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
 	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
 	    sizeof(struct vm_page)),
 	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
 #endif
 	vm_map_unlock(kernel_map);
 
 	/*
 	 * Use a large import quantum on NUMA systems.  This helps minimize
 	 * interleaving of superpages, reducing internal fragmentation within
 	 * the per-domain arenas.
 	 */
 	if (vm_ndomains > 1 && PMAP_HAS_DMAP)
 		quantum = KVA_NUMA_IMPORT_QUANTUM;
 	else
 		quantum = KVA_QUANTUM;
 
 	/*
 	 * Initialize the kernel_arena.  This can grow on demand.
 	 */
 	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
 	vmem_set_import(kernel_arena, kva_import, NULL, NULL, quantum);
 
 	for (domain = 0; domain < vm_ndomains; domain++) {
 		/*
 		 * Initialize the per-domain arenas.  These are used to color
 		 * the KVA space in a way that ensures that virtual large pages
 		 * are backed by memory from the same physical domain,
 		 * maximizing the potential for superpage promotion.
 		 */
 		vm_dom[domain].vmd_kernel_arena = vmem_create(
 		    "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
 		vmem_set_import(vm_dom[domain].vmd_kernel_arena,
 		    kva_import_domain, NULL, kernel_arena, quantum);
 
 		/*
 		 * In architectures with superpages, maintain separate arenas
 		 * for allocations with permissions that differ from the
 		 * "standard" read/write permissions used for kernel memory
 		 * and pages that are never released, so as not to inhibit
 		 * superpage promotion.
 		 *
 		 * Use the base import quantum since these arenas are rarely
 		 * used.
 		 */
 #if VM_NRESERVLEVEL > 0
 		vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
 		    "kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
 		vm_dom[domain].vmd_kernel_nofree_arena = vmem_create(
 		    "kernel NOFREE arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
 		vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
 		    kva_import_domain, (vmem_release_t *)vmem_xfree,
 		    kernel_arena, KVA_QUANTUM);
 		vmem_set_import(vm_dom[domain].vmd_kernel_nofree_arena,
 		    kva_import_domain, (vmem_release_t *)vmem_xfree,
 		    kernel_arena, KVA_QUANTUM);
 #else
 		vm_dom[domain].vmd_kernel_rwx_arena =
 		    vm_dom[domain].vmd_kernel_arena;
 		vm_dom[domain].vmd_kernel_nofree_arena =
 		    vm_dom[domain].vmd_kernel_arena;
 #endif
 	}
 
 	/*
 	 * This must be the very first call so that the virtual address
 	 * space used for early allocations is properly marked used in
 	 * the map.
 	 */
 	uma_startup2();
 }
 
 /*
  *	kmem_bootstrap_free:
  *
  *	Free pages backing preloaded data (e.g., kernel modules) to the
  *	system.  Currently only supported on platforms that create a
  *	vm_phys segment for preloaded data.
  */
 void
 kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 {
 #if defined(__i386__) || defined(__amd64__)
 	struct vm_domain *vmd;
 	vm_offset_t end, va;
 	vm_paddr_t pa;
 	vm_page_t m;
 
 	end = trunc_page(start + size);
 	start = round_page(start);
 
 #ifdef __amd64__
 	/*
 	 * Preloaded files do not have execute permissions by default on amd64.
 	 * Restore the default permissions to ensure that the direct map alias
 	 * is updated.
 	 */
 	pmap_change_prot(start, end - start, VM_PROT_RW);
 #endif
 	for (va = start; va < end; va += PAGE_SIZE) {
 		pa = pmap_kextract(va);
 		m = PHYS_TO_VM_PAGE(pa);
 
 		vmd = vm_pagequeue_domain(m);
 		vm_domain_free_lock(vmd);
 		vm_phys_free_pages(m, 0);
 		vm_domain_free_unlock(vmd);
 
 		vm_domain_freecnt_inc(vmd, 1);
 		vm_cnt.v_page_count++;
 	}
 	pmap_remove(kernel_pmap, start, end);
 	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
 #endif
 }
 
 #ifdef PMAP_WANT_ACTIVE_CPUS_NAIVE
 void
 pmap_active_cpus(pmap_t pmap, cpuset_t *res)
 {
 	struct thread *td;
 	struct proc *p;
 	struct vmspace *vm;
 	int c;
 
 	CPU_ZERO(res);
 	CPU_FOREACH(c) {
 		td = cpuid_to_pcpu[c]->pc_curthread;
 		p = td->td_proc;
 		if (p == NULL)
 			continue;
 		vm = vmspace_acquire_ref(p);
 		if (vm == NULL)
 			continue;
 		if (pmap == vmspace_pmap(vm))
 			CPU_SET(c, res);
 		vmspace_free(vm);
 	}
 }
 #endif
 
 /*
  * Allow userspace to directly trigger the VM drain routine for testing
  * purposes.
  */
 static int
 debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
 {
 	int error, i;
 
 	i = 0;
 	error = sysctl_handle_int(oidp, &i, 0, req);
 	if (error != 0)
 		return (error);
 	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
 		return (EINVAL);
 	if (i != 0)
 		EVENTHANDLER_INVOKE(vm_lowmem, i);
 	return (0);
 }
 SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem,
     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_vm_lowmem, "I",
     "set to trigger vm_lowmem event with given flags");
 
 static int
 debug_uma_reclaim(SYSCTL_HANDLER_ARGS)
 {
 	int error, i;
 
 	i = 0;
 	error = sysctl_handle_int(oidp, &i, 0, req);
 	if (error != 0 || req->newptr == NULL)
 		return (error);
 	if (i != UMA_RECLAIM_TRIM && i != UMA_RECLAIM_DRAIN &&
 	    i != UMA_RECLAIM_DRAIN_CPU)
 		return (EINVAL);
 	uma_reclaim(i);
 	return (0);
 }
 SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim,
     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_uma_reclaim, "I",
     "set to generate request to reclaim uma caches");
 
 static int
 debug_uma_reclaim_domain(SYSCTL_HANDLER_ARGS)
 {
 	int domain, error, request;
 
 	request = 0;
 	error = sysctl_handle_int(oidp, &request, 0, req);
 	if (error != 0 || req->newptr == NULL)
 		return (error);
 
 	domain = request >> 4;
 	request &= 0xf;
 	if (request != UMA_RECLAIM_TRIM && request != UMA_RECLAIM_DRAIN &&
 	    request != UMA_RECLAIM_DRAIN_CPU)
 		return (EINVAL);
 	if (domain < 0 || domain >= vm_ndomains)
 		return (EINVAL);
 	uma_reclaim_domain(request, domain);
 	return (0);
 }
 SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim_domain,
     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0,
     debug_uma_reclaim_domain, "I",
     "");
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index e6324647e29e..21773318cea0 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1,2808 +1,2808 @@
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  *	Virtual memory object module.
  */
 
 #include "opt_vm.h"
 
 #include <sys/systm.h>
 #include <sys/blockcount.h>
 #include <sys/conf.h>
 #include <sys/cpuset.h>
 #include <sys/ipc.h>
 #include <sys/jail.h>
 #include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/mman.h>
 #include <sys/mount.h>
 #include <sys/kernel.h>
 #include <sys/mutex.h>
 #include <sys/pctrie.h>
 #include <sys/proc.h>
 #include <sys/refcount.h>
 #include <sys/shm.h>
 #include <sys/sx.h>
 #include <sys/sysctl.h>
 #include <sys/resourcevar.h>
 #include <sys/refcount.h>
 #include <sys/rwlock.h>
 #include <sys/user.h>
 #include <sys/vnode.h>
 #include <sys/vmmeter.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_pagequeue.h>
 #include <vm/swap_pager.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_radix.h>
 #include <vm/vm_reserv.h>
 #include <vm/uma.h>
 
 static int old_msync;
 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
     "Use old (insecure) msync behavior");
 
 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
 		    int pagerflags, int flags, boolean_t *allclean,
 		    boolean_t *eio);
 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
 		    boolean_t *allclean);
 static void	vm_object_backing_remove(vm_object_t object);
 
 /*
  *	Virtual memory objects maintain the actual data
  *	associated with allocated virtual memory.  A given
  *	page of memory exists within exactly one object.
  *
  *	An object is only deallocated when all "references"
  *	are given up.  Only one "reference" to a given
  *	region of an object should be writeable.
  *
  *	Associated with each object is a list of all resident
  *	memory pages belonging to that object; this list is
  *	maintained by the "vm_page" module, and locked by the object's
  *	lock.
  *
  *	Each object also records a "pager" routine which is
  *	used to retrieve (and store) pages to the proper backing
  *	storage.  In addition, objects may be backed by other
  *	objects from which they were virtual-copied.
  *
  *	The only items within the object structure which are
  *	modified after time of creation are:
  *		reference count		locked by object's lock
  *		pager routine		locked by object's lock
  *
  */
 
 struct object_q vm_object_list;
 struct mtx vm_object_list_mtx;	/* lock for object list and count */
 
 struct vm_object kernel_object_store;
 
 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "VM object stats");
 
 static COUNTER_U64_DEFINE_EARLY(object_collapses);
 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
     &object_collapses,
     "VM object collapses");
 
 static COUNTER_U64_DEFINE_EARLY(object_bypasses);
 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
     &object_bypasses,
     "VM object bypasses");
 
 static COUNTER_U64_DEFINE_EARLY(object_collapse_waits);
 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
     &object_collapse_waits,
     "Number of sleeps for collapse");
 
 static uma_zone_t obj_zone;
 
 static int vm_object_zinit(void *mem, int size, int flags);
 
 #ifdef INVARIANTS
 static void vm_object_zdtor(void *mem, int size, void *arg);
 
 static void
 vm_object_zdtor(void *mem, int size, void *arg)
 {
 	vm_object_t object;
 
 	object = (vm_object_t)mem;
 	KASSERT(object->ref_count == 0,
 	    ("object %p ref_count = %d", object, object->ref_count));
 	KASSERT(TAILQ_EMPTY(&object->memq),
 	    ("object %p has resident pages in its memq", object));
 	KASSERT(vm_radix_is_empty(&object->rtree),
 	    ("object %p has resident pages in its trie", object));
 #if VM_NRESERVLEVEL > 0
 	KASSERT(LIST_EMPTY(&object->rvq),
 	    ("object %p has reservations",
 	    object));
 #endif
 	KASSERT(!vm_object_busied(object),
 	    ("object %p busy = %d", object, blockcount_read(&object->busy)));
 	KASSERT(object->resident_page_count == 0,
 	    ("object %p resident_page_count = %d",
 	    object, object->resident_page_count));
 	KASSERT(atomic_load_int(&object->shadow_count) == 0,
 	    ("object %p shadow_count = %d",
 	    object, atomic_load_int(&object->shadow_count)));
 	KASSERT(object->type == OBJT_DEAD,
 	    ("object %p has non-dead type %d",
 	    object, object->type));
 	KASSERT(object->charge == 0 && object->cred == NULL,
 	    ("object %p has non-zero charge %ju (%p)",
 	    object, (uintmax_t)object->charge, object->cred));
 }
 #endif
 
 static int
 vm_object_zinit(void *mem, int size, int flags)
 {
 	vm_object_t object;
 
 	object = (vm_object_t)mem;
 	rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW);
 
 	/* These are true for any object that has been freed */
 	object->type = OBJT_DEAD;
 	vm_radix_init(&object->rtree);
 	refcount_init(&object->ref_count, 0);
 	blockcount_init(&object->paging_in_progress);
 	blockcount_init(&object->busy);
 	object->resident_page_count = 0;
 	atomic_store_int(&object->shadow_count, 0);
 	object->flags = OBJ_DEAD;
 
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
 	mtx_unlock(&vm_object_list_mtx);
 	return (0);
 }
 
 static void
 _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
     vm_object_t object, void *handle)
 {
 
 	TAILQ_INIT(&object->memq);
 	LIST_INIT(&object->shadow_head);
 
 	object->type = type;
 	object->flags = flags;
 	if ((flags & OBJ_SWAP) != 0) {
 		pctrie_init(&object->un_pager.swp.swp_blks);
 		object->un_pager.swp.writemappings = 0;
 	}
 
 	/*
 	 * Ensure that swap_pager_swapoff() iteration over object_list
 	 * sees up to date type and pctrie head if it observed
 	 * non-dead object.
 	 */
 	atomic_thread_fence_rel();
 
 	object->pg_color = 0;
 	object->size = size;
 	object->domain.dr_policy = NULL;
 	object->generation = 1;
 	object->cleangeneration = 1;
 	refcount_init(&object->ref_count, 1);
 	object->memattr = VM_MEMATTR_DEFAULT;
 	object->cred = NULL;
 	object->charge = 0;
 	object->handle = handle;
 	object->backing_object = NULL;
 	object->backing_object_offset = (vm_ooffset_t) 0;
 #if VM_NRESERVLEVEL > 0
 	LIST_INIT(&object->rvq);
 #endif
 	umtx_shm_object_init(object);
 }
 
 /*
  *	vm_object_init:
  *
  *	Initialize the VM objects module.
  */
 void
 vm_object_init(void)
 {
 	TAILQ_INIT(&vm_object_list);
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
 
 	rw_init(&kernel_object->lock, "kernel vm object");
 	vm_radix_init(&kernel_object->rtree);
 	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
 #if VM_NRESERVLEVEL > 0
 	kernel_object->flags |= OBJ_COLORED;
 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
 #endif
 	kernel_object->un_pager.phys.ops = &default_phys_pg_ops;
 
 	/*
 	 * The lock portion of struct vm_object must be type stable due
 	 * to vm_pageout_fallback_object_lock locking a vm object
 	 * without holding any references to it.
 	 *
 	 * paging_in_progress is valid always.  Lockless references to
 	 * the objects may acquire pip and then check OBJ_DEAD.
 	 */
 	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
 #ifdef INVARIANTS
 	    vm_object_zdtor,
 #else
 	    NULL,
 #endif
 	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 
 	vm_radix_zinit();
 }
 
 void
 vm_object_clear_flag(vm_object_t object, u_short bits)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	object->flags &= ~bits;
 }
 
 /*
  *	Sets the default memory attribute for the specified object.  Pages
  *	that are allocated to this object are by default assigned this memory
  *	attribute.
  *
  *	Presently, this function must be called before any pages are allocated
  *	to the object.  In the future, this requirement may be relaxed for
  *	"default" and "swap" objects.
  */
 int
 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	if (object->type == OBJT_DEAD)
 		return (KERN_INVALID_ARGUMENT);
 	if (!TAILQ_EMPTY(&object->memq))
 		return (KERN_FAILURE);
 
 	object->memattr = memattr;
 	return (KERN_SUCCESS);
 }
 
 void
 vm_object_pip_add(vm_object_t object, short i)
 {
 
 	if (i > 0)
 		blockcount_acquire(&object->paging_in_progress, i);
 }
 
 void
 vm_object_pip_wakeup(vm_object_t object)
 {
 
 	vm_object_pip_wakeupn(object, 1);
 }
 
 void
 vm_object_pip_wakeupn(vm_object_t object, short i)
 {
 
 	if (i > 0)
 		blockcount_release(&object->paging_in_progress, i);
 }
 
 /*
  * Atomically drop the object lock and wait for pip to drain.  This protects
  * from sleep/wakeup races due to identity changes.  The lock is not re-acquired
  * on return.
  */
 static void
 vm_object_pip_sleep(vm_object_t object, const char *waitid)
 {
 
 	(void)blockcount_sleep(&object->paging_in_progress, &object->lock,
 	    waitid, PVM | PDROP);
 }
 
 void
 vm_object_pip_wait(vm_object_t object, const char *waitid)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
 	    PVM);
 }
 
 void
 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid)
 {
 
 	VM_OBJECT_ASSERT_UNLOCKED(object);
 
 	blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
 }
 
 /*
  *	vm_object_allocate:
  *
  *	Returns a new object with the given size.
  */
 vm_object_t
 vm_object_allocate(objtype_t type, vm_pindex_t size)
 {
 	vm_object_t object;
 	u_short flags;
 
 	switch (type) {
 	case OBJT_DEAD:
 		panic("vm_object_allocate: can't create OBJT_DEAD");
 	case OBJT_SWAP:
 		flags = OBJ_COLORED | OBJ_SWAP;
 		break;
 	case OBJT_DEVICE:
 	case OBJT_SG:
 		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
 		break;
 	case OBJT_MGTDEVICE:
 		flags = OBJ_FICTITIOUS;
 		break;
 	case OBJT_PHYS:
 		flags = OBJ_UNMANAGED;
 		break;
 	case OBJT_VNODE:
 		flags = 0;
 		break;
 	default:
 		panic("vm_object_allocate: type %d is undefined or dynamic",
 		    type);
 	}
 	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
 	_vm_object_allocate(type, size, flags, object, NULL);
 
 	return (object);
 }
 
 vm_object_t
 vm_object_allocate_dyn(objtype_t dyntype, vm_pindex_t size, u_short flags)
 {
 	vm_object_t object;
 
 	MPASS(dyntype >= OBJT_FIRST_DYN /* && dyntype < nitems(pagertab) */);
 	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
 	_vm_object_allocate(dyntype, size, flags, object, NULL);
 
 	return (object);
 }
 
 /*
  *	vm_object_allocate_anon:
  *
  *	Returns a new default object of the given size and marked as
  *	anonymous memory for special split/collapse handling.  Color
  *	to be initialized by the caller.
  */
 vm_object_t
 vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
     struct ucred *cred, vm_size_t charge)
 {
 	vm_object_t handle, object;
 
 	if (backing_object == NULL)
 		handle = NULL;
 	else if ((backing_object->flags & OBJ_ANON) != 0)
 		handle = backing_object->handle;
 	else
 		handle = backing_object;
 	object = uma_zalloc(obj_zone, M_WAITOK);
 	_vm_object_allocate(OBJT_SWAP, size,
 	    OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
 	object->cred = cred;
 	object->charge = cred != NULL ? charge : 0;
 	return (object);
 }
 
 static void
 vm_object_reference_vnode(vm_object_t object)
 {
 	u_int old;
 
 	/*
 	 * vnode objects need the lock for the first reference
 	 * to serialize with vnode_object_deallocate().
 	 */
 	if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
 		VM_OBJECT_RLOCK(object);
 		old = refcount_acquire(&object->ref_count);
 		if (object->type == OBJT_VNODE && old == 0)
 			vref(object->handle);
 		VM_OBJECT_RUNLOCK(object);
 	}
 }
 
 /*
  *	vm_object_reference:
  *
  *	Acquires a reference to the given object.
  */
 void
 vm_object_reference(vm_object_t object)
 {
 
 	if (object == NULL)
 		return;
 
 	if (object->type == OBJT_VNODE)
 		vm_object_reference_vnode(object);
 	else
 		refcount_acquire(&object->ref_count);
 	KASSERT((object->flags & OBJ_DEAD) == 0,
 	    ("vm_object_reference: Referenced dead object."));
 }
 
 /*
  *	vm_object_reference_locked:
  *
  *	Gets another reference to the given object.
  *
  *	The object must be locked.
  */
 void
 vm_object_reference_locked(vm_object_t object)
 {
 	u_int old;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	old = refcount_acquire(&object->ref_count);
 	if (object->type == OBJT_VNODE && old == 0)
 		vref(object->handle);
 	KASSERT((object->flags & OBJ_DEAD) == 0,
 	    ("vm_object_reference: Referenced dead object."));
 }
 
 /*
  * Handle deallocating an object of type OBJT_VNODE.
  */
 static void
 vm_object_deallocate_vnode(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;
 	bool last;
 
 	KASSERT(object->type == OBJT_VNODE,
 	    ("vm_object_deallocate_vnode: not a vnode object"));
 	KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));
 
 	/* Object lock to protect handle lookup. */
 	last = refcount_release(&object->ref_count);
 	VM_OBJECT_RUNLOCK(object);
 
 	if (!last)
 		return;
 
 	if (!umtx_shm_vnobj_persistent)
 		umtx_shm_object_terminated(object);
 
 	/* vrele may need the vnode lock. */
 	vrele(vp);
 }
 
 /*
  * We dropped a reference on an object and discovered that it had a
  * single remaining shadow.  This is a sibling of the reference we
  * dropped.  Attempt to collapse the sibling and backing object.
  */
 static vm_object_t
 vm_object_deallocate_anon(vm_object_t backing_object)
 {
 	vm_object_t object;
 
 	/* Fetch the final shadow.  */
 	object = LIST_FIRST(&backing_object->shadow_head);
 	KASSERT(object != NULL &&
 	    atomic_load_int(&backing_object->shadow_count) == 1,
 	    ("vm_object_anon_deallocate: ref_count: %d, shadow_count: %d",
 	    backing_object->ref_count,
 	    atomic_load_int(&backing_object->shadow_count)));
 	KASSERT((object->flags & OBJ_ANON) != 0,
 	    ("invalid shadow object %p", object));
 
 	if (!VM_OBJECT_TRYWLOCK(object)) {
 		/*
 		 * Prevent object from disappearing since we do not have a
 		 * reference.
 		 */
 		vm_object_pip_add(object, 1);
 		VM_OBJECT_WUNLOCK(backing_object);
 		VM_OBJECT_WLOCK(object);
 		vm_object_pip_wakeup(object);
 	} else
 		VM_OBJECT_WUNLOCK(backing_object);
 
 	/*
 	 * Check for a collapse/terminate race with the last reference holder.
 	 */
 	if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
 	    !refcount_acquire_if_not_zero(&object->ref_count)) {
 		VM_OBJECT_WUNLOCK(object);
 		return (NULL);
 	}
 	backing_object = object->backing_object;
 	if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
 		vm_object_collapse(object);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (object);
 }
 
 /*
  *	vm_object_deallocate:
  *
  *	Release a reference to the specified object,
  *	gained either through a vm_object_allocate
  *	or a vm_object_reference call.  When all references
  *	are gone, storage associated with this object
  *	may be relinquished.
  *
  *	No object may be locked.
  */
 void
 vm_object_deallocate(vm_object_t object)
 {
 	vm_object_t temp;
 	bool released;
 
 	while (object != NULL) {
 		/*
 		 * If the reference count goes to 0 we start calling
 		 * vm_object_terminate() on the object chain.  A ref count
 		 * of 1 may be a special case depending on the shadow count
 		 * being 0 or 1.  These cases require a write lock on the
 		 * object.
 		 */
 		if ((object->flags & OBJ_ANON) == 0)
 			released = refcount_release_if_gt(&object->ref_count, 1);
 		else
 			released = refcount_release_if_gt(&object->ref_count, 2);
 		if (released)
 			return;
 
 		if (object->type == OBJT_VNODE) {
 			VM_OBJECT_RLOCK(object);
 			if (object->type == OBJT_VNODE) {
 				vm_object_deallocate_vnode(object);
 				return;
 			}
 			VM_OBJECT_RUNLOCK(object);
 		}
 
 		VM_OBJECT_WLOCK(object);
 		KASSERT(object->ref_count > 0,
 		    ("vm_object_deallocate: object deallocated too many times: %d",
 		    object->type));
 
 		/*
 		 * If this is not the final reference to an anonymous
 		 * object we may need to collapse the shadow chain.
 		 */
 		if (!refcount_release(&object->ref_count)) {
 			if (object->ref_count > 1 ||
 			    atomic_load_int(&object->shadow_count) == 0) {
 				if ((object->flags & OBJ_ANON) != 0 &&
 				    object->ref_count == 1)
 					vm_object_set_flag(object,
 					    OBJ_ONEMAPPING);
 				VM_OBJECT_WUNLOCK(object);
 				return;
 			}
 
 			/* Handle collapsing last ref on anonymous objects. */
 			object = vm_object_deallocate_anon(object);
 			continue;
 		}
 
 		/*
 		 * Handle the final reference to an object.  We restart
 		 * the loop with the backing object to avoid recursion.
 		 */
 		umtx_shm_object_terminated(object);
 		temp = object->backing_object;
 		if (temp != NULL) {
 			KASSERT(object->type == OBJT_SWAP,
 			    ("shadowed tmpfs v_object 2 %p", object));
 			vm_object_backing_remove(object);
 		}
 
 		KASSERT((object->flags & OBJ_DEAD) == 0,
 		    ("vm_object_deallocate: Terminating dead object."));
 		vm_object_set_flag(object, OBJ_DEAD);
 		vm_object_terminate(object);
 		object = temp;
 	}
 }
 
 void
 vm_object_destroy(vm_object_t object)
 {
 	uma_zfree(obj_zone, object);
 }
 
 static void
 vm_object_sub_shadow(vm_object_t object)
 {
 	KASSERT(object->shadow_count >= 1,
 	    ("object %p sub_shadow count zero", object));
 	atomic_subtract_int(&object->shadow_count, 1);
 }
 
 static void
 vm_object_backing_remove_locked(vm_object_t object)
 {
 	vm_object_t backing_object;
 
 	backing_object = object->backing_object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
 
 	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
 	    ("vm_object_backing_remove: Removing collapsing object."));
 
 	vm_object_sub_shadow(backing_object);
 	if ((object->flags & OBJ_SHADOWLIST) != 0) {
 		LIST_REMOVE(object, shadow_list);
 		vm_object_clear_flag(object, OBJ_SHADOWLIST);
 	}
 	object->backing_object = NULL;
 }
 
 static void
 vm_object_backing_remove(vm_object_t object)
 {
 	vm_object_t backing_object;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	backing_object = object->backing_object;
 	if ((object->flags & OBJ_SHADOWLIST) != 0) {
 		VM_OBJECT_WLOCK(backing_object);
 		vm_object_backing_remove_locked(object);
 		VM_OBJECT_WUNLOCK(backing_object);
 	} else {
 		object->backing_object = NULL;
 		vm_object_sub_shadow(backing_object);
 	}
 }
 
 static void
 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	atomic_add_int(&backing_object->shadow_count, 1);
 	if ((backing_object->flags & OBJ_ANON) != 0) {
 		VM_OBJECT_ASSERT_WLOCKED(backing_object);
 		LIST_INSERT_HEAD(&backing_object->shadow_head, object,
 		    shadow_list);
 		vm_object_set_flag(object, OBJ_SHADOWLIST);
 	}
 	object->backing_object = backing_object;
 }
 
 static void
 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	if ((backing_object->flags & OBJ_ANON) != 0) {
 		VM_OBJECT_WLOCK(backing_object);
 		vm_object_backing_insert_locked(object, backing_object);
 		VM_OBJECT_WUNLOCK(backing_object);
 	} else {
 		object->backing_object = backing_object;
 		atomic_add_int(&backing_object->shadow_count, 1);
 	}
 }
 
 /*
  * Insert an object into a backing_object's shadow list with an additional
  * reference to the backing_object added.
  */
 static void
 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	if ((backing_object->flags & OBJ_ANON) != 0) {
 		VM_OBJECT_WLOCK(backing_object);
 		KASSERT((backing_object->flags & OBJ_DEAD) == 0,
 		    ("shadowing dead anonymous object"));
 		vm_object_reference_locked(backing_object);
 		vm_object_backing_insert_locked(object, backing_object);
 		vm_object_clear_flag(backing_object, OBJ_ONEMAPPING);
 		VM_OBJECT_WUNLOCK(backing_object);
 	} else {
 		vm_object_reference(backing_object);
 		atomic_add_int(&backing_object->shadow_count, 1);
 		object->backing_object = backing_object;
 	}
 }
 
 /*
  * Transfer a backing reference from backing_object to object.
  */
 static void
 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
 {
 	vm_object_t new_backing_object;
 
 	/*
 	 * Note that the reference to backing_object->backing_object
 	 * moves from within backing_object to within object.
 	 */
 	vm_object_backing_remove_locked(object);
 	new_backing_object = backing_object->backing_object;
 	if (new_backing_object == NULL)
 		return;
 	if ((new_backing_object->flags & OBJ_ANON) != 0) {
 		VM_OBJECT_WLOCK(new_backing_object);
 		vm_object_backing_remove_locked(backing_object);
 		vm_object_backing_insert_locked(object, new_backing_object);
 		VM_OBJECT_WUNLOCK(new_backing_object);
 	} else {
 		/*
 		 * shadow_count for new_backing_object is left
 		 * unchanged, its reference provided by backing_object
 		 * is replaced by object.
 		 */
 		object->backing_object = new_backing_object;
 		backing_object->backing_object = NULL;
 	}
 }
 
 /*
  * Wait for a concurrent collapse to settle.
  */
 static void
 vm_object_collapse_wait(vm_object_t object)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	while ((object->flags & OBJ_COLLAPSING) != 0) {
 		vm_object_pip_wait(object, "vmcolwait");
 		counter_u64_add(object_collapse_waits, 1);
 	}
 }
 
 /*
  * Waits for a backing object to clear a pending collapse and returns
  * it locked if it is an ANON object.
  */
 static vm_object_t
 vm_object_backing_collapse_wait(vm_object_t object)
 {
 	vm_object_t backing_object;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	for (;;) {
 		backing_object = object->backing_object;
 		if (backing_object == NULL ||
 		    (backing_object->flags & OBJ_ANON) == 0)
 			return (NULL);
 		VM_OBJECT_WLOCK(backing_object);
 		if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
 			break;
 		VM_OBJECT_WUNLOCK(object);
 		vm_object_pip_sleep(backing_object, "vmbckwait");
 		counter_u64_add(object_collapse_waits, 1);
 		VM_OBJECT_WLOCK(object);
 	}
 	return (backing_object);
 }
 
 /*
  *	vm_object_terminate_single_page removes a pageable page from the object,
  *	and removes it from the paging queues and frees it, if it is not wired.
  *	It is invoked via callback from vm_object_terminate_pages.
  */
 static void
 vm_object_terminate_single_page(vm_page_t p, void *objectv)
 {
 	vm_object_t object __diagused = objectv;
 
 	vm_page_assert_unbusied(p);
 	KASSERT(p->object == object &&
 	    (p->ref_count & VPRC_OBJREF) != 0,
 	    ("%s: page %p is inconsistent", __func__, p));
 	p->object = NULL;
 	if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
 		KASSERT((object->flags & OBJ_UNMANAGED) != 0 ||
 		    vm_page_astate_load(p).queue != PQ_NONE,
 		    ("%s: page %p does not belong to a queue", __func__, p));
 		VM_CNT_INC(v_pfree);
 		vm_page_free(p);
 	}
 }
 
 /*
  *	vm_object_terminate_pages removes any remaining pageable pages
  *	from the object and resets the object to an empty state.
  */
 static void
 vm_object_terminate_pages(vm_object_t object)
 {
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	/*
 	 * If the object contained any pages, then reset it to an empty state.
 	 * Rather than incrementally removing each page from the object, the
 	 * pages and the object are reset to an empty state.
 	 */
 	if (object->resident_page_count == 0)
 		return;
 
 	vm_radix_reclaim_callback(&object->rtree,
 	    vm_object_terminate_single_page, object);
 	TAILQ_INIT(&object->memq);
 	object->resident_page_count = 0;
 	if (object->type == OBJT_VNODE)
 		vdrop(object->handle);
 }
 
 /*
  *	vm_object_terminate actually destroys the specified object, freeing
  *	up all previously used resources.
  *
  *	The object must be locked.
  *	This routine may block.
  */
 void
 vm_object_terminate(vm_object_t object)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_DEAD) != 0,
 	    ("terminating non-dead obj %p", object));
 	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
 	    ("terminating collapsing obj %p", object));
 	KASSERT(object->backing_object == NULL,
 	    ("terminating shadow obj %p", object));
 
 	/*
 	 * Wait for the pageout daemon and other current users to be
 	 * done with the object.  Note that new paging_in_progress
 	 * users can come after this wait, but they must check the
 	 * OBJ_DEAD flag (without unlocking the object) and avoid
 	 * operating on an object that is being terminated.
 	 */
 	vm_object_pip_wait(object, "objtrm");
 
 	KASSERT(object->ref_count == 0,
 	    ("vm_object_terminate: object with references, ref_count=%d",
 	    object->ref_count));
 
 	if ((object->flags & OBJ_PG_DTOR) == 0)
 		vm_object_terminate_pages(object);
 
 #if VM_NRESERVLEVEL > 0
 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
 		vm_reserv_break_all(object);
 #endif
 
 	KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0,
 	    ("%s: non-swap obj %p has cred", __func__, object));
 
 	/*
 	 * Let the pager know the object is dead.
 	 */
 	vm_pager_deallocate(object);
 	VM_OBJECT_WUNLOCK(object);
 
 	vm_object_destroy(object);
 }
 
 /*
  * Make the page read-only so that we can clear the object flags.  However, if
  * this is a nosync mmap then the object is likely to stay dirty so do not
  * mess with the page and do not clear the object flags.  Returns TRUE if the
  * page should be flushed, and FALSE otherwise.
  */
 static boolean_t
 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
 {
 
 	vm_page_assert_busied(p);
 
 	/*
 	 * If we have been asked to skip nosync pages and this is a
 	 * nosync page, skip it.  Note that the object flags were not
 	 * cleared in this case so we do not have to set them.
 	 */
 	if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
 		*allclean = FALSE;
 		return (FALSE);
 	} else {
 		pmap_remove_write(p);
 		return (p->dirty != 0);
 	}
 }
 
 /*
  *	vm_object_page_clean
  *
  *	Clean all dirty pages in the specified range of the object.  Leaves
  *	pages on whatever queue they are currently on.  If NOSYNC is set,
  *	then do not write out pages with PGA_NOSYNC set (which originally
  *	comes from MAP_NOSYNC), leaving the object dirty.
  *
  *	For swap objects backing tmpfs regular files, do not flush anything,
  *	but remove write protection on the mapped pages to update mtime through
  *	mmapped writes.
  *
  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
  *	synchronous clustering mode implementation.
  *
  *	Odd semantics: if "end" is zero, the range extends from "start" to
  *	the end of the object.
  *
  *	The object must be locked.
  *
  *	Returns FALSE if some page from the range was not written, as
  *	reported by the pager, and TRUE otherwise.
  */
 boolean_t
 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
     int flags)
 {
 	vm_page_t np, p;
 	vm_pindex_t pi, tend, tstart;
 	int curgeneration, n, pagerflags;
 	boolean_t eio, res, allclean;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
 		return (TRUE);
 
 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
 	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
 	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
 
 	tstart = OFF_TO_IDX(start);
 	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
 	allclean = tstart == 0 && tend >= object->size;
 	res = TRUE;
 
 rescan:
 	curgeneration = object->generation;
 
 	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
 		pi = p->pindex;
 		if (pi >= tend)
 			break;
 		np = TAILQ_NEXT(p, listq);
 		if (vm_page_none_valid(p))
 			continue;
 		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
 			if (object->generation != curgeneration &&
 			    (flags & OBJPC_SYNC) != 0)
 				goto rescan;
 			np = vm_page_find_least(object, pi);
 			continue;
 		}
 		if (!vm_object_page_remove_write(p, flags, &allclean)) {
 			vm_page_xunbusy(p);
 			continue;
 		}
 		if (object->type == OBJT_VNODE) {
 			n = vm_object_page_collect_flush(object, p, pagerflags,
 			    flags, &allclean, &eio);
 			if (eio) {
 				res = FALSE;
 				allclean = FALSE;
 			}
 			if (object->generation != curgeneration &&
 			    (flags & OBJPC_SYNC) != 0)
 				goto rescan;
 
 			/*
 			 * If the VOP_PUTPAGES() did a truncated write, so
 			 * that even the first page of the run is not fully
 			 * written, vm_pageout_flush() returns 0 as the run
 			 * length.  Since the condition that caused the
 			 * truncated write may be permanent, e.g. exhausted
 			 * free space, accepting n == 0 would cause an
 			 * infinite loop.
 			 *
 			 * Forwarding the iterator leaves the unwritten page
 			 * behind, but there is not much we can do if the
 			 * filesystem refuses to write it.
 			 */
 			if (n == 0) {
 				n = 1;
 				allclean = FALSE;
 			}
 		} else {
 			n = 1;
 			vm_page_xunbusy(p);
 		}
 		np = vm_page_find_least(object, pi + n);
 	}
 #if 0
 	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
 #endif
 
 	/*
 	 * Leave updating cleangeneration for tmpfs objects to tmpfs
 	 * scan.  It needs to update mtime, which happens for other
 	 * filesystems during page writeouts.
 	 */
 	if (allclean && object->type == OBJT_VNODE)
 		object->cleangeneration = curgeneration;
 	return (res);
 }
 
 static int
 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
     int flags, boolean_t *allclean, boolean_t *eio)
 {
 	vm_page_t ma[2 * vm_pageout_page_count - 1], tp;
 	int base, count, runlen;
 
 	vm_page_lock_assert(p, MA_NOTOWNED);
 	vm_page_assert_xbusied(p);
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	base = nitems(ma) / 2;
 	ma[base] = p;
 	for (count = 1, tp = p; count < vm_pageout_page_count; count++) {
 		tp = vm_page_next(tp);
 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
 			break;
 		if (!vm_object_page_remove_write(tp, flags, allclean)) {
 			vm_page_xunbusy(tp);
 			break;
 		}
 		ma[base + count] = tp;
 	}
 
 	for (tp = p; count < vm_pageout_page_count; count++) {
 		tp = vm_page_prev(tp);
 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
 			break;
 		if (!vm_object_page_remove_write(tp, flags, allclean)) {
 			vm_page_xunbusy(tp);
 			break;
 		}
 		ma[--base] = tp;
 	}
 
 	vm_pageout_flush(&ma[base], count, pagerflags, nitems(ma) / 2 - base,
 	    &runlen, eio);
 	return (runlen);
 }
 
 /*
  * Note that there is absolutely no sense in writing out
  * anonymous objects, so we track down the vnode object
  * to write out.
  * We invalidate (remove) all pages from the address space
  * for semantic correctness.
  *
  * If the backing object is a device object with unmanaged pages, then any
  * mappings to the specified range of pages must be removed before this
  * function is called.
  *
  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
  * may start out with a NULL object.
  */
 boolean_t
 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
     boolean_t syncio, boolean_t invalidate)
 {
 	vm_object_t backing_object;
 	struct vnode *vp;
 	struct mount *mp;
 	int error, flags, fsync_after;
 	boolean_t res;
 
 	if (object == NULL)
 		return (TRUE);
 	res = TRUE;
 	error = 0;
 	VM_OBJECT_WLOCK(object);
 	while ((backing_object = object->backing_object) != NULL) {
 		VM_OBJECT_WLOCK(backing_object);
 		offset += object->backing_object_offset;
 		VM_OBJECT_WUNLOCK(object);
 		object = backing_object;
 		if (object->size < OFF_TO_IDX(offset + size))
 			size = IDX_TO_OFF(object->size) - offset;
 	}
 	/*
 	 * Flush pages if writing is allowed, invalidate them
 	 * if invalidation requested.  Pages undergoing I/O
 	 * will be ignored by vm_object_page_remove().
 	 *
 	 * We cannot lock the vnode and then wait for paging
 	 * to complete without deadlocking against vm_fault.
 	 * Instead we simply call vm_object_page_remove() and
 	 * allow it to block internally on a page-by-page
 	 * basis when it encounters pages undergoing async
 	 * I/O.
 	 */
 	if (object->type == OBJT_VNODE &&
 	    vm_object_mightbedirty(object) != 0 &&
 	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
 		VM_OBJECT_WUNLOCK(object);
 		(void)vn_start_write(vp, &mp, V_WAIT);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 		if (syncio && !invalidate && offset == 0 &&
 		    atop(size) == object->size) {
 			/*
 			 * If syncing the whole mapping of the file,
 			 * it is faster to schedule all the writes in
 			 * async mode, which also allows clustering,
 			 * and then wait for the I/O to complete.
 			 */
 			flags = 0;
 			fsync_after = TRUE;
 		} else {
 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
 			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
 			fsync_after = FALSE;
 		}
 		VM_OBJECT_WLOCK(object);
 		res = vm_object_page_clean(object, offset, offset + size,
 		    flags);
 		VM_OBJECT_WUNLOCK(object);
 		if (fsync_after) {
 			for (;;) {
 				error = VOP_FSYNC(vp, MNT_WAIT, curthread);
 				if (error != ERELOOKUP)
 					break;
 
 				/*
 				 * Allow SU/bufdaemon to handle more
 				 * dependencies in the meantime.
 				 */
 				VOP_UNLOCK(vp);
 				vn_finished_write(mp);
 
 				(void)vn_start_write(vp, &mp, V_WAIT);
 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 			}
 		}
 		VOP_UNLOCK(vp);
 		vn_finished_write(mp);
 		if (error != 0)
 			res = FALSE;
 		VM_OBJECT_WLOCK(object);
 	}
 	if ((object->type == OBJT_VNODE ||
 	     object->type == OBJT_DEVICE) && invalidate) {
 		if (object->type == OBJT_DEVICE)
 			/*
 			 * The option OBJPR_NOTMAPPED must be passed here
 			 * because vm_object_page_remove() cannot remove
 			 * unmanaged mappings.
 			 */
 			flags = OBJPR_NOTMAPPED;
 		else if (old_msync)
 			flags = 0;
 		else
 			flags = OBJPR_CLEANONLY;
 		vm_object_page_remove(object, OFF_TO_IDX(offset),
 		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
 	}
 	VM_OBJECT_WUNLOCK(object);
 	return (res);
 }
 
 /*
  * Determine whether the given advice can be applied to the object.  Advice is
  * not applied to unmanaged pages since they never belong to page queues, and
  * since MADV_FREE is destructive, it can apply only to anonymous pages that
  * have been mapped at most once.
  */
 static bool
 vm_object_advice_applies(vm_object_t object, int advice)
 {
 
 	if ((object->flags & OBJ_UNMANAGED) != 0)
 		return (false);
 	if (advice != MADV_FREE)
 		return (true);
 	return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
 	    (OBJ_ONEMAPPING | OBJ_ANON));
 }
 
 static void
 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
     vm_size_t size)
 {
 
 	if (advice == MADV_FREE)
 		vm_pager_freespace(object, pindex, size);
 }
 
 /*
  *	vm_object_madvise:
  *
  *	Implements the madvise function at the object/page level.
  *
  *	MADV_WILLNEED	(any object)
  *
  *	    Activate the specified pages if they are resident.
  *
  *	MADV_DONTNEED	(any object)
  *
  *	    Deactivate the specified pages if they are resident.
  *
  *	MADV_FREE	(OBJT_SWAP objects, OBJ_ONEMAPPING only)
  *
  *	    Deactivate and clean the specified pages if they are
  *	    resident.  This permits the process to reuse the pages
  *	    without faulting or the kernel to reclaim the pages
  *	    without I/O.
  */
 void
 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
     int advice)
 {
 	vm_pindex_t tpindex;
 	vm_object_t backing_object, tobject;
 	vm_page_t m, tm;
 
 	if (object == NULL)
 		return;
 
 relookup:
 	VM_OBJECT_WLOCK(object);
 	if (!vm_object_advice_applies(object, advice)) {
 		VM_OBJECT_WUNLOCK(object);
 		return;
 	}
 	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
 		tobject = object;
 
 		/*
 		 * If the next page isn't resident in the top-level object, we
 		 * need to search the shadow chain.  When applying MADV_FREE, we
 		 * take care to release any swap space used to store
 		 * non-resident pages.
 		 */
 		if (m == NULL || pindex < m->pindex) {
 			/*
 			 * Optimize a common case: if the top-level object has
 			 * no backing object, we can skip over the non-resident
 			 * range in constant time.
 			 */
 			if (object->backing_object == NULL) {
 				tpindex = (m != NULL && m->pindex < end) ?
 				    m->pindex : end;
 				vm_object_madvise_freespace(object, advice,
 				    pindex, tpindex - pindex);
 				if ((pindex = tpindex) == end)
 					break;
 				goto next_page;
 			}
 
 			tpindex = pindex;
 			do {
 				vm_object_madvise_freespace(tobject, advice,
 				    tpindex, 1);
 				/*
 				 * Prepare to search the next object in the
 				 * chain.
 				 */
 				backing_object = tobject->backing_object;
 				if (backing_object == NULL)
 					goto next_pindex;
 				VM_OBJECT_WLOCK(backing_object);
 				tpindex +=
 				    OFF_TO_IDX(tobject->backing_object_offset);
 				if (tobject != object)
 					VM_OBJECT_WUNLOCK(tobject);
 				tobject = backing_object;
 				if (!vm_object_advice_applies(tobject, advice))
 					goto next_pindex;
 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
 			    NULL);
 		} else {
 next_page:
 			tm = m;
 			m = TAILQ_NEXT(m, listq);
 		}
 
 		/*
 		 * If the page is not in a normal state, skip it.  The page
 		 * cannot be invalidated while the object lock is held.
 		 */
 		if (!vm_page_all_valid(tm) || vm_page_wired(tm))
 			goto next_pindex;
 		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
 		    ("vm_object_madvise: page %p is fictitious", tm));
 		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
 		    ("vm_object_madvise: page %p is not managed", tm));
 		if (vm_page_tryxbusy(tm) == 0) {
 			if (object != tobject)
 				VM_OBJECT_WUNLOCK(object);
 			if (advice == MADV_WILLNEED) {
 				/*
 				 * Reference the page before unlocking and
 				 * sleeping so that the page daemon is less
 				 * likely to reclaim it.
 				 */
 				vm_page_aflag_set(tm, PGA_REFERENCED);
 			}
 			if (!vm_page_busy_sleep(tm, "madvpo", 0))
 				VM_OBJECT_WUNLOCK(tobject);
   			goto relookup;
 		}
 		vm_page_advise(tm, advice);
 		vm_page_xunbusy(tm);
 		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
 next_pindex:
 		if (tobject != object)
 			VM_OBJECT_WUNLOCK(tobject);
 	}
 	VM_OBJECT_WUNLOCK(object);
 }
 
 /*
  *	vm_object_shadow:
  *
  *	Create a new object which is backed by the
  *	specified existing object range.  The source
  *	object reference is deallocated.
  *
  *	The new object and offset into that object
  *	are returned in the source parameters.
  */
 void
 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
     struct ucred *cred, bool shared)
 {
 	vm_object_t source;
 	vm_object_t result;
 
 	source = *object;
 
 	/*
 	 * Don't create the new object if the old object isn't shared.
 	 *
 	 * If we hold the only reference we can guarantee that it won't
 	 * increase while we have the map locked.  Otherwise the race is
 	 * harmless and we will end up with an extra shadow object that
 	 * will be collapsed later.
 	 */
 	if (source != NULL && source->ref_count == 1 &&
 	    (source->flags & OBJ_ANON) != 0)
 		return;
 
 	/*
 	 * Allocate a new object with the given length.
 	 */
 	result = vm_object_allocate_anon(atop(length), source, cred, length);
 
 	/*
 	 * Store the offset into the source object, and fix up the offset into
 	 * the new object.
 	 */
 	result->backing_object_offset = *offset;
 
 	if (shared || source != NULL) {
 		VM_OBJECT_WLOCK(result);
 
 		/*
 		 * The new object shadows the source object, adding a
 		 * reference to it.  Our caller changes its reference
 		 * to point to the new object, removing a reference to
 		 * the source object.  Net result: no change of
 		 * reference count, unless the caller needs to add one
 		 * more reference due to forking a shared map entry.
 		 */
 		if (shared) {
 			vm_object_reference_locked(result);
 			vm_object_clear_flag(result, OBJ_ONEMAPPING);
 		}
 
 		/*
 		 * Try to optimize the result object's page color when
 		 * shadowing in order to maintain page coloring
 		 * consistency in the combined shadowed object.
 		 */
 		if (source != NULL) {
 			vm_object_backing_insert(result, source);
 			result->domain = source->domain;
 #if VM_NRESERVLEVEL > 0
 			vm_object_set_flag(result,
 			    (source->flags & OBJ_COLORED));
 			result->pg_color = (source->pg_color +
 			    OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
 			    1)) - 1);
 #endif
 		}
 		VM_OBJECT_WUNLOCK(result);
 	}
 
 	/*
 	 * Return the new object and starting offset to the caller.
 	 */
 	*offset = 0;
 	*object = result;
 }
 
 /*
  *	vm_object_split:
  *
  * Split the pages in a map entry into a new object.  This affords
  * easier removal of unused pages, and keeps object inheritance from
  * having a negative impact on memory usage.
  */
 void
 vm_object_split(vm_map_entry_t entry)
 {
-	vm_page_t m, m_next;
+	struct pctrie_iter pages;
+	vm_page_t m;
 	vm_object_t orig_object, new_object, backing_object;
-	vm_pindex_t idx, offidxstart;
+	vm_pindex_t offidxstart;
 	vm_size_t size;
 
 	orig_object = entry->object.vm_object;
 	KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0,
 	    ("vm_object_split:  Splitting object with multiple mappings."));
 	if ((orig_object->flags & OBJ_ANON) == 0)
 		return;
 	if (orig_object->ref_count <= 1)
 		return;
 	VM_OBJECT_WUNLOCK(orig_object);
 
 	offidxstart = OFF_TO_IDX(entry->offset);
 	size = atop(entry->end - entry->start);
 
 	new_object = vm_object_allocate_anon(size, orig_object,
 	    orig_object->cred, ptoa(size));
 
 	/*
 	 * We must wait for the orig_object to complete any in-progress
 	 * collapse so that the swap blocks are stable below.  The
 	 * additional reference on backing_object held by the new object
 	 * will prevent further collapse operations until the split completes.
 	 */
 	VM_OBJECT_WLOCK(orig_object);
 	vm_object_collapse_wait(orig_object);
 
 	/*
 	 * At this point, the new object is still private, so the order in
 	 * which the original and new objects are locked does not matter.
 	 */
 	VM_OBJECT_WLOCK(new_object);
 	new_object->domain = orig_object->domain;
 	backing_object = orig_object->backing_object;
 	if (backing_object != NULL) {
 		vm_object_backing_insert_ref(new_object, backing_object);
 		new_object->backing_object_offset = 
 		    orig_object->backing_object_offset + entry->offset;
 	}
 	if (orig_object->cred != NULL) {
 		crhold(orig_object->cred);
 		KASSERT(orig_object->charge >= ptoa(size),
 		    ("orig_object->charge < 0"));
 		orig_object->charge -= ptoa(size);
 	}
 
 	/*
 	 * Mark the split operation so that swap_pager_getpages() knows
 	 * that the object is in transition.
 	 */
 	vm_object_set_flag(orig_object, OBJ_SPLIT);
-#ifdef INVARIANTS
-	idx = 0;
-#endif
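+	/*
+	 * Walk the original object's resident pages in the split range with
+	 * a pctrie iterator bounded at offidxstart + size (exclusive).
+	 */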
+	vm_page_iter_limit_init(&pages, orig_object, offidxstart + size);
 retry:
-	m = vm_page_find_least(orig_object, offidxstart);
-	KASSERT(m == NULL || idx <= m->pindex - offidxstart,
-	    ("%s: object %p was repopulated", __func__, orig_object));
-	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
-	    m = m_next) {
-		m_next = TAILQ_NEXT(m, listq);
-
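+	/*
+	 * The iterator position is stale once the object locks have been
+	 * dropped, so reset it before looking up the first page again.
+	 */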
+	pctrie_iter_reset(&pages);
+	for (m = vm_page_iter_lookup_ge(&pages, offidxstart); m != NULL;
+	    m = vm_radix_iter_step(&pages)) {
 		/*
 		 * We must wait for pending I/O to complete before we can
 		 * rename the page.
 		 *
 		 * We do not have to VM_PROT_NONE the page as mappings should
 		 * not be changed by this operation.
 		 */
 		if (vm_page_tryxbusy(m) == 0) {
 			VM_OBJECT_WUNLOCK(new_object);
 			if (vm_page_busy_sleep(m, "spltwt", 0))
 				VM_OBJECT_WLOCK(orig_object);
 			VM_OBJECT_WLOCK(new_object);
 			goto retry;
 		}
 
 		/*
 		 * The page was left invalid.  Likely placed there by
 		 * an incomplete fault.  Just remove and ignore.
 		 */
 		if (vm_page_none_valid(m)) {
-			if (vm_page_remove(m))
+			if (vm_page_iter_remove(&pages))
 				vm_page_free(m);
 			continue;
 		}
 
 		/* vm_page_rename() will dirty the page. */
-		if (vm_page_rename(m, new_object, idx)) {
+		if (vm_page_rename(&pages, new_object, m->pindex - offidxstart)) {
 			vm_page_xunbusy(m);
 			VM_OBJECT_WUNLOCK(new_object);
 			VM_OBJECT_WUNLOCK(orig_object);
 			vm_radix_wait();
 			VM_OBJECT_WLOCK(orig_object);
 			VM_OBJECT_WLOCK(new_object);
 			goto retry;
 		}
 
 #if VM_NRESERVLEVEL > 0
 		/*
 		 * If some of the reservation's allocated pages remain with
 		 * the original object, then transferring the reservation to
 		 * the new object is neither particularly beneficial nor
 		 * particularly harmful as compared to leaving the reservation
 		 * with the original object.  If, however, all of the
 		 * reservation's allocated pages are transferred to the new
 		 * object, then transferring the reservation is typically
 		 * beneficial.  Determining which of these two cases applies
 		 * would be more costly than unconditionally renaming the
 		 * reservation.
 		 */
 		vm_reserv_rename(m, new_object, orig_object, offidxstart);
 #endif
 	}
 
 	/*
 	 * swap_pager_copy() can sleep, in which case the orig_object's
 	 * and new_object's locks are released and reacquired.
 	 */
 	swap_pager_copy(orig_object, new_object, offidxstart, 0);
 
 	TAILQ_FOREACH(m, &new_object->memq, listq)
 		vm_page_xunbusy(m);
 
 	vm_object_clear_flag(orig_object, OBJ_SPLIT);
 	VM_OBJECT_WUNLOCK(orig_object);
 	VM_OBJECT_WUNLOCK(new_object);
 	entry->object.vm_object = new_object;
 	entry->offset = 0LL;
 	vm_object_deallocate(orig_object);
 	VM_OBJECT_WLOCK(new_object);
 }
 
 static vm_page_t
-vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p)
+vm_object_collapse_scan_wait(struct pctrie_iter *pages, vm_object_t object,
+    vm_page_t p)
 {
 	vm_object_t backing_object;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	backing_object = object->backing_object;
 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
 
 	KASSERT(p == NULL || p->object == object || p->object == backing_object,
 	    ("invalid ownership %p %p %p", p, object, backing_object));
 	/* The page is only NULL when rename fails. */
 	if (p == NULL) {
 		VM_OBJECT_WUNLOCK(object);
 		VM_OBJECT_WUNLOCK(backing_object);
 		vm_radix_wait();
 		VM_OBJECT_WLOCK(object);
 	} else if (p->object == object) {
 		VM_OBJECT_WUNLOCK(backing_object);
 		if (vm_page_busy_sleep(p, "vmocol", 0))
 			VM_OBJECT_WLOCK(object);
 	} else {
 		VM_OBJECT_WUNLOCK(object);
 		if (!vm_page_busy_sleep(p, "vmocol", 0))
 			VM_OBJECT_WUNLOCK(backing_object);
 		VM_OBJECT_WLOCK(object);
 	}
 	VM_OBJECT_WLOCK(backing_object);
-	return (TAILQ_FIRST(&backing_object->memq));
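+	/*
+	 * The backing object's lock was dropped above, so restart the scan
+	 * from the beginning with a freshly initialized iterator.
+	 */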
+	vm_page_iter_init(pages, backing_object);
+	return (vm_page_iter_lookup_ge(pages, 0));
 }
 
 static void
 vm_object_collapse_scan(vm_object_t object)
 {
+	struct pctrie_iter pages;
 	vm_object_t backing_object;
 	vm_page_t next, p, pp;
 	vm_pindex_t backing_offset_index, new_pindex;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
 
 	backing_object = object->backing_object;
 	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
 
 	/*
 	 * Scan the backing object's resident pages, moving each page into
 	 * the parent object or freeing it.
 	 */
-	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
+	vm_page_iter_init(&pages, backing_object);
+	for (p = vm_page_iter_lookup_ge(&pages, 0); p != NULL; p = next) {
 		next = TAILQ_NEXT(p, listq);
 		new_pindex = p->pindex - backing_offset_index;
 
 		/*
 		 * Check for busy page
 		 */
 		if (vm_page_tryxbusy(p) == 0) {
-			next = vm_object_collapse_scan_wait(object, p);
+			next = vm_object_collapse_scan_wait(&pages, object, p);
 			continue;
 		}
 
 		KASSERT(object->backing_object == backing_object,
 		    ("vm_object_collapse_scan: backing object mismatch %p != %p",
 		    object->backing_object, backing_object));
 		KASSERT(p->object == backing_object,
 		    ("vm_object_collapse_scan: object mismatch %p != %p",
 		    p->object, backing_object));
 
 		if (p->pindex < backing_offset_index ||
 		    new_pindex >= object->size) {
 			vm_pager_freespace(backing_object, p->pindex, 1);
 
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (vm_page_remove(p))
+			if (vm_page_iter_remove(&pages))
 				vm_page_free(p);
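+			/*
+			 * Removing the page does not advance the
+			 * iterator; step it explicitly before the
+			 * next loop iteration.
+			 */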
+			next = vm_radix_iter_step(&pages);
 			continue;
 		}
 
 		if (!vm_page_all_valid(p)) {
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (vm_page_remove(p))
+			if (vm_page_iter_remove(&pages))
 				vm_page_free(p);
+			next = vm_radix_iter_step(&pages);
 			continue;
 		}
 
 		pp = vm_page_lookup(object, new_pindex);
 		if (pp != NULL && vm_page_tryxbusy(pp) == 0) {
 			vm_page_xunbusy(p);
 			/*
 			 * The page in the parent is busy and possibly not
 			 * (yet) valid.  Until its state is finalized by the
 			 * busy bit owner, we can't tell whether it shadows the
 			 * original page.
 			 */
-			next = vm_object_collapse_scan_wait(object, pp);
+			next = vm_object_collapse_scan_wait(&pages, object, pp);
 			continue;
 		}
 
 		if (pp != NULL && vm_page_none_valid(pp)) {
 			/*
 			 * The page was invalid in the parent.  Likely placed
 			 * there by an incomplete fault.  Just remove and
 			 * ignore.  p can replace it.
 			 */
 			if (vm_page_remove(pp))
 				vm_page_free(pp);
 			pp = NULL;
 		}
 
 		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
 			NULL)) {
 			/*
 			 * The page already exists in the parent OR swap exists
 			 * for this location in the parent.  Leave the parent's
 			 * page alone.  Destroy the original page from the
 			 * backing object.
 			 */
 			vm_pager_freespace(backing_object, p->pindex, 1);
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (vm_page_remove(p))
-				vm_page_free(p);
 			if (pp != NULL)
 				vm_page_xunbusy(pp);
+			if (vm_page_iter_remove(&pages))
+				vm_page_free(p);
+			next = vm_radix_iter_step(&pages);
 			continue;
 		}
 
 		/*
 		 * Page does not exist in parent, rename the page from the
 		 * backing object to the main object.
 		 *
 		 * If the page was mapped to a process, it can remain mapped
 		 * through the rename.  vm_page_rename() will dirty the page.
 		 */
-		if (vm_page_rename(p, object, new_pindex)) {
+		if (vm_page_rename(&pages, object, new_pindex)) {
 			vm_page_xunbusy(p);
-			next = vm_object_collapse_scan_wait(object, NULL);
+			next = vm_object_collapse_scan_wait(&pages, object,
+			    NULL);
 			continue;
 		}
 
 		/* Use the old pindex to free the right page. */
 		vm_pager_freespace(backing_object, new_pindex +
 		    backing_offset_index, 1);
 
 #if VM_NRESERVLEVEL > 0
 		/*
 		 * Rename the reservation.
 		 */
 		vm_reserv_rename(p, object, backing_object,
 		    backing_offset_index);
 #endif
 		vm_page_xunbusy(p);
+		next = vm_radix_iter_step(&pages);
 	}
 	return;
 }
 
 /*
  *	vm_object_collapse:
  *
  *	Collapse an object with the object backing it.
  *	Pages in the backing object are moved into the
  *	parent, and the backing object is deallocated.
  */
 void
 vm_object_collapse(vm_object_t object)
 {
 	vm_object_t backing_object, new_backing_object;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	while (TRUE) {
 		KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
 		    ("collapsing invalid object"));
 
 		/*
 		 * Wait for the backing_object to finish any pending
 		 * collapse so that the caller sees the shortest possible
 		 * shadow chain.
 		 */
 		backing_object = vm_object_backing_collapse_wait(object);
 		if (backing_object == NULL)
 			return;
 
 		KASSERT(object->ref_count > 0 &&
 		    object->ref_count > atomic_load_int(&object->shadow_count),
 		    ("collapse with invalid ref %d or shadow %d count.",
 		    object->ref_count, atomic_load_int(&object->shadow_count)));
 		KASSERT((backing_object->flags &
 		    (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
 		    ("vm_object_collapse: Backing object already collapsing."));
 		KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
 		    ("vm_object_collapse: object is already collapsing."));
 
 		/*
 		 * We know that we can either collapse the backing object if
 		 * the parent is the only reference to it, or (perhaps) have
 		 * the parent bypass the object if the parent happens to shadow
 		 * all the resident pages in the entire backing object.
 		 */
 		if (backing_object->ref_count == 1) {
 			KASSERT(atomic_load_int(&backing_object->shadow_count)
 			    == 1,
 			    ("vm_object_collapse: shadow_count: %d",
 			    atomic_load_int(&backing_object->shadow_count)));
 			vm_object_pip_add(object, 1);
 			vm_object_set_flag(object, OBJ_COLLAPSING);
 			vm_object_pip_add(backing_object, 1);
 			vm_object_set_flag(backing_object, OBJ_DEAD);
 
 			/*
 			 * If there is exactly one reference to the backing
 			 * object, we can collapse it into the parent.
 			 */
 			vm_object_collapse_scan(object);
 
 			/*
 			 * Move the pager from backing_object to object.
 			 *
 			 * swap_pager_copy() can sleep, in which case the
 			 * backing_object's and object's locks are released and
 			 * reacquired.
 			 */
 			swap_pager_copy(backing_object, object,
 			    OFF_TO_IDX(object->backing_object_offset), TRUE);
 
 			/*
 			 * Object now shadows whatever backing_object did.
 			 */
 			vm_object_clear_flag(object, OBJ_COLLAPSING);
 			vm_object_backing_transfer(object, backing_object);
 			object->backing_object_offset +=
 			    backing_object->backing_object_offset;
 			VM_OBJECT_WUNLOCK(object);
 			vm_object_pip_wakeup(object);
 
 			/*
 			 * Discard backing_object.
 			 *
 			 * Since the backing object has no pages, no pager left,
 			 * and no object references within it, all that is
 			 * necessary is to dispose of it.
 			 */
 			KASSERT(backing_object->ref_count == 1, (
 "backing_object %p was somehow re-referenced during collapse!",
 			    backing_object));
 			vm_object_pip_wakeup(backing_object);
 			(void)refcount_release(&backing_object->ref_count);
 			umtx_shm_object_terminated(backing_object);
 			vm_object_terminate(backing_object);
 			counter_u64_add(object_collapses, 1);
 			VM_OBJECT_WLOCK(object);
 		} else {
 			/*
 			 * If we do not entirely shadow the backing object,
 			 * there is nothing we can do so we give up.
 			 *
 			 * The object lock and backing_object lock must not
 			 * be dropped during this sequence.
 			 */
 			if (!swap_pager_scan_all_shadowed(object)) {
 				VM_OBJECT_WUNLOCK(backing_object);
 				break;
 			}
 
 			/*
 			 * Make the parent shadow the next object in the
 			 * chain.  Deallocating backing_object will not remove
 			 * it, since its reference count is at least 2.
 			 */
 			vm_object_backing_remove_locked(object);
 			new_backing_object = backing_object->backing_object;
 			if (new_backing_object != NULL) {
 				vm_object_backing_insert_ref(object,
 				    new_backing_object);
 				object->backing_object_offset +=
 				    backing_object->backing_object_offset;
 			}
 
 			/*
 			 * Drop the reference count on backing_object. Since
 			 * its ref_count was at least 2, it will not vanish.
 			 */
 			(void)refcount_release(&backing_object->ref_count);
 			KASSERT(backing_object->ref_count >= 1, (
 "backing_object %p was somehow dereferenced during collapse!",
 			    backing_object));
 			VM_OBJECT_WUNLOCK(backing_object);
 			counter_u64_add(object_bypasses, 1);
 		}
 
 		/*
 		 * Try again with this object's new backing object.
 		 */
 	}
 }
 
 /*
  *	vm_object_page_remove:
  *
  *	For the given object, either frees or invalidates each of the
  *	specified pages.  In general, a page is freed.  However, if a page is
  *	wired for any reason other than the existence of a managed, wired
  *	mapping, then it may be invalidated but not removed from the object.
  *	Pages are specified by the given range ["start", "end") and the option
  *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
  *	extends from "start" to the end of the object.  If the option
  *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
  *	specified range are affected.  If the option OBJPR_NOTMAPPED is
  *	specified, then the pages within the specified range must have no
  *	mappings.  Otherwise, if this option is not specified, any mappings to
  *	the specified pages are removed before the pages are freed or
  *	invalidated.
  *
  *	In general, this operation should only be performed on objects that
  *	contain managed pages.  There are, however, two exceptions.  First, it
  *	is performed on the kernel and kmem objects by vm_map_entry_delete().
  *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
  *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
  *	not be specified and the option OBJPR_NOTMAPPED must be specified.
  *
  *	The object must be locked.
  */
 void
 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
     int options)
 {
-	vm_page_t p, next;
+	struct pctrie_iter pages;
+	vm_page_t p;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
 	    ("vm_object_page_remove: illegal options for object %p", object));
 	if (object->resident_page_count == 0)
 		return;
 	vm_object_pip_add(object, 1);
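+	/*
+	 * Walk the range with a pctrie iterator bounded at "end".  A limit
+	 * of zero leaves the iterator unbounded, which preserves the
+	 * convention that end == 0 extends the range to the end of the
+	 * object.
+	 */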
+	vm_page_iter_limit_init(&pages, object, end);
 again:
-	p = vm_page_find_least(object, start);
-
-	/*
-	 * Here, the variable "p" is either (1) the page with the least pindex
-	 * greater than or equal to the parameter "start" or (2) NULL. 
-	 */
-	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
-		next = TAILQ_NEXT(p, listq);
-
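+	/*
+	 * The object lock may have been dropped while sleeping on a busy
+	 * page, so reset the iterator before looking up the first page
+	 * again.
+	 */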
+	pctrie_iter_reset(&pages);
+	for (p = vm_page_iter_lookup_ge(&pages, start); p != NULL;
+	     p = vm_radix_iter_step(&pages)) {
 		/*
 		 * Skip invalid pages if asked to do so.  Try to avoid acquiring
 		 * the busy lock, as some consumers rely on this to avoid
 		 * deadlocks.
 		 *
 		 * A thread may concurrently transition the page from invalid to
 		 * valid using only the busy lock, so the result of this check
 		 * is immediately stale.  It is up to consumers to handle this,
 		 * for instance by ensuring that all invalid->valid transitions
 		 * happen with a mutex held, as may be possible for a
 		 * filesystem.
 		 */
 		if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p))
 			continue;
 
 		/*
 		 * If the page is wired for any reason besides the existence
 		 * of managed, wired mappings, then it cannot be freed.  For
 		 * example, fictitious pages, which represent device memory,
 		 * are inherently wired and cannot be freed.  They can,
 		 * however, be invalidated if the option OBJPR_CLEANONLY is
 		 * not specified.
 		 */
 		if (vm_page_tryxbusy(p) == 0) {
 			if (vm_page_busy_sleep(p, "vmopar", 0))
 				VM_OBJECT_WLOCK(object);
 			goto again;
 		}
 		if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p)) {
 			vm_page_xunbusy(p);
 			continue;
 		}
 		if (vm_page_wired(p)) {
 wired:
 			if ((options & OBJPR_NOTMAPPED) == 0 &&
 			    object->ref_count != 0)
 				pmap_remove_all(p);
 			if ((options & OBJPR_CLEANONLY) == 0) {
 				vm_page_invalid(p);
 				vm_page_undirty(p);
 			}
 			vm_page_xunbusy(p);
 			continue;
 		}
 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
 		    ("vm_object_page_remove: page %p is fictitious", p));
 		if ((options & OBJPR_CLEANONLY) != 0 &&
 		    !vm_page_none_valid(p)) {
 			if ((options & OBJPR_NOTMAPPED) == 0 &&
 			    object->ref_count != 0 &&
 			    !vm_page_try_remove_write(p))
 				goto wired;
 			if (p->dirty != 0) {
 				vm_page_xunbusy(p);
 				continue;
 			}
 		}
 		if ((options & OBJPR_NOTMAPPED) == 0 &&
 		    object->ref_count != 0 && !vm_page_try_remove_all(p))
 			goto wired;
-		vm_page_free(p);
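+		/* Remove the page via the iterator and free it. */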
+		vm_page_iter_free(&pages);
 	}
 	vm_object_pip_wakeup(object);
 
 	vm_pager_freespace(object, start, (end == 0 ? object->size : end) -
 	    start);
 }
 
 /*
  *	vm_object_page_noreuse:
  *
  *	For the given object, attempt to move the specified pages to
  *	the head of the inactive queue.  This bypasses regular LRU
  *	operation and allows the pages to be reused quickly under memory
  *	pressure.  If a page is wired for any reason, then it will not
  *	be queued.  Pages are specified by the range ["start", "end").
  *	As a special case, if "end" is zero, then the range extends from
  *	"start" to the end of the object.
  *
  *	This operation should only be performed on objects that
  *	contain non-fictitious, managed pages.
  *
  *	The object must be locked.
  */
 void
 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
 	vm_page_t p, next;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
 	    ("vm_object_page_noreuse: illegal object %p", object));
 	if (object->resident_page_count == 0)
 		return;
 	p = vm_page_find_least(object, start);
 
 	/*
 	 * Here, the variable "p" is either (1) the page with the least pindex
 	 * greater than or equal to the parameter "start" or (2) NULL. 
 	 */
 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
 		next = TAILQ_NEXT(p, listq);
 		vm_page_deactivate_noreuse(p);
 	}
 }
 
 /*
  *	Populate the specified range of the object with valid pages.  Returns
  *	TRUE if the range is successfully populated and FALSE otherwise.
  *
  *	Note: This function should be optimized to pass a larger array of
  *	pages to vm_pager_get_pages() before it is applied to a non-
  *	OBJT_DEVICE object.
  *
  *	The object must be locked.
  */
 boolean_t
 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
 	vm_page_t m;
 	vm_pindex_t pindex;
 	int rv;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	for (pindex = start; pindex < end; pindex++) {
 		rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
 		if (rv != VM_PAGER_OK)
 			break;
 
 		/*
 		 * Keep "m" busy because a subsequent iteration may unlock
 		 * the object.
 		 */
 	}
 	if (pindex > start) {
 		m = vm_page_lookup(object, start);
 		while (m != NULL && m->pindex < pindex) {
 			vm_page_xunbusy(m);
 			m = TAILQ_NEXT(m, listq);
 		}
 	}
 	return (pindex == end);
 }
 
 /*
  *	Routine:	vm_object_coalesce
  *	Function:	Coalesces two objects backing up adjoining
  *			regions of memory into a single object.
  *
  *	returns TRUE if objects were combined.
  *
  *	NOTE:	Only works at the moment if the second object is NULL -
  *		if it's not, which object do we lock first?
  *
  *	Parameters:
  *		prev_object	First object to coalesce
  *		prev_offset	Offset into prev_object
  *		prev_size	Size of reference to prev_object
  *		next_size	Size of reference to the second object
  *		reserved	Indicator that extension region has
  *				swap accounted for
  *
  *	Conditions:
  *	The object must *not* be locked.
  */
 boolean_t
 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
 {
 	vm_pindex_t next_pindex;
 
 	if (prev_object == NULL)
 		return (TRUE);
 	if ((prev_object->flags & OBJ_ANON) == 0)
 		return (FALSE);
 
 	VM_OBJECT_WLOCK(prev_object);
 	/*
 	 * Try to collapse the object first.
 	 */
 	vm_object_collapse(prev_object);
 
 	/*
 	 * Can't coalesce if:
 	 *  - more than one reference
 	 *  - paged out
 	 *  - shadows another object
 	 *  - has a copy elsewhere
 	 * (any of which means that the pages not mapped to prev_entry may
 	 * be in use anyway)
 	 */
 	if (prev_object->backing_object != NULL) {
 		VM_OBJECT_WUNLOCK(prev_object);
 		return (FALSE);
 	}
 
 	prev_size >>= PAGE_SHIFT;
 	next_size >>= PAGE_SHIFT;
 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
 
 	if (prev_object->ref_count > 1 &&
 	    prev_object->size != next_pindex &&
 	    (prev_object->flags & OBJ_ONEMAPPING) == 0) {
 		VM_OBJECT_WUNLOCK(prev_object);
 		return (FALSE);
 	}
 
 	/*
 	 * Account for the charge.
 	 */
 	if (prev_object->cred != NULL) {
 		/*
 		 * If prev_object was charged, then this mapping,
 		 * although not charged now, may become writable
 		 * later.  A non-NULL cred in the object would prevent
 		 * swap reservation when write access is enabled, so
 		 * reserve swap now.  A failed reservation causes
 		 * allocation of a separate object for the map entry,
 		 * and swap reservation for that entry is managed at
 		 * the appropriate time.
 		 */
 		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
 		    prev_object->cred)) {
 			VM_OBJECT_WUNLOCK(prev_object);
 			return (FALSE);
 		}
 		prev_object->charge += ptoa(next_size);
 	}
 
 	/*
 	 * Remove any pages that may still be in the object from a previous
 	 * deallocation.
 	 */
 	if (next_pindex < prev_object->size) {
 		vm_object_page_remove(prev_object, next_pindex, next_pindex +
 		    next_size, 0);
 #if 0
 		if (prev_object->cred != NULL) {
 			KASSERT(prev_object->charge >=
 			    ptoa(prev_object->size - next_pindex),
 			    ("object %p overcharged 1 %jx %jx", prev_object,
 				(uintmax_t)next_pindex, (uintmax_t)next_size));
 			prev_object->charge -= ptoa(prev_object->size -
 			    next_pindex);
 		}
 #endif
 	}
 
 	/*
 	 * Extend the object if necessary.
 	 */
 	if (next_pindex + next_size > prev_object->size)
 		prev_object->size = next_pindex + next_size;
 
 	VM_OBJECT_WUNLOCK(prev_object);
 	return (TRUE);
 }
 
 void
 vm_object_set_writeable_dirty_(vm_object_t object)
 {
 	atomic_add_int(&object->generation, 1);
 }
 
 bool
 vm_object_mightbedirty_(vm_object_t object)
 {
 	return (object->generation != object->cleangeneration);
 }
 
 /*
  *	vm_object_unwire:
  *
  *	For each page offset within the specified range of the given object,
  *	find the highest-level page in the shadow chain and unwire it.  A page
  *	must exist at every page offset, and the highest-level page must be
  *	wired.
  */
 void
 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
     uint8_t queue)
 {
 	vm_object_t tobject, t1object;
 	vm_page_t m, tm;
 	vm_pindex_t end_pindex, pindex, tpindex;
 	int depth, locked_depth;
 
 	KASSERT((offset & PAGE_MASK) == 0,
 	    ("vm_object_unwire: offset is not page aligned"));
 	KASSERT((length & PAGE_MASK) == 0,
 	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
 	/* The wired count of a fictitious page never changes. */
 	if ((object->flags & OBJ_FICTITIOUS) != 0)
 		return;
 	pindex = OFF_TO_IDX(offset);
 	end_pindex = pindex + atop(length);
 again:
 	locked_depth = 1;
 	VM_OBJECT_RLOCK(object);
 	m = vm_page_find_least(object, pindex);
 	while (pindex < end_pindex) {
 		if (m == NULL || pindex < m->pindex) {
 			/*
 			 * The first object in the shadow chain doesn't
 			 * contain a page at the current index.  Therefore,
 			 * the page must exist in a backing object.
 			 */
 			tobject = object;
 			tpindex = pindex;
 			depth = 0;
 			do {
 				tpindex +=
 				    OFF_TO_IDX(tobject->backing_object_offset);
 				tobject = tobject->backing_object;
 				KASSERT(tobject != NULL,
 				    ("vm_object_unwire: missing page"));
 				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
 					goto next_page;
 				depth++;
 				if (depth == locked_depth) {
 					locked_depth++;
 					VM_OBJECT_RLOCK(tobject);
 				}
 			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
 			    NULL);
 		} else {
 			tm = m;
 			m = TAILQ_NEXT(m, listq);
 		}
 		if (vm_page_trysbusy(tm) == 0) {
 			for (tobject = object; locked_depth >= 1;
 			    locked_depth--) {
 				t1object = tobject->backing_object;
 				if (tm->object != tobject)
 					VM_OBJECT_RUNLOCK(tobject);
 				tobject = t1object;
 			}
 			tobject = tm->object;
 			if (!vm_page_busy_sleep(tm, "unwbo",
 			    VM_ALLOC_IGN_SBUSY))
 				VM_OBJECT_RUNLOCK(tobject);
 			goto again;
 		}
 		vm_page_unwire(tm, queue);
 		vm_page_sunbusy(tm);
 next_page:
 		pindex++;
 	}
 	/* Release the accumulated object locks. */
 	for (tobject = object; locked_depth >= 1; locked_depth--) {
 		t1object = tobject->backing_object;
 		VM_OBJECT_RUNLOCK(tobject);
 		tobject = t1object;
 	}
 }
 
 /*
  * Return the vnode for the given object, or NULL if none exists.
  * For tmpfs objects, the function may return NULL if there is
  * no vnode allocated at the time of the call.
  */
 struct vnode *
 vm_object_vnode(vm_object_t object)
 {
 	struct vnode *vp;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	vm_pager_getvp(object, &vp, NULL);
 	return (vp);
 }
 
 /*
  * Busy the vm object.  This prevents new pages belonging to the object from
  * becoming busy.  Existing pages persist as busy.  Callers are responsible
  * for checking page state before proceeding.
  */
 void
 vm_object_busy(vm_object_t obj)
 {
 
 	VM_OBJECT_ASSERT_LOCKED(obj);
 
 	blockcount_acquire(&obj->busy, 1);
 	/* The fence is required to order loads of page busy. */
 	atomic_thread_fence_acq_rel();
 }
 
 void
 vm_object_unbusy(vm_object_t obj)
 {
 
 	blockcount_release(&obj->busy, 1);
 }
 
 void
 vm_object_busy_wait(vm_object_t obj, const char *wmesg)
 {
 
 	VM_OBJECT_ASSERT_UNLOCKED(obj);
 
 	(void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
 }
 
 /*
  * This function aims to determine if the object is mapped,
  * specifically, if it is referenced by a vm_map_entry.  Because
  * objects occasionally acquire transient references that do not
  * represent a mapping, the method used here is inexact.  However, it
  * has very low overhead and is good enough for the advisory
  * vm.vmtotal sysctl.
  */
 bool
 vm_object_is_active(vm_object_t obj)
 {
 
 	return (obj->ref_count > atomic_load_int(&obj->shadow_count));
 }
 
 static int
 vm_object_list_handler(struct sysctl_req *req, bool swap_only)
 {
 	struct kinfo_vmobject *kvo;
 	char *fullpath, *freepath;
 	struct vnode *vp;
 	struct vattr va;
 	vm_object_t obj;
 	vm_page_t m;
 	struct cdev *cdev;
 	struct cdevsw *csw;
 	u_long sp;
 	int count, error, ref;
 	key_t key;
 	unsigned short seq;
 	bool want_path;
 
 	if (req->oldptr == NULL) {
 		/*
 		 * If an old buffer has not been provided, generate an
 		 * estimate of the space needed for a subsequent call.
 		 */
 		mtx_lock(&vm_object_list_mtx);
 		count = 0;
 		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
 			if (obj->type == OBJT_DEAD)
 				continue;
 			count++;
 		}
 		mtx_unlock(&vm_object_list_mtx);
 		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
 		    count * 11 / 10));
 	}
 
 	want_path = !(swap_only || jailed(curthread->td_ucred));
 	kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK | M_ZERO);
 	error = 0;
 
 	/*
 	 * VM objects are type stable and are never removed from the
 	 * list once added.  This allows us to safely read obj->object_list
 	 * after reacquiring the VM object lock.
 	 */
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
 		if (obj->type == OBJT_DEAD ||
 		    (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0))
 			continue;
 		VM_OBJECT_RLOCK(obj);
 		if (obj->type == OBJT_DEAD ||
 		    (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) {
 			VM_OBJECT_RUNLOCK(obj);
 			continue;
 		}
 		mtx_unlock(&vm_object_list_mtx);
 		kvo->kvo_size = ptoa(obj->size);
 		kvo->kvo_resident = obj->resident_page_count;
 		kvo->kvo_ref_count = obj->ref_count;
 		kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count);
 		kvo->kvo_memattr = obj->memattr;
 		kvo->kvo_active = 0;
 		kvo->kvo_inactive = 0;
 		kvo->kvo_flags = 0;
 		if (!swap_only) {
 			TAILQ_FOREACH(m, &obj->memq, listq) {
 				/*
 				 * A page may belong to the object but be
 				 * dequeued and set to PQ_NONE while the
 				 * object lock is not held.  This makes the
 				 * reads of m->queue below racy, and we do not
 				 * count pages set to PQ_NONE.  However, this
 				 * sysctl is only meant to give an
 				 * approximation of the system anyway.
 				 */
 				if (vm_page_active(m))
 					kvo->kvo_active++;
 				else if (vm_page_inactive(m))
 					kvo->kvo_inactive++;
 				else if (vm_page_in_laundry(m))
 					kvo->kvo_laundry++;
 			}
 		}
 
 		kvo->kvo_vn_fileid = 0;
 		kvo->kvo_vn_fsid = 0;
 		kvo->kvo_vn_fsid_freebsd11 = 0;
 		freepath = NULL;
 		fullpath = "";
 		vp = NULL;
 		kvo->kvo_type = vm_object_kvme_type(obj, want_path ? &vp :
 		    NULL);
 		if (vp != NULL) {
 			vref(vp);
 		} else if ((obj->flags & OBJ_ANON) != 0) {
 			MPASS(kvo->kvo_type == KVME_TYPE_SWAP);
 			kvo->kvo_me = (uintptr_t)obj;
 			/* tmpfs objs are reported as vnodes */
 			kvo->kvo_backing_obj = (uintptr_t)obj->backing_object;
 			sp = swap_pager_swapped_pages(obj);
 			kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp;
 		}
 		if ((obj->type == OBJT_DEVICE || obj->type == OBJT_MGTDEVICE) &&
 		    (obj->flags & OBJ_CDEVH) != 0) {
 			cdev = obj->un_pager.devp.handle;
 			if (cdev != NULL) {
 				csw = dev_refthread(cdev, &ref);
 				if (csw != NULL) {
 					strlcpy(kvo->kvo_path, cdev->si_name,
 					    sizeof(kvo->kvo_path));
 					dev_relthread(cdev, ref);
 				}
 			}
 		}
 		VM_OBJECT_RUNLOCK(obj);
 		if ((obj->flags & OBJ_SYSVSHM) != 0) {
 			kvo->kvo_flags |= KVMO_FLAG_SYSVSHM;
 			shmobjinfo(obj, &key, &seq);
 			kvo->kvo_vn_fileid = key;
 			kvo->kvo_vn_fsid_freebsd11 = seq;
 		}
 		if ((obj->flags & OBJ_POSIXSHM) != 0) {
 			kvo->kvo_flags |= KVMO_FLAG_POSIXSHM;
 			shm_get_path(obj, kvo->kvo_path,
 			    sizeof(kvo->kvo_path));
 		}
 		if (vp != NULL) {
 			vn_fullpath(vp, &fullpath, &freepath);
 			vn_lock(vp, LK_SHARED | LK_RETRY);
 			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
 				kvo->kvo_vn_fileid = va.va_fileid;
 				kvo->kvo_vn_fsid = va.va_fsid;
 				kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
 								/* truncate */
 			}
 			vput(vp);
 			strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
 			free(freepath, M_TEMP);
 		}
 
 		/* Pack record size down */
 		kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
 		    + strlen(kvo->kvo_path) + 1;
 		kvo->kvo_structsize = roundup(kvo->kvo_structsize,
 		    sizeof(uint64_t));
 		error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
 		maybe_yield();
 		mtx_lock(&vm_object_list_mtx);
 		if (error)
 			break;
 	}
 	mtx_unlock(&vm_object_list_mtx);
 	free(kvo, M_TEMP);
 	return (error);
 }
 
 static int
 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
 {
 	return (vm_object_list_handler(req, false));
 }
 
 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
     "List of VM objects");
 
 static int
 sysctl_vm_object_list_swap(SYSCTL_HANDLER_ARGS)
 {
 	return (vm_object_list_handler(req, true));
 }
 
 /*
  * This sysctl returns a list of the anonymous or swap objects.  The
  * intent is to provide a stripped, optimized list useful for analyzing
  * swap use.  Since technically non-swap (default) objects participate
  * in the shadow chains, and are converted to swap type as needed by
  * the swap pager, we must report them.
  */
 SYSCTL_PROC(_vm, OID_AUTO, swap_objects,
     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0,
     sysctl_vm_object_list_swap, "S,kinfo_vmobject",
     "List of swap VM objects");
 
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
 
 #include <sys/cons.h>
 
 #include <ddb/ddb.h>
 
 static int
 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
 {
 	vm_map_t tmpm;
 	vm_map_entry_t tmpe;
 	vm_object_t obj;
 
 	if (map == 0)
 		return 0;
 
 	if (entry == 0) {
 		VM_MAP_ENTRY_FOREACH(tmpe, map) {
 			if (_vm_object_in_map(map, object, tmpe)) {
 				return 1;
 			}
 		}
 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 		tmpm = entry->object.sub_map;
 		VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
 			if (_vm_object_in_map(tmpm, object, tmpe)) {
 				return 1;
 			}
 		}
 	} else if ((obj = entry->object.vm_object) != NULL) {
 		for (; obj; obj = obj->backing_object)
 			if (obj == object) {
 				return 1;
 			}
 	}
 	return 0;
 }
 
 static int
 vm_object_in_map(vm_object_t object)
 {
 	struct proc *p;
 
 	/* sx_slock(&allproc_lock); */
 	FOREACH_PROC_IN_SYSTEM(p) {
 		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
 			continue;
 		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
 			/* sx_sunlock(&allproc_lock); */
 			return 1;
 		}
 	}
 	/* sx_sunlock(&allproc_lock); */
 	if (_vm_object_in_map(kernel_map, object, 0))
 		return 1;
 	return 0;
 }
 
 DB_SHOW_COMMAND_FLAGS(vmochk, vm_object_check, DB_CMD_MEMSAFE)
 {
 	vm_object_t object;
 
 	/*
 	 * make sure that internal objs are in a map somewhere
 	 * and none have zero ref counts.
 	 */
 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
 		if ((object->flags & OBJ_ANON) != 0) {
 			if (object->ref_count == 0) {
 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
 					(long)object->size);
 			}
 			if (!vm_object_in_map(object)) {
 				db_printf(
 			"vmochk: internal obj is not in a map: "
 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
 				    object->ref_count, (u_long)object->size, 
 				    (u_long)object->size,
 				    (void *)object->backing_object);
 			}
 		}
 		if (db_pager_quit)
 			return;
 	}
 }
 
 /*
  *	vm_object_print:	[ debug ]
  */
 DB_SHOW_COMMAND(object, vm_object_print_static)
 {
 	/* XXX convert args. */
 	vm_object_t object = (vm_object_t)addr;
 	boolean_t full = have_addr;
 
 	vm_page_t p;
 
 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
 #define	count	was_count
 
 	int count;
 
 	if (object == NULL)
 		return;
 
 	db_iprintf(
 	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
 	    object, (int)object->type, (uintmax_t)object->size,
 	    object->resident_page_count, object->ref_count, object->flags,
 	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
 	    atomic_load_int(&object->shadow_count),
 	    object->backing_object ? object->backing_object->ref_count : 0,
 	    object->backing_object, (uintmax_t)object->backing_object_offset);
 
 	if (!full)
 		return;
 
 	db_indent += 2;
 	count = 0;
 	TAILQ_FOREACH(p, &object->memq, listq) {
 		if (count == 0)
 			db_iprintf("memory:=");
 		else if (count == 6) {
 			db_printf("\n");
 			db_iprintf(" ...");
 			count = 0;
 		} else
 			db_printf(",");
 		count++;
 
 		db_printf("(off=0x%jx,page=0x%jx)",
 		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
 
 		if (db_pager_quit)
 			break;
 	}
 	if (count != 0)
 		db_printf("\n");
 	db_indent -= 2;
 }
 
 /* XXX. */
 #undef count
 
 /* XXX need this non-static entry for calling from vm_map_print. */
 void
 vm_object_print(
         /* db_expr_t */ long addr,
 	boolean_t have_addr,
 	/* db_expr_t */ long count,
 	char *modif)
 {
 	vm_object_print_static(addr, have_addr, count, modif);
 }
 
 DB_SHOW_COMMAND_FLAGS(vmopag, vm_object_print_pages, DB_CMD_MEMSAFE)
 {
 	vm_object_t object;
 	vm_pindex_t fidx;
 	vm_paddr_t pa;
 	vm_page_t m, prev_m;
 	int rcount;
 
 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
 		db_printf("new object: %p\n", (void *)object);
 		if (db_pager_quit)
 			return;
 
 		rcount = 0;
 		fidx = 0;
 		pa = -1;
 		TAILQ_FOREACH(m, &object->memq, listq) {
 			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
 			    prev_m->pindex + 1 != m->pindex) {
 				if (rcount) {
 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
 						(long)fidx, rcount, (long)pa);
 					if (db_pager_quit)
 						return;
 					rcount = 0;
 				}
 			}				
 			if (rcount &&
 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
 				++rcount;
 				continue;
 			}
 			if (rcount) {
 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
 					(long)fidx, rcount, (long)pa);
 				if (db_pager_quit)
 					return;
 			}
 			fidx = m->pindex;
 			pa = VM_PAGE_TO_PHYS(m);
 			rcount = 1;
 		}
 		if (rcount) {
 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
 				(long)fidx, rcount, (long)pa);
 			if (db_pager_quit)
 				return;
 		}
 	}
 }
 #endif /* DDB */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 0b9b55337b52..7d093579e35d 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,5882 +1,5948 @@
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*-
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  *	Resident memory management module.
  */
 
 #include <sys/cdefs.h>
 #include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/counter.h>
 #include <sys/domainset.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
 #include <sys/linker.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mman.h>
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sleepqueue.h>
 #include <sys/sbuf.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmmeter.h>
 #include <sys/vnode.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/vm_param.h>
 #include <vm/vm_domainset.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_pagequeue.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_radix.h>
 #include <vm/vm_reserv.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_dumpset.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
 
 #include <machine/md_var.h>
 
 struct vm_domain vm_dom[MAXMEMDOM];
 
 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
 
 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
 
 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
 /* The following fields are protected by the domainset lock. */
 domainset_t __exclusive_cache_line vm_min_domains;
 domainset_t __exclusive_cache_line vm_severe_domains;
 static int vm_min_waiters;
 static int vm_severe_waiters;
 static int vm_pageproc_waiters;
 
 static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "VM page statistics");
 
 static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
     CTLFLAG_RD, &pqstate_commit_retries,
     "Number of failed per-page atomic queue state updates");
 
 static COUNTER_U64_DEFINE_EARLY(queue_ops);
 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
     CTLFLAG_RD, &queue_ops,
     "Number of batched queue operations");
 
 static COUNTER_U64_DEFINE_EARLY(queue_nops);
 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
     CTLFLAG_RD, &queue_nops,
     "Number of batched queue operations with no effects");
 
 /*
  * bogus page -- for I/O to/from partially complete buffers,
  * or for paging into sparsely invalid regions.
  */
 vm_page_t bogus_page;
 
 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;
 
 struct bitset *vm_page_dump;
 long vm_page_dump_pages;
 
 static TAILQ_HEAD(, vm_page) blacklist_head;
 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
 
 static uma_zone_t fakepg_zone;
 
 static vm_page_t vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
     int req, vm_page_t mpred);
 static void vm_page_alloc_check(vm_page_t m);
 static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
-static bool vm_page_free_prep(vm_page_t m);
+static bool vm_page_free_prep(vm_page_t m, bool do_remove);
 static void vm_page_free_toq(vm_page_t m);
+static void vm_page_free_toq_impl(vm_page_t m, bool do_remove);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
     vm_page_t mpred);
 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
     const uint16_t nflag);
 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
     vm_page_t m_run, vm_paddr_t high);
 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
     int req);
 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
     int flags);
 static void vm_page_zone_release(void *arg, void **store, int cnt);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
 static void
 vm_page_init(void *dummy)
 {
 
 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_NOFREE);
 }
 
 static int pgcache_zone_max_pcpu;
 SYSCTL_INT(_vm, OID_AUTO, pgcache_zone_max_pcpu,
     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pgcache_zone_max_pcpu, 0,
     "Per-CPU page cache size");
 
 /*
  * The cache page zone is initialized later since we need to be able to allocate
  * pages before UMA is fully initialized.
  */
 static void
 vm_page_init_cache_zones(void *dummy __unused)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
 	int cache, domain, maxcache, pool;
 
 	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &pgcache_zone_max_pcpu);
 	maxcache = pgcache_zone_max_pcpu * mp_ncpus;
 	for (domain = 0; domain < vm_ndomains; domain++) {
 		vmd = VM_DOMAIN(domain);
 		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
 			pgcache = &vmd->vmd_pgcache[pool];
 			pgcache->domain = domain;
 			pgcache->pool = pool;
 			pgcache->zone = uma_zcache_create("vm pgcache",
 			    PAGE_SIZE, NULL, NULL, NULL, NULL,
 			    vm_page_zone_import, vm_page_zone_release, pgcache,
 			    UMA_ZONE_VM);
 
 			/*
 			 * Limit each pool's zone to 0.1% of the pages in the
 			 * domain.
 			 */
 			cache = maxcache != 0 ? maxcache :
 			    vmd->vmd_page_count / 1000;
 			uma_zone_set_maxcache(pgcache->zone, cache);
 		}
 	}
 }
 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
 
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
 #ifdef CTASSERT
 CTASSERT(sizeof(u_long) >= 8);
 #endif
 #endif
 
 /*
  *	vm_set_page_size:
  *
  *	Sets the page size, perhaps based upon the memory
  *	size.  Must be called before any use of page-size
  *	dependent functions.
  */
 void
 vm_set_page_size(void)
 {
 	if (vm_cnt.v_page_size == 0)
 		vm_cnt.v_page_size = PAGE_SIZE;
 	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
 		panic("vm_set_page_size: page size not a power of two");
 }
 
 /*
  *	vm_page_blacklist_next:
  *
  *	Find the next entry in the provided string of blacklist
  *	addresses.  Entries are separated by space, comma, or newline.
  *	If an invalid integer is encountered then the rest of the
  *	string is skipped.  Updates the list pointer to the next
  *	character, or NULL if the string is exhausted or invalid.
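  *
  *	For example (illustrative addresses only), a list supplied via the
  *	loader might look like "0x12345000,0x23456000 0x34567000"; strtoq()
  *	with base 0 accepts both hexadecimal and decimal entries.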
  */
 static vm_paddr_t
 vm_page_blacklist_next(char **list, char *end)
 {
 	vm_paddr_t bad;
 	char *cp, *pos;
 
 	if (list == NULL || *list == NULL)
 		return (0);
 	if (**list == '\0') {
 		*list = NULL;
 		return (0);
 	}
 
 	/*
 	 * If there's no end pointer then the buffer is coming from
 	 * the kenv and we know it's null-terminated.
 	 */
 	if (end == NULL)
 		end = *list + strlen(*list);
 
 	/* Ensure that strtoq() won't walk off the end */
 	if (*end != '\0') {
 		if (*end == '\n' || *end == ' ' || *end  == ',')
 			*end = '\0';
 		else {
 			printf("Blacklist not terminated, skipping\n");
 			*list = NULL;
 			return (0);
 		}
 	}
 
 	for (pos = *list; *pos != '\0'; pos = cp) {
 		bad = strtoq(pos, &cp, 0);
 		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
 			if (bad == 0) {
 				if (++cp < end)
 					continue;
 				else
 					break;
 			}
 		} else
 			break;
 		if (*cp == '\0' || ++cp >= end)
 			*list = NULL;
 		else
 			*list = cp;
 		return (trunc_page(bad));
 	}
 	printf("Garbage in RAM blacklist, skipping\n");
 	*list = NULL;
 	return (0);
 }
 
 bool
 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	bool found;
 
 	m = vm_phys_paddr_to_vm_page(pa);
 	if (m == NULL)
 		return (true); /* page does not exist, no failure */
 
 	vmd = VM_DOMAIN(vm_phys_domain(pa));
 	vm_domain_free_lock(vmd);
 	found = vm_phys_unfree_page(pa);
 	vm_domain_free_unlock(vmd);
 	if (found) {
 		vm_domain_freecnt_inc(vmd, -1);
 		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
 		if (verbose)
 			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
 	}
 	return (found);
 }
 
 /*
  *	vm_page_blacklist_check:
  *
  *	Iterate through the provided string of blacklist addresses, pulling
  *	each entry out of the physical allocator free list and putting it
  *	onto a list for reporting via the vm.page_blacklist sysctl.
  */
 static void
 vm_page_blacklist_check(char *list, char *end)
 {
 	vm_paddr_t pa;
 	char *next;
 
 	next = list;
 	while (next != NULL) {
 		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
 			continue;
 		vm_page_blacklist_add(pa, bootverbose);
 	}
 }
 
 /*
  *	vm_page_blacklist_load:
  *
  *	Search for a special module named "ram_blacklist".  It'll be a
  *	plain text file provided by the user via the loader directive
  *	of the same name.
  */
 static void
 vm_page_blacklist_load(char **list, char **end)
 {
 	void *mod;
 	u_char *ptr;
 	u_int len;
 
 	mod = NULL;
 	ptr = NULL;
 
 	mod = preload_search_by_type("ram_blacklist");
 	if (mod != NULL) {
 		ptr = preload_fetch_addr(mod);
 		len = preload_fetch_size(mod);
 	}
 	*list = ptr;
 	if (ptr != NULL)
 		*end = ptr + len;
 	else
 		*end = NULL;
 	return;
 }
 
 static int
 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
 {
 	vm_page_t m;
 	struct sbuf sbuf;
 	int error, first;
 
 	first = 1;
 	error = sysctl_wire_old_buffer(req, 0);
 	if (error != 0)
 		return (error);
 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
 	TAILQ_FOREACH(m, &blacklist_head, listq) {
 		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
 		    (uintmax_t)m->phys_addr);
 		first = 0;
 	}
 	error = sbuf_finish(&sbuf);
 	sbuf_delete(&sbuf);
 	return (error);
 }
 
 /*
  * Initialize a dummy page for use in scans of the specified paging queue.
  * In principle, this function only needs to set the flag PG_MARKER.
  * Nonetheless, it write busies the page as a safety precaution.
  */
 void
 vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
 {
 
 	bzero(marker, sizeof(*marker));
 	marker->flags = PG_MARKER;
 	marker->a.flags = aflags;
 	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	marker->a.queue = queue;
 }
 
 static void
 vm_page_domain_init(int domain)
 {
 	struct vm_domain *vmd;
 	struct vm_pagequeue *pq;
 	int i;
 
 	vmd = VM_DOMAIN(domain);
 	bzero(vmd, sizeof(*vmd));
 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
 	    "vm inactive pagequeue";
 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
 	    "vm active pagequeue";
 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
 	    "vm laundry pagequeue";
 	*__DECONST(const char **,
 	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
 	    "vm unswappable pagequeue";
 	vmd->vmd_domain = domain;
 	vmd->vmd_page_count = 0;
 	vmd->vmd_free_count = 0;
 	vmd->vmd_segs = 0;
 	vmd->vmd_oom = FALSE;
 	for (i = 0; i < PQ_COUNT; i++) {
 		pq = &vmd->vmd_pagequeues[i];
 		TAILQ_INIT(&pq->pq_pl);
 		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
 		    MTX_DEF | MTX_DUPOK);
 		pq->pq_pdpages = 0;
 		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
 	}
 	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
 	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
 
 	/*
 	 * inacthead is used to provide FIFO ordering for LRU-bypassing
 	 * insertions.
 	 */
 	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
 	    &vmd->vmd_inacthead, plinks.q);
 
 	/*
 	 * The clock pages are used to implement active queue scanning without
 	 * requeues.  Scans start at clock[0], which is advanced after the scan
 	 * ends.  When the two clock hands meet, they are reset and scanning
 	 * resumes from the head of the queue.
 	 */
 	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
 	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
 	    &vmd->vmd_clock[0], plinks.q);
 	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
 	    &vmd->vmd_clock[1], plinks.q);
 }
 
 /*
  * Initialize a physical page in preparation for adding it to the free
  * lists.
  */
 void
 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool)
 {
 	m->object = NULL;
 	m->ref_count = 0;
 	m->busy_lock = VPB_FREED;
 	m->flags = m->a.flags = 0;
 	m->phys_addr = pa;
 	m->a.queue = PQ_NONE;
 	m->psind = 0;
 	m->segind = segind;
 	m->order = VM_NFREEORDER;
 	m->pool = pool;
 	m->valid = m->dirty = 0;
 	pmap_page_init(m);
 }
 
 #ifndef PMAP_HAS_PAGE_ARRAY
 static vm_paddr_t
 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
 {
 	vm_paddr_t new_end;
 
 	/*
 	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
 	 * However, because this page is allocated from KVM, out-of-bounds
 	 * accesses using the direct map will not be trapped.
 	 */
 	*vaddr += PAGE_SIZE;
 
 	/*
 	 * Allocate physical memory for the page structures, and map it.
 	 */
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
 	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
 	vm_page_array_size = page_range;
 
 	return (new_end);
 }
 #endif
 
 /*
  *	vm_page_startup:
  *
  *	Initializes the resident memory module.  Allocates physical memory for
  *	bootstrapping UMA and some data structures that are used to manage
  *	physical pages.  Initializes these structures, and populates the free
  *	page queues.
  */
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
 	struct vm_phys_seg *seg;
 	struct vm_domain *vmd;
 	vm_page_t m;
 	char *list, *listend;
 	vm_paddr_t end, high_avail, low_avail, new_end, size;
 	vm_paddr_t page_range __unused;
 	vm_paddr_t last_pa, pa, startp, endp;
 	u_long pagecount;
 #if MINIDUMP_PAGE_TRACKING
 	u_long vm_page_dump_size;
 #endif
 	int biggestone, i, segind;
 #ifdef WITNESS
 	vm_offset_t mapped;
 	int witness_size;
 #endif
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
 	long ii;
 #endif
 #ifdef VM_FREEPOOL_LAZYINIT
 	int lazyinit;
 #endif
 
 	vaddr = round_page(vaddr);
 
 	vm_phys_early_startup();
 	biggestone = vm_phys_avail_largest();
 	end = phys_avail[biggestone+1];
 
 	/*
 	 * Initialize the page and queue locks.
 	 */
 	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 	for (i = 0; i < vm_ndomains; i++)
 		vm_page_domain_init(i);
 
 	new_end = end;
 #ifdef WITNESS
 	witness_size = round_page(witness_startup_count());
 	new_end -= witness_size;
 	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
 	    VM_PROT_READ | VM_PROT_WRITE);
 	bzero((void *)mapped, witness_size);
 	witness_startup((void *)mapped);
 #endif
 
 #if MINIDUMP_PAGE_TRACKING
 	/*
 	 * Allocate a bitmap to indicate that a given physical page
 	 * needs to be included in a minidump.
 	 *
 	 * The amd64 port needs this to indicate which direct map pages
 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
 	 *
 	 * However, i386 still needs this workspace internally within the
 	 * minidump code.  In theory, they are not needed on i386, but are
 	 * included should the sf_buf code decide to use them.
 	 */
 	last_pa = 0;
 	vm_page_dump_pages = 0;
 	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
 		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
 		    dump_avail[i] / PAGE_SIZE;
 		if (dump_avail[i + 1] > last_pa)
 			last_pa = dump_avail[i + 1];
 	}
 	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
 	new_end -= vm_page_dump_size;
 	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
 	bzero((void *)vm_page_dump, vm_page_dump_size);
 #if MINIDUMP_STARTUP_PAGE_TRACKING
 	/*
 	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
 	 * in a crash dump.  When pmap_map() uses the direct map, they are
 	 * not automatically included.
 	 */
 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
 		dump_add_page(pa);
 #endif
 #else
 	(void)last_pa;
 #endif
 	phys_avail[biggestone + 1] = new_end;
 #ifdef __amd64__
 	/*
 	 * Request that the physical pages underlying the message buffer be
 	 * included in a crash dump.  Since the message buffer is accessed
 	 * through the direct map, they are not automatically included.
 	 */
 	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
 	last_pa = pa + round_page(msgbufsize);
 	while (pa < last_pa) {
 		dump_add_page(pa);
 		pa += PAGE_SIZE;
 	}
 #endif
 	/*
 	 * Compute the number of pages of memory that will be available for
 	 * use, taking into account the overhead of a page structure per page.
 	 * In other words, solve
 	 *	"available physical memory" - round_page(page_range *
 	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE 
 	 * for page_range.  
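 	 * (Approximately, page_range = size / (PAGE_SIZE +
 	 * sizeof(struct vm_page)), as computed below.)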
 	 */
 	low_avail = phys_avail[0];
 	high_avail = phys_avail[1];
 	for (i = 0; i < vm_phys_nsegs; i++) {
 		if (vm_phys_segs[i].start < low_avail)
 			low_avail = vm_phys_segs[i].start;
 		if (vm_phys_segs[i].end > high_avail)
 			high_avail = vm_phys_segs[i].end;
 	}
 	/* Skip the first chunk.  It is already accounted for. */
 	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
 		if (phys_avail[i] < low_avail)
 			low_avail = phys_avail[i];
 		if (phys_avail[i + 1] > high_avail)
 			high_avail = phys_avail[i + 1];
 	}
 	first_page = low_avail / PAGE_SIZE;
 #ifdef VM_PHYSSEG_SPARSE
 	size = 0;
 	for (i = 0; i < vm_phys_nsegs; i++)
 		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 		size += phys_avail[i + 1] - phys_avail[i];
 #elif defined(VM_PHYSSEG_DENSE)
 	size = high_avail - low_avail;
 #else
 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
 #endif
 
 #ifdef PMAP_HAS_PAGE_ARRAY
 	pmap_page_array_startup(size / PAGE_SIZE);
 	biggestone = vm_phys_avail_largest();
 	end = new_end = phys_avail[biggestone + 1];
 #else
 #ifdef VM_PHYSSEG_DENSE
 	/*
 	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
 	 * the overhead of a page structure per page only if vm_page_array is
 	 * allocated from the last physical memory chunk.  Otherwise, we must
 	 * allocate page structures representing the physical memory
 	 * underlying vm_page_array, even though they will not be used.
 	 */
 	if (new_end != high_avail)
 		page_range = size / PAGE_SIZE;
 	else
 #endif
 	{
 		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
 
 		/*
 		 * If the partial bytes remaining are large enough for
 		 * a page (PAGE_SIZE) without a corresponding
 		 * 'struct vm_page', then new_end will contain an
 		 * extra page after subtracting the length of the VM
 		 * page array.  Compensate by subtracting an extra
 		 * page from new_end.
 		 */
 		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
 			if (new_end == high_avail)
 				high_avail -= PAGE_SIZE;
 			new_end -= PAGE_SIZE;
 		}
 	}
 	end = new_end;
 	new_end = vm_page_array_alloc(&vaddr, end, page_range);
 #endif
 
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Allocate physical memory for the reservation management system's
 	 * data structures, and map it.
 	 */
 	new_end = vm_reserv_startup(&vaddr, new_end);
 #endif
 #if MINIDUMP_PAGE_TRACKING && MINIDUMP_STARTUP_PAGE_TRACKING
 	/*
 	 * Include vm_page_array and vm_reserv_array in a crash dump.
 	 */
 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
 		dump_add_page(pa);
 #endif
 	phys_avail[biggestone + 1] = new_end;
 
 	/*
 	 * Add physical memory segments corresponding to the available
 	 * physical pages.
 	 */
 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 		if (vm_phys_avail_size(i) != 0)
 			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
 
 	/*
 	 * Initialize the physical memory allocator.
 	 */
 	vm_phys_init();
 
 #ifdef VM_FREEPOOL_LAZYINIT
 	lazyinit = 1;
 	TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
 #endif
 
 	/*
 	 * Initialize the page structures and add every available page to the
 	 * physical memory allocator's free lists.
 	 */
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
 	for (ii = 0; ii < vm_page_array_size; ii++) {
 		m = &vm_page_array[ii];
 		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0,
 		    VM_FREEPOOL_DEFAULT);
 		m->flags = PG_FICTITIOUS;
 	}
 #endif
 	vm_cnt.v_page_count = 0;
 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
 		seg = &vm_phys_segs[segind];
 
 		/*
 		 * If lazy vm_page initialization is not enabled, simply
 		 * initialize all of the pages in the segment.  Otherwise, we
 		 * only initialize:
 		 * 1. Pages not covered by phys_avail[], since they might be
 		 *    freed to the allocator at some future point, e.g., by
 		 *    kmem_bootstrap_free().
 		 * 2. The first page of each run of free pages handed to the
 		 *    vm_phys allocator, which in turn defers initialization
 		 *    of pages until they are needed.
 		 * This avoids blocking the boot process for long periods, which
 		 * may be relevant for VMs (which ought to boot as quickly as
 		 * possible) and/or systems with large amounts of physical
 		 * memory.
 		 */
 #ifdef VM_FREEPOOL_LAZYINIT
 		if (lazyinit) {
 			startp = seg->start;
 			for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 				if (startp >= seg->end)
 					break;
 
 				if (phys_avail[i + 1] < startp)
 					continue;
 				if (phys_avail[i] <= startp) {
 					startp = phys_avail[i + 1];
 					continue;
 				}
 
 				m = vm_phys_seg_paddr_to_vm_page(seg, startp);
 				for (endp = MIN(phys_avail[i], seg->end);
 				    startp < endp; startp += PAGE_SIZE, m++) {
 					vm_page_init_page(m, startp, segind,
 					    VM_FREEPOOL_DEFAULT);
 				}
 			}
 		} else
 #endif
 			for (m = seg->first_page, pa = seg->start;
 			    pa < seg->end; m++, pa += PAGE_SIZE) {
 				vm_page_init_page(m, pa, segind,
 				    VM_FREEPOOL_DEFAULT);
 			}
 
 		/*
 		 * Add the segment's pages that are covered by one of
 		 * phys_avail's ranges to the free lists.
 		 */
 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 			if (seg->end <= phys_avail[i] ||
 			    seg->start >= phys_avail[i + 1])
 				continue;
 
 			startp = MAX(seg->start, phys_avail[i]);
 			endp = MIN(seg->end, phys_avail[i + 1]);
 			pagecount = (u_long)atop(endp - startp);
 			if (pagecount == 0)
 				continue;
 
 			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
 #ifdef VM_FREEPOOL_LAZYINIT
 			if (lazyinit) {
 				vm_page_init_page(m, startp, segind,
 				    VM_FREEPOOL_LAZYINIT);
 			}
 #endif
 			vmd = VM_DOMAIN(seg->domain);
 			vm_domain_free_lock(vmd);
 			vm_phys_enqueue_contig(m, pagecount);
 			vm_domain_free_unlock(vmd);
 			vm_domain_freecnt_inc(vmd, pagecount);
 			vm_cnt.v_page_count += (u_int)pagecount;
 			vmd->vmd_page_count += (u_int)pagecount;
 			vmd->vmd_segs |= 1UL << segind;
 		}
 	}
 
 	/*
 	 * Remove blacklisted pages from the physical memory allocator.
 	 */
 	TAILQ_INIT(&blacklist_head);
 	vm_page_blacklist_load(&list, &listend);
 	vm_page_blacklist_check(list, listend);
 
 	list = kern_getenv("vm.blacklist");
 	vm_page_blacklist_check(list, NULL);
 
 	freeenv(list);
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Initialize the reservation management system.
 	 */
 	vm_reserv_init();
 #endif
 
 	return (vaddr);
 }
 
 void
 vm_page_reference(vm_page_t m)
 {
 
 	vm_page_aflag_set(m, PGA_REFERENCED);
 }
 
 /*
  *	vm_page_trybusy
  *
  *	Helper routine for grab functions to trylock busy.
  *
  *	Returns true on success and false on failure.
  */
 static bool
 vm_page_trybusy(vm_page_t m, int allocflags)
 {
 
 	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
 		return (vm_page_trysbusy(m));
 	else
 		return (vm_page_tryxbusy(m));
 }
 
 /*
  *	vm_page_tryacquire
  *
  *	Helper routine for grab functions to trylock busy and wire.
  *
  *	Returns true on success and false on failure.
  */
 static inline bool
 vm_page_tryacquire(vm_page_t m, int allocflags)
 {
 	bool locked;
 
 	locked = vm_page_trybusy(m, allocflags);
 	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
 		vm_page_wire(m);
 	return (locked);
 }
 
 /*
  *	vm_page_busy_acquire:
  *
  *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
  *	and drop the object lock if necessary.
  */
 bool
 vm_page_busy_acquire(vm_page_t m, int allocflags)
 {
 	vm_object_t obj;
 	bool locked;
 
 	/*
 	 * The page's object must be cached here because the page's
 	 * identity can change during the sleep, which could otherwise
 	 * cause a different object to be re-locked.
 	 * It is assumed that the callers already hold a reference to
 	 * the object.
 	 */
 	obj = atomic_load_ptr(&m->object);
 	for (;;) {
 		if (vm_page_tryacquire(m, allocflags))
 			return (true);
 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 			return (false);
 		if (obj != NULL)
 			locked = VM_OBJECT_WOWNED(obj);
 		else
 			locked = false;
 		MPASS(locked || vm_page_wired(m));
 		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
 		    locked) && locked)
 			VM_OBJECT_WLOCK(obj);
 		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
 			return (false);
 		KASSERT(m->object == obj || m->object == NULL,
 		    ("vm_page_busy_acquire: page %p does not belong to %p",
 		    m, obj));
 	}
 }
 
 /*
  *	vm_page_busy_downgrade:
  *
  *	Downgrade an exclusive busy page into a single shared busy page.
  */
 void
 vm_page_busy_downgrade(vm_page_t m)
 {
 	u_int x;
 
 	vm_page_assert_xbusied(m);
 
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if (atomic_fcmpset_rel_int(&m->busy_lock,
 		    &x, VPB_SHARERS_WORD(1)))
 			break;
 	}
 	if ((x & VPB_BIT_WAITERS) != 0)
 		wakeup(m);
 }
 
 /*
  *	vm_page_busy_tryupgrade:
  *
  *	Attempt to upgrade a single shared busy into an exclusive busy.
  */
 int
 vm_page_busy_tryupgrade(vm_page_t m)
 {
 	u_int ce, x;
 
 	vm_page_assert_sbusied(m);
 
 	x = vm_page_busy_fetch(m);
 	ce = VPB_CURTHREAD_EXCLUSIVE;
 	for (;;) {
 		if (VPB_SHARERS(x) > 1)
 			return (0);
 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
 		    ("vm_page_busy_tryupgrade: invalid lock state"));
 		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
 		    ce | (x & VPB_BIT_WAITERS)))
 			continue;
 		return (1);
 	}
 }
 
 /*
  *	vm_page_sbusied:
  *
  *	Return a positive value if the page is shared busied, 0 otherwise.
  */
 int
 vm_page_sbusied(vm_page_t m)
 {
 	u_int x;
 
 	x = vm_page_busy_fetch(m);
 	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
 }
 
 /*
  *	vm_page_sunbusy:
  *
  *	Shared unbusy a page.
  */
 void
 vm_page_sunbusy(vm_page_t m)
 {
 	u_int x;
 
 	vm_page_assert_sbusied(m);
 
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		KASSERT(x != VPB_FREED,
 		    ("vm_page_sunbusy: Unlocking freed page."));
 		if (VPB_SHARERS(x) > 1) {
 			if (atomic_fcmpset_int(&m->busy_lock, &x,
 			    x - VPB_ONE_SHARER))
 				break;
 			continue;
 		}
 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
 		    ("vm_page_sunbusy: invalid lock state"));
 		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
 			continue;
 		if ((x & VPB_BIT_WAITERS) == 0)
 			break;
 		wakeup(m);
 		break;
 	}
 }
 
 /*
  *	vm_page_busy_sleep:
  *
  *	Sleep if the page is busy, using the page pointer as wchan.
  *	This is used to implement the hard-path of the busying mechanism.
  *
  *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
  *	will not sleep if the page is shared-busy.
  *
  *	The object lock must be held on entry.
  *
  *	Returns true if it slept and dropped the object lock, or false
  *	if there was no sleep and the lock is still held.
  */
 bool
 vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
 {
 	vm_object_t obj;
 
 	obj = m->object;
 	VM_OBJECT_ASSERT_LOCKED(obj);
 
 	return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
 	    true));
 }
 
 /*
  *	vm_page_busy_sleep_unlocked:
  *
  *	Sleep if the page is busy, using the page pointer as wchan.
  *	This is used to implement the hard-path of the busying mechanism.
  *
  *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
  *	will not sleep if the page is shared-busy.
  *
  *	The object lock must not be held on entry.  The operation will
  *	return if the page changes identity.
  */
 void
 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
     const char *wmesg, int allocflags)
 {
 	VM_OBJECT_ASSERT_UNLOCKED(obj);
 
 	(void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
 }
 
 /*
  *	_vm_page_busy_sleep:
  *
  *	Internal busy sleep function.  Verifies the page identity and
  *	lockstate against parameters.  Returns true if it sleeps and
  *	false otherwise.
  *
  *	allocflags uses VM_ALLOC_* flags to specify the lock required.
  *
  *	If locked is true the lock will be dropped for any true returns
  *	and held for any false returns.
  */
 static bool
 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
     const char *wmesg, int allocflags, bool locked)
 {
 	bool xsleep;
 	u_int x;
 
 	/*
 	 * If the object is busy we must wait for that to drain to zero
 	 * before trying the page again.
 	 */
 	if (obj != NULL && vm_object_busied(obj)) {
 		if (locked)
 			VM_OBJECT_DROP(obj);
 		vm_object_busy_wait(obj, wmesg);
 		return (true);
 	}
 
 	if (!vm_page_busied(m))
 		return (false);
 
 	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
 	sleepq_lock(m);
 	x = vm_page_busy_fetch(m);
 	do {
 		/*
 		 * If the page changes objects or becomes unlocked we can
 		 * simply return.
 		 */
 		if (x == VPB_UNBUSIED ||
 		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
 		    m->object != obj || m->pindex != pindex) {
 			sleepq_release(m);
 			return (false);
 		}
 		if ((x & VPB_BIT_WAITERS) != 0)
 			break;
 	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
 	if (locked)
 		VM_OBJECT_DROP(obj);
 	DROP_GIANT();
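 	/*
 	 * The sleepqueue chain lock taken by sleepq_lock() above is still
 	 * held, so a wakeup() issued after VPB_BIT_WAITERS was set cannot be
 	 * lost before sleepq_wait() blocks.
 	 */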
 	sleepq_add(m, NULL, wmesg, 0, 0);
 	sleepq_wait(m, PVM);
 	PICKUP_GIANT();
 	return (true);
 }
 
 /*
  *	vm_page_trysbusy:
  *
  *	Try to shared busy a page.
  *	Returns 1 if the operation succeeds, otherwise 0.
  *	The operation never sleeps.
  */
 int
 vm_page_trysbusy(vm_page_t m)
 {
 	vm_object_t obj;
 	u_int x;
 
 	obj = m->object;
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if ((x & VPB_BIT_SHARED) == 0)
 			return (0);
 		/*
 		 * Reduce the window for transient busies that will trigger
 		 * false negatives in vm_page_ps_test().
 		 */
 		if (obj != NULL && vm_object_busied(obj))
 			return (0);
 		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
 		    x + VPB_ONE_SHARER))
 			break;
 	}
 
 	/* Refetch the object now that we're guaranteed that it is stable. */
 	obj = m->object;
 	if (obj != NULL && vm_object_busied(obj)) {
 		vm_page_sunbusy(m);
 		return (0);
 	}
 	return (1);
 }
 
 /*
  *	vm_page_tryxbusy:
  *
  *	Try to exclusive busy a page.
  *	Returns 1 if the operation succeeds, otherwise 0.
  *	The operation never sleeps.
  */
 int
 vm_page_tryxbusy(vm_page_t m)
 {
 	vm_object_t obj;
 
 	if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
 	    VPB_CURTHREAD_EXCLUSIVE) == 0)
 		return (0);
 
 	obj = m->object;
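 	/*
 	 * As in vm_page_trysbusy(), the exclusive busy acquired above keeps
 	 * m->object stable, so it is safe to check whether the object has
 	 * been busied and back out if so.
 	 */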
 	if (obj != NULL && vm_object_busied(obj)) {
 		vm_page_xunbusy(m);
 		return (0);
 	}
 	return (1);
 }
 
 static void
 vm_page_xunbusy_hard_tail(vm_page_t m)
 {
 	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
 	/* Wake the waiter. */
 	wakeup(m);
 }
 
 /*
  *	vm_page_xunbusy_hard:
  *
  *	Called when unbusy has failed because there is a waiter.
  */
 void
 vm_page_xunbusy_hard(vm_page_t m)
 {
 	vm_page_assert_xbusied(m);
 	vm_page_xunbusy_hard_tail(m);
 }
 
 void
 vm_page_xunbusy_hard_unchecked(vm_page_t m)
 {
 	vm_page_assert_xbusied_unchecked(m);
 	vm_page_xunbusy_hard_tail(m);
 }
 
 static void
 vm_page_busy_free(vm_page_t m)
 {
 	u_int x;
 
 	atomic_thread_fence_rel();
 	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
 	if ((x & VPB_BIT_WAITERS) != 0)
 		wakeup(m);
 }
 
 /*
  *	vm_page_unhold_pages:
  *
  *	Unhold each of the pages that is referenced by the given array.
  */
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
 
 	for (; count != 0; count--) {
 		vm_page_unwire(*ma, PQ_ACTIVE);
 		ma++;
 	}
 }
 
 vm_page_t
 PHYS_TO_VM_PAGE(vm_paddr_t pa)
 {
 	vm_page_t m;
 
 #ifdef VM_PHYSSEG_SPARSE
 	m = vm_phys_paddr_to_vm_page(pa);
 	if (m == NULL)
 		m = vm_phys_fictitious_to_vm_page(pa);
 	return (m);
 #elif defined(VM_PHYSSEG_DENSE)
 	long pi;
 
 	pi = atop(pa);
 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
 		m = &vm_page_array[pi - first_page];
 		return (m);
 	}
 	return (vm_phys_fictitious_to_vm_page(pa));
 #else
 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
 #endif
 }
 
 /*
  *	vm_page_getfake:
  *
  *	Create a fictitious page with the specified physical address and
  *	memory attribute.  The memory attribute is the only machine-
  *	dependent aspect of a fictitious page that must be initialized.
  */
 vm_page_t
 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
 {
 	vm_page_t m;
 
 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
 	vm_page_initfake(m, paddr, memattr);
 	return (m);
 }
 
 void
 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 {
 
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		/*
 		 * The page's memattr might have changed since the
 		 * previous initialization.  Update the pmap to the
 		 * new memattr.
 		 */
 		goto memattr;
 	}
 	m->phys_addr = paddr;
 	m->a.queue = PQ_NONE;
 	/* Fictitious pages don't use "segind". */
 	m->flags = PG_FICTITIOUS;
 	/* Fictitious pages don't use "order" or "pool". */
 	m->oflags = VPO_UNMANAGED;
 	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	/* Fictitious pages are unevictable. */
 	m->ref_count = 1;
 	pmap_page_init(m);
 memattr:
 	pmap_page_set_memattr(m, memattr);
 }
 
 /*
  *	vm_page_putfake:
  *
  *	Release a fictitious page.
  */
 void
 vm_page_putfake(vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
 	    ("vm_page_putfake: bad page %p", m));
 	vm_page_assert_xbusied(m);
 	vm_page_busy_free(m);
 	uma_zfree(fakepg_zone, m);
 }
 
 /*
  *	vm_page_updatefake:
  *
  *	Update the given fictitious page to the specified physical address and
  *	memory attribute.
  */
 void
 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 {
 
 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
 	    ("vm_page_updatefake: bad page %p", m));
 	m->phys_addr = paddr;
 	pmap_page_set_memattr(m, memattr);
 }
 
 /*
  *	vm_page_free:
  *
  *	Free a page.
  */
 void
 vm_page_free(vm_page_t m)
 {
 
 	m->flags &= ~PG_ZERO;
 	vm_page_free_toq(m);
 }
 
+/*
+ *	vm_page_iter_free:
+ *
+ *	Free the current page, as identified by the iterator.
+ */
+void
+vm_page_iter_free(struct pctrie_iter *pages)
+{
+	vm_page_t m;
+
+	m = vm_radix_iter_page(pages);
+	vm_radix_iter_remove(pages);
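+	/*
+	 * The iterator has already removed the page from the object's radix
+	 * tree, so the free path is told (do_remove == false) not to remove
+	 * it again.
+	 */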
+	m->flags &= ~PG_ZERO;
+	vm_page_free_toq_impl(m, false);
+}
+
 /*
  *	vm_page_free_zero:
  *
  *	Free a page to the zeroed-pages queue
  */
 void
 vm_page_free_zero(vm_page_t m)
 {
 
 	m->flags |= PG_ZERO;
 	vm_page_free_toq(m);
 }
 
 /*
  * Unbusy and handle the page queueing for a page from a getpages request that
  * was optionally read ahead or behind.
  */
 void
 vm_page_readahead_finish(vm_page_t m)
 {
 
 	/* We shouldn't put invalid pages on queues. */
 	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));
 
 	/*
 	 * Since the page is not the one actually needed, whether it should
 	 * be activated or deactivated is not obvious.  Empirical results
 	 * have shown that deactivating the page is usually the best choice,
 	 * unless the page is wanted by another thread.
 	 */
 	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
 		vm_page_activate(m);
 	else
 		vm_page_deactivate(m);
 	vm_page_xunbusy_unchecked(m);
 }
 
 /*
  * Destroy the identity of an invalid page and free it if possible.
  * This is intended to be used when reading a page from backing store fails.
  */
 void
 vm_page_free_invalid(vm_page_t m)
 {
 
 	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
 	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
 	KASSERT(m->object != NULL, ("page %p has no object", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 
 	/*
 	 * We may be attempting to free the page as part of the handling for an
 	 * I/O error, in which case the page was xbusied by a different thread.
 	 */
 	vm_page_xbusy_claim(m);
 
 	/*
 	 * If someone has wired this page while the object lock
 	 * was not held, then the thread that unwires is responsible
 	 * for freeing the page.  Otherwise just free the page now.
 	 * The wire count of this unmapped page cannot change while
 	 * we have the page xbusy and the page's object wlocked.
 	 */
 	if (vm_page_remove(m))
 		vm_page_free(m);
 }
 
 /*
  *	vm_page_dirty_KBI:		[ internal use only ]
  *
  *	Set all bits in the page's dirty field.
  *
  *	The object containing the specified page must be locked if the
  *	call is made from the machine-independent layer.
  *
  *	See vm_page_clear_dirty_mask().
  *
  *	This function should only be called by vm_page_dirty().
  */
 void
 vm_page_dirty_KBI(vm_page_t m)
 {
 
 	/* Refer to this operation by its public name. */
 	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
 	m->dirty = VM_PAGE_BITS_ALL;
 }
 
 /*
  * Insert the given page into the given object at the given pindex.  mpred is
  * used for memq linkage.  From vm_page_insert, lookup is true, mpred is
  * initially NULL, and this procedure looks it up.  From vm_page_insert_after
  * and vm_page_iter_insert, lookup is false and mpred is known to the caller
  * to be valid, and may be NULL if this will be the page with the lowest
  * pindex.
  *
  * The procedure is marked __always_inline to encourage the compiler to
  * eliminate the lookup parameter and the associated alternate branch.
  */
 static __always_inline int
 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
     struct pctrie_iter *pages, bool iter, vm_page_t mpred, bool lookup)
 {
 	int error;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(m->object == NULL,
 	    ("vm_page_insert: page %p already inserted", m));
 
 	/*
 	 * Record the object/offset pair in this page.
 	 */
 	m->object = object;
 	m->pindex = pindex;
 	m->ref_count |= VPRC_OBJREF;
 
 	/*
 	 * Add this page to the object's radix tree, and look up mpred if
 	 * needed.
 	 */
 	if (iter) {
 		KASSERT(!lookup, ("%s: cannot lookup mpred", __func__));
 		error = vm_radix_iter_insert(pages, m);
 	} else if (lookup)
 		error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
 	else
 		error = vm_radix_insert(&object->rtree, m);
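 	/*
 	 * On failure (e.g., no memory for radix trie nodes), undo the
 	 * identity assignment above so the caller can free or retry the
 	 * page.
 	 */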
 	if (__predict_false(error != 0)) {
 		m->object = NULL;
 		m->pindex = 0;
 		m->ref_count &= ~VPRC_OBJREF;
 		return (1);
 	}
 
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
 	vm_page_insert_radixdone(m, object, mpred);
 	vm_pager_page_inserted(object, m);
 	return (0);
 }
 
 /*
  *	vm_page_insert:		[ internal use only ]
  *
  *	Inserts the given mem entry into the object and object list.
  *
  *	The object must be locked.
  */
 int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	return (vm_page_insert_lookup(m, object, pindex, NULL, false, NULL,
 	    true));
 }
 
 /*
  *	vm_page_insert_after:
  *
  *	Inserts the page "m" into the specified object at offset "pindex".
  *
  *	The page "mpred" must immediately precede the offset "pindex" within
  *	the specified object.
  *
  *	The object must be locked.
  */
 static int
 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mpred)
 {
 	return (vm_page_insert_lookup(m, object, pindex, NULL, false, mpred,
 	    false));
 }
 
 /*
  *	vm_page_iter_insert:
  *
  *	Tries to insert the page "m" into the specified object at offset
  *	"pindex" using the iterator "pages".  Returns 0 if the insertion was
  *	successful.
  *
  *	The page "mpred" must immediately precede the offset "pindex" within
  *	the specified object.
  *
  *	The object must be locked.
  */
 static int
 vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred)
 {
 	return (vm_page_insert_lookup(m, object, pindex, pages, true, mpred,
 	    false));
 }
 
 /*
  *	vm_page_insert_radixdone:
  *
  *	Complete page "m" insertion into the specified object after the
  *	radix trie hooking.
  *
  *	The page "mpred" must precede the offset "m->pindex" within the
  *	specified object.
  *
  *	The object must be locked.
  */
 static void
 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object != NULL && m->object == object,
 	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
 	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object,
 		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < m->pindex,
 		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
 		KASSERT(TAILQ_NEXT(mpred, listq) == NULL ||
 		    m->pindex < TAILQ_NEXT(mpred, listq)->pindex,
 		    ("vm_page_insert_radixdone: pindex doesn't precede msucc"));
 	} else {
 		KASSERT(TAILQ_EMPTY(&object->memq) ||
 		    m->pindex < TAILQ_FIRST(&object->memq)->pindex,
 		    ("vm_page_insert_radixdone: no mpred but not first page"));
 	}
 
 	if (mpred != NULL)
 		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
 	else
 		TAILQ_INSERT_HEAD(&object->memq, m, listq);
 
 	/*
 	 * Show that the object has one more resident page.
 	 */
 	object->resident_page_count++;
 
 	/*
 	 * Hold the vnode until the last page is released.
 	 */
 	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
 		vhold(object->handle);
 
 	/*
 	 * Since we are inserting a new and possibly dirty page,
 	 * update the object's generation count.
 	 */
 	if (pmap_page_is_write_mapped(m))
 		vm_object_set_writeable_dirty(object);
 }
 
 /*
- * Do the work to remove a page from its object.  The caller is responsible for
- * updating the page's fields to reflect this removal.
+ *	vm_page_remove_radixdone
+ *
+ *	Complete page "m" removal from the specified object after the radix trie
+ *	unhooking.
+ *
+ *	The caller is responsible for updating the page's fields to reflect this
+ *	removal.
  */
 static void
-vm_page_object_remove(vm_page_t m)
+vm_page_remove_radixdone(vm_page_t m)
 {
 	vm_object_t object;
-	vm_page_t mrem __diagused;
 
 	vm_page_assert_xbusied(m);
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
 	    ("page %p is missing its object ref", m));
 
 	/* Deferred free of swap space. */
 	if ((m->a.flags & PGA_SWAP_FREE) != 0)
 		vm_pager_page_unswapped(m);
 
 	vm_pager_page_removed(object, m);
-
 	m->object = NULL;
-	mrem = vm_radix_remove(&object->rtree, m->pindex);
-	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
 
 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
 	TAILQ_REMOVE(&object->memq, m, listq);
 
 	/*
 	 * And show that the object has one fewer resident page.
 	 */
 	object->resident_page_count--;
 
 	/*
 	 * The vnode may now be recycled.
 	 */
 	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
 		vdrop(object->handle);
 }
 
 /*
  *	vm_page_remove:
  *
  *	Removes the specified page from its containing object, but does not
  *	invalidate any backing storage.  Returns true if the object's reference
  *	was the last reference to the page, and false otherwise.
  *
  *	The object must be locked and the page must be exclusively busied.
  *	The exclusive busy will be released on return.  If this is not the
  *	final ref and the caller does not hold a wire reference it may not
  *	continue to access the page.
  */
 bool
 vm_page_remove(vm_page_t m)
 {
 	bool dropped;
 
 	dropped = vm_page_remove_xbusy(m);
 	vm_page_xunbusy(m);
 
 	return (dropped);
 }
 
+/*
+ *	vm_page_iter_remove:
+ *
+ *	Remove the current page, as identified by the iterator, from its object
+ *	and from the radix tree.
+ */
+bool
+vm_page_iter_remove(struct pctrie_iter *pages)
+{
+	vm_page_t m;
+	bool dropped;
+
+	m = vm_radix_iter_page(pages);
+	vm_radix_iter_remove(pages);
+	vm_page_remove_radixdone(m);
+	dropped = (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
+	vm_page_xunbusy(m);
+
+	return (dropped);
+}
+
+/*
+ *	vm_page_radix_remove
+ *
+ *	Removes the specified page from the radix tree.
+ */
+static void
+vm_page_radix_remove(vm_page_t m)
+{
+	vm_page_t mrem __diagused;
+
+	mrem = vm_radix_remove(&m->object->rtree, m->pindex);
+	KASSERT(mrem == m,
+	    ("removed page %p, expected page %p", mrem, m));
+}
+
 /*
  *	vm_page_remove_xbusy
  *
  *	Removes the page but leaves the xbusy held.  Returns true if this
  *	removed the final ref and false otherwise.
  */
 bool
 vm_page_remove_xbusy(vm_page_t m)
 {
 
-	vm_page_object_remove(m);
+	vm_page_radix_remove(m);
+	vm_page_remove_radixdone(m);
 	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
 }
 
 /*
  *	vm_page_lookup:
  *
  *	Returns the page associated with the object/offset
  *	pair specified; if none is found, NULL is returned.
  *
  *	The object must be locked.
  */
 vm_page_t
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	return (vm_radix_lookup(&object->rtree, pindex));
 }
 
 /*
  *	vm_page_iter_init:
  *
  *	Initialize iterator for vm pages.
  */
 void
 vm_page_iter_init(struct pctrie_iter *pages, vm_object_t object)
 {
 
 	vm_radix_iter_init(pages, &object->rtree);
 }
 
 /*
  *	vm_page_iter_limit_init:
  *
  *	Initialize iterator for vm pages, bounded by the given index limit.
  */
 void
 vm_page_iter_limit_init(struct pctrie_iter *pages, vm_object_t object,
     vm_pindex_t limit)
 {
 
 	vm_radix_iter_limit_init(pages, &object->rtree, limit);
 }
 
 /*
  *	vm_page_iter_lookup:
  *
  *	Returns the page associated with the object/offset pair specified, and
  *	stores the path to its position; if none is found, NULL is returned.
  *
  *	The iter pctrie must be locked.
  */
 vm_page_t
 vm_page_iter_lookup(struct pctrie_iter *pages, vm_pindex_t pindex)
 {
 
 	return (vm_radix_iter_lookup(pages, pindex));
 }
 
 /*
  *	vm_page_lookup_unlocked:
  *
  *	Returns the page associated with the object/offset pair specified;
  *	if none is found, NULL is returned.  The page may no longer be
  *	present in the object at the time that this function returns.  Only
  *	useful for opportunistic checks such as inmem().
  */
 vm_page_t
 vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
 {
 
 	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
 }
 
 /*
  *	vm_page_relookup:
  *
  *	Returns a page that must already have been busied by
  *	the caller.  Used for bogus page replacement.
  */
 vm_page_t
 vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
 
 	m = vm_page_lookup_unlocked(object, pindex);
 	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
 	    m->object == object && m->pindex == pindex,
 	    ("vm_page_relookup: Invalid page %p", m));
 	return (m);
 }
 
 /*
  * This should only be used by lockless functions for releasing transient
  * incorrect acquires.  The page may have been freed after we acquired a
  * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
  * further to do.
  */
 static void
 vm_page_busy_release(vm_page_t m)
 {
 	u_int x;
 
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if (x == VPB_FREED)
 			break;
 		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
 			if (atomic_fcmpset_int(&m->busy_lock, &x,
 			    x - VPB_ONE_SHARER))
 				break;
 			continue;
 		}
 		KASSERT((x & VPB_BIT_SHARED) != 0 ||
 		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
 		    ("vm_page_busy_release: %p xbusy not owned.", m));
 		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
 			continue;
 		if ((x & VPB_BIT_WAITERS) != 0)
 			wakeup(m);
 		break;
 	}
 }
 
 /*
  *	vm_page_find_least:
  *
  *	Returns the page associated with the object with least pindex
  *	greater than or equal to the parameter pindex, or NULL.
  *
  *	The object must be locked.
  */
 vm_page_t
 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
 		m = vm_radix_lookup_ge(&object->rtree, pindex);
 	return (m);
 }
 
 /*
  *	vm_page_iter_lookup_ge:
  *
  *	Returns the page associated with the object with least pindex
  *	greater than or equal to the parameter pindex, or NULL.  Initializes the
  *	iterator to point to that page.
  *
  *	The iter pctrie must be locked.
  */
 vm_page_t
 vm_page_iter_lookup_ge(struct pctrie_iter *pages, vm_pindex_t pindex)
 {
 
 	return (vm_radix_iter_lookup_ge(pages, pindex));
 }
 
 /*
  * Returns the given page's successor (by pindex) within the object if it is
  * resident; if none is found, NULL is returned.
  *
  * The object must be locked.
  */
 vm_page_t
 vm_page_next(vm_page_t m)
 {
 	vm_page_t next;
 
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
 		MPASS(next->object == m->object);
 		if (next->pindex != m->pindex + 1)
 			next = NULL;
 	}
 	return (next);
 }
 
 /*
  * Returns the given page's predecessor (by pindex) within the object if it is
  * resident; if none is found, NULL is returned.
  *
  * The object must be locked.
  */
 vm_page_t
 vm_page_prev(vm_page_t m)
 {
 	vm_page_t prev;
 
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
 		MPASS(prev->object == m->object);
 		if (prev->pindex != m->pindex - 1)
 			prev = NULL;
 	}
 	return (prev);
 }
 
 /*
  * Uses the page mnew as a replacement for an existing page at index
  * pindex which must be already present in the object.
  *
  * Both pages must be exclusively busied on enter.  The old page is
  * unbusied on exit.
  *
  * A return value of true means mold is now free.  If this is not the
  * final ref and the caller does not hold a wire reference it may not
  * continue to access the page.
  */
 static bool
 vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mold)
 {
 	vm_page_t mret __diagused;
 	bool dropped;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_assert_xbusied(mold);
 	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
 	    ("vm_page_replace: page %p already in object", mnew));
 
 	/*
 	 * This function mostly follows vm_page_insert() and
 	 * vm_page_remove() without the radix, object count and vnode
 	 * dance.  See those functions for more detailed comments.
 	 */
 
 	mnew->object = object;
 	mnew->pindex = pindex;
 	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
 	mret = vm_radix_replace(&object->rtree, mnew);
 	KASSERT(mret == mold,
 	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
 	KASSERT((mold->oflags & VPO_UNMANAGED) ==
 	    (mnew->oflags & VPO_UNMANAGED),
 	    ("vm_page_replace: mismatched VPO_UNMANAGED"));
 
 	/* Keep the resident page list in sorted order. */
 	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
 	TAILQ_REMOVE(&object->memq, mold, listq);
 	mold->object = NULL;
 
 	/*
 	 * The object's resident_page_count does not change because we have
 	 * swapped one page for another, but the generation count should
 	 * change if the page is dirty.
 	 */
 	if (pmap_page_is_write_mapped(mnew))
 		vm_object_set_writeable_dirty(object);
 	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
 	vm_page_xunbusy(mold);
 
 	return (dropped);
 }
 
 void
 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mold)
 {
 
 	vm_page_assert_xbusied(mnew);
 
 	if (vm_page_replace_hold(mnew, object, pindex, mold))
 		vm_page_free(mold);
 }
 
 /*
  *	vm_page_rename:
  *
- *	Move the given memory entry from its
- *	current object to the specified target object/offset.
+ *	Move the current page, as identified by iterator, from its current
+ *	object to the specified target object/offset.
  *
  *	Note: swap associated with the page must be invalidated by the move.  We
  *	      have to do this for several reasons:  (1) we aren't freeing the
  *	      page, (2) we are dirtying the page, (3) the VM system is probably
  *	      moving the page from object A to B, and will then later move
  *	      the backing store from A to B and we can't have a conflict.
  *
  *	Note: we *always* dirty the page.  It is necessary both for the
  *	      fact that we moved it, and because we may be invalidating
  *	      swap.
  *
  *	The objects must be locked.
  */
 int
-vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
+vm_page_rename(struct pctrie_iter *pages,
+    vm_object_t new_object, vm_pindex_t new_pindex)
 {
-	vm_page_t mpred;
+	vm_page_t m, mpred;
 	vm_pindex_t opidx;
 
 	VM_OBJECT_ASSERT_WLOCKED(new_object);
 
+	m = vm_radix_iter_page(pages);
 	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
 
 	/*
 	 * Create a custom version of vm_page_insert() which does not depend
 	 * on a precomputed mpred and can take shortcuts based on knowledge
 	 * of that function's implementation.
 	 */
 	opidx = m->pindex;
 	m->pindex = new_pindex;
 	if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
 		m->pindex = opidx;
 		return (1);
 	}
 
 	/*
 	 * The operation cannot fail anymore.  The removal must happen before
 	 * the listq iterator is tainted.
 	 */
 	m->pindex = opidx;
-	vm_page_object_remove(m);
+	vm_radix_iter_remove(pages);
+	vm_page_remove_radixdone(m);
 
 	/* Return back to the new pindex to complete vm_page_insert(). */
 	m->pindex = new_pindex;
 	m->object = new_object;
 
 	vm_page_insert_radixdone(m, new_object, mpred);
 	vm_page_dirty(m);
 	vm_pager_page_inserted(new_object, m);
 	return (0);
 }
 
 /*
  *	vm_page_mpred:
  *
  *	Return the greatest page of the object with index <= pindex,
  *	or NULL, if there is none.  Assumes object lock is held.
  */
 vm_page_t
 vm_page_mpred(vm_object_t object, vm_pindex_t pindex)
 {
 	return (vm_radix_lookup_le(&object->rtree, pindex));
 }
 
 /*
  *	vm_page_alloc:
  *
  *	Allocate and return a page that is associated with the specified
  *	object and offset pair.  By default, this page is exclusive busied.
  *
  *	The caller must always specify an allocation class.
  *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
  *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	optional allocation flags:
  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
  *				intends to allocate
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_SBUSY		shared busy the allocated page
  *	VM_ALLOC_WIRED		wire the allocated page
  *	VM_ALLOC_ZERO		prefer a zeroed page
  */
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
 
 	return (vm_page_alloc_after(object, pindex, req,
 	    vm_page_mpred(object, pindex)));
 }
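 
 /*
  * An illustrative (hypothetical) vm_page_alloc() caller; "obj", "idx" and
  * "m" are placeholders, not taken from existing code.  The caller holds
  * the object's write lock and sleeps and retries when no page is
  * available.  Note that VM_ALLOC_ZERO only expresses a preference, so the
  * caller zeroes the page itself when required:
  *
  *	VM_OBJECT_WLOCK(obj);
  *	while ((m = vm_page_alloc(obj, idx,
  *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
  *		VM_OBJECT_WUNLOCK(obj);
  *		vm_wait(obj);
  *		VM_OBJECT_WLOCK(obj);
  *	}
  *	if ((m->flags & PG_ZERO) == 0)
  *		pmap_zero_page(m);
  */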
 
 /*
  * Allocate a page in the specified object with the given page index.  To
  * optimize insertion of the page into the object, the caller must also specify
  * the resident page in the object with largest index smaller than the given
  * page index, or NULL if no such page exists.
  */
 static vm_page_t
 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
     int req, vm_page_t mpred)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
 		m = vm_page_alloc_domain_after(object, pindex, domain, req,
 		    mpred);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
 	return (m);
 }
 
 /*
  * Returns true if the number of free pages exceeds the minimum
  * for the request class and false otherwise.
  */
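 /*
  * For example, a VM_ALLOC_NORMAL request for npages pages succeeds only
  * while vmd_free_count >= vmd_free_reserved + npages, a VM_ALLOC_SYSTEM
  * request only while vmd_free_count >= vmd_interrupt_free_min + npages,
  * and a VM_ALLOC_INTERRUPT request may drain the free count to zero.
  */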
 static int
 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages)
 {
 	u_int limit, old, new;
 
 	if (req_class == VM_ALLOC_INTERRUPT)
 		limit = 0;
 	else if (req_class == VM_ALLOC_SYSTEM)
 		limit = vmd->vmd_interrupt_free_min;
 	else
 		limit = vmd->vmd_free_reserved;
 
 	/*
 	 * Attempt to reserve the pages.  Fail if we're below the limit.
 	 */
 	limit += npages;
 	old = atomic_load_int(&vmd->vmd_free_count);
 	do {
 		if (old < limit)
 			return (0);
 		new = old - npages;
 	} while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
 
 	/* Wake the page daemon if we've crossed the threshold. */
 	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
 		pagedaemon_wakeup(vmd->vmd_domain);
 
 	/* Only update bitsets on transitions. */
 	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
 	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
 		vm_domain_set(vmd);
 
 	return (1);
 }
 
 int
 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
 {
 	int req_class;
 
 	/*
 	 * The page daemon is allowed to dig deeper into the free page list.
 	 */
 	req_class = req & VM_ALLOC_CLASS_MASK;
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 	return (_vm_domain_allocate(vmd, req_class, npages));
 }
 
 vm_page_t
 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
     int req, vm_page_t mpred)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	int flags;
 
 #define	VPA_FLAGS	(VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL |	\
 			 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY |		\
 			 VM_ALLOC_SBUSY | VM_ALLOC_WIRED |		\
 			 VM_ALLOC_NODUMP | VM_ALLOC_ZERO |		\
 			 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK)
 	KASSERT((req & ~VPA_FLAGS) == 0,
 	    ("invalid request %#x", req));
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
 	KASSERT(mpred == NULL || mpred->pindex < pindex,
 	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
 	    (uintmax_t)pindex));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	flags = 0;
 	m = NULL;
 	if (!vm_pager_can_alloc_page(object, pindex))
 		return (NULL);
 again:
 	if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
 		m = vm_page_alloc_nofree_domain(domain, req);
 		if (m != NULL)
 			goto found;
 	}
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Can we allocate the page from a reservation?
 	 */
 	if (vm_object_reserv(object) &&
 	    (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
 	    NULL) {
 		goto found;
 	}
 #endif
 	vmd = VM_DOMAIN(domain);
 	if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) {
 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
 		    M_NOWAIT | M_NOVM);
 		if (m != NULL) {
 			flags |= PG_PCPU_CACHE;
 			goto found;
 		}
 	}
 	if (vm_domain_allocate(vmd, req, 1)) {
 		/*
 		 * Allocate the page from the free page queues.
 		 */
 		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0);
 		vm_domain_free_unlock(vmd);
 		if (m == NULL) {
 			vm_domain_freecnt_inc(vmd, 1);
 #if VM_NRESERVLEVEL > 0
 			if (vm_reserv_reclaim_inactive(domain))
 				goto again;
 #endif
 		}
 	}
 	if (m == NULL) {
 		/*
 		 * Not allocatable, give up.
 		 */
 		if (vm_domain_alloc_fail(vmd, object, req))
 			goto again;
 		return (NULL);
 	}
 
 	/*
 	 * At this point we had better have found a good page.
 	 */
 found:
 	vm_page_dequeue(m);
 	vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
 	 */
 	flags |= m->flags & PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	if ((req & VM_ALLOC_NOFREE) != 0)
 		flags |= PG_NOFREE;
 	m->flags = flags;
 	m->a.flags = 0;
 	m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 		m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	else if ((req & VM_ALLOC_SBUSY) != 0)
 		m->busy_lock = VPB_SHARERS_WORD(1);
 	else
 		m->busy_lock = VPB_UNBUSIED;
 	if (req & VM_ALLOC_WIRED) {
 		vm_wire_add(1);
 		m->ref_count = 1;
 	}
 	m->a.act_count = 0;
 
 	if (vm_page_insert_after(m, object, pindex, mpred)) {
 		if (req & VM_ALLOC_WIRED) {
 			vm_wire_sub(1);
 			m->ref_count = 0;
 		}
 		KASSERT(m->object == NULL, ("page %p has object", m));
 		m->oflags = VPO_UNMANAGED;
 		m->busy_lock = VPB_UNBUSIED;
 		/* Don't change PG_ZERO. */
 		vm_page_free_toq(m);
 		if (req & VM_ALLOC_WAITFAIL) {
 			VM_OBJECT_WUNLOCK(object);
 			vm_radix_wait();
 			VM_OBJECT_WLOCK(object);
 		}
 		return (NULL);
 	}
 
 	/* Ignore device objects; the pager sets "memattr" for them. */
 	if (object->memattr != VM_MEMATTR_DEFAULT &&
 	    (object->flags & OBJ_FICTITIOUS) == 0)
 		pmap_page_set_memattr(m, object->memattr);
 
 	return (m);
 }
 
 /*
  *	vm_page_alloc_contig:
  *
  *	Allocate a contiguous set of physical pages of the given size "npages"
  *	from the free lists.  All of the physical pages must be at or above
  *	the given physical address "low" and below the given physical address
  *	"high".  The given value "alignment" determines the alignment of the
  *	first physical page in the set.  If the given value "boundary" is
  *	non-zero, then the set of physical pages cannot cross any physical
  *	address boundary that is a multiple of that value.  Both "alignment"
  *	and "boundary" must be a power of two.
  *
  *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
  *	then the memory attribute setting for the physical pages is configured
  *	to the object's memory attribute setting.  Otherwise, the memory
  *	attribute setting for the physical pages is configured to "memattr",
  *	overriding the object's memory attribute setting.  However, if the
  *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
  *	memory attribute setting for the physical pages cannot be configured
  *	to VM_MEMATTR_DEFAULT.
  *
  *	The specified object may not contain fictitious pages.
  *
  *	The caller must always specify an allocation class.
  *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
  *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	optional allocation flags:
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_SBUSY		shared busy the allocated page
  *	VM_ALLOC_WIRED		wire the allocated page
  *	VM_ALLOC_ZERO		prefer a zeroed page
  */
 vm_page_t
 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_page_t bounds[2];
 	vm_page_t m;
 	int domain;
 	int start_segind;
 
 	start_segind = -1;
 
 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
 		m = vm_page_alloc_contig_domain(object, pindex, domain, req,
 		    npages, low, high, alignment, boundary, memattr);
 		if (m != NULL)
 			break;
 		if (start_segind == -1)
 			start_segind = vm_phys_lookup_segind(low);
 		if (vm_phys_find_range(bounds, start_segind, domain,
 		    npages, low, high) == -1) {
 			vm_domainset_iter_ignore(&di, domain);
 		}
 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
 	return (m);
 }
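 
 /*
  * An illustrative (hypothetical) vm_page_alloc_contig() call, with "obj"
  * and "idx" as placeholders: request eight contiguous, wired pages below
  * 4GB, aligned to the size of the run, with no boundary restriction,
  * failing rather than sleeping:
  *
  *	m = vm_page_alloc_contig(obj, idx, VM_ALLOC_NORMAL |
  *	    VM_ALLOC_WIRED | VM_ALLOC_NOWAIT, 8, 0, (vm_paddr_t)1 << 32,
  *	    8 * PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
  *
  * On failure due to fragmentation, see vm_page_reclaim_contig() below
  * for the expected recovery strategy.
  */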
 
 static vm_page_t
 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_domain *vmd;
 	vm_page_t m_ret;
 
 	/*
 	 * Can we allocate the pages without the number of free pages falling
 	 * below the lower bound for the allocation class?
 	 */
 	vmd = VM_DOMAIN(domain);
 	if (!vm_domain_allocate(vmd, req, npages))
 		return (NULL);
 	/*
 	 * Try to allocate the pages from the free page queues.
 	 */
 	vm_domain_free_lock(vmd);
 	m_ret = vm_phys_alloc_contig(domain, npages, low, high,
 	    alignment, boundary);
 	vm_domain_free_unlock(vmd);
 	if (m_ret != NULL)
 		return (m_ret);
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Try to break a reservation to allocate the pages.
 	 */
 	if ((req & VM_ALLOC_NORECLAIM) == 0) {
 		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
 	            high, alignment, boundary);
 		if (m_ret != NULL)
 			return (m_ret);
 	}
 #endif
 	vm_domain_freecnt_inc(vmd, npages);
 	return (NULL);
 }
 
 vm_page_t
 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	struct pctrie_iter pages;
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
 
 #define	VPAC_FLAGS	(VPA_FLAGS | VM_ALLOC_NORECLAIM)
 	KASSERT((req & ~VPAC_FLAGS) == 0,
 	    ("invalid request %#x", req));
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
 	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
 	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
 	    ("invalid request %#x", req));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
 	    ("vm_page_alloc_contig: object %p has fictitious pages",
 	    object));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
 	vm_page_iter_init(&pages, object);
 	mpred = vm_radix_iter_lookup_le(&pages, pindex);
 	KASSERT(mpred == NULL || mpred->pindex != pindex,
 	    ("vm_page_alloc_contig: pindex already allocated"));
 	for (;;) {
 #if VM_NRESERVLEVEL > 0
 		/*
 		 * Can we allocate the pages from a reservation?
 		 */
 		if (vm_object_reserv(object) &&
 		    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
 		    mpred, npages, low, high, alignment, boundary)) != NULL) {
 			break;
 		}
 #endif
 		if ((m_ret = vm_page_find_contig_domain(domain, req, npages,
 		    low, high, alignment, boundary)) != NULL)
 			break;
 		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req))
 			return (NULL);
 	}
 
 	/*
 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
 	 */
 	flags = PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 		busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	else if ((req & VM_ALLOC_SBUSY) != 0)
 		busy_lock = VPB_SHARERS_WORD(1);
 	else
 		busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0)
 		vm_wire_add(npages);
 	if (object->memattr != VM_MEMATTR_DEFAULT &&
 	    memattr == VM_MEMATTR_DEFAULT)
 		memattr = object->memattr;
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		vm_page_dequeue(m);
 		vm_page_alloc_check(m);
 		m->a.flags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
 		m->busy_lock = busy_lock;
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = oflags;
 		if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
 			KASSERT(m->object == NULL,
 			    ("page %p has object", m));
 			mpred = m;
 			for (m = m_ret; m < &m_ret[npages]; m++) {
 				if (m <= mpred &&
 				    (req & VM_ALLOC_WIRED) != 0)
 					m->ref_count = 0;
 				m->oflags = VPO_UNMANAGED;
 				m->busy_lock = VPB_UNBUSIED;
 				/* Don't change PG_ZERO. */
 				vm_page_free_toq(m);
 			}
 			if (req & VM_ALLOC_WAITFAIL) {
 				VM_OBJECT_WUNLOCK(object);
 				vm_radix_wait();
 				VM_OBJECT_WLOCK(object);
 			}
 			return (NULL);
 		}
 		mpred = m;
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
 		pindex++;
 	}
 	return (m_ret);
 }
 
 /*
  * Allocate a physical page that is not intended to be inserted into a VM
  * object.
  */
 vm_page_t
 vm_page_alloc_noobj_domain(int domain, int req)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	int flags;
 
 #define	VPAN_FLAGS	(VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL |      \
 			 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |		\
 			 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED |		\
 			 VM_ALLOC_NODUMP | VM_ALLOC_ZERO |		\
 			 VM_ALLOC_NOFREE | VM_ALLOC_COUNT_MASK)
 	KASSERT((req & ~VPAN_FLAGS) == 0,
 	    ("invalid request %#x", req));
 
 	flags = ((req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0) |
 	    ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0);
 	vmd = VM_DOMAIN(domain);
 again:
 	if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
 		m = vm_page_alloc_nofree_domain(domain, req);
 		if (m != NULL)
 			goto found;
 	}
 
 	if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
 		    M_NOWAIT | M_NOVM);
 		if (m != NULL) {
 			flags |= PG_PCPU_CACHE;
 			goto found;
 		}
 	}
 
 	if (vm_domain_allocate(vmd, req, 1)) {
 		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
 		vm_domain_free_unlock(vmd);
 		if (m == NULL) {
 			vm_domain_freecnt_inc(vmd, 1);
 #if VM_NRESERVLEVEL > 0
 			if (vm_reserv_reclaim_inactive(domain))
 				goto again;
 #endif
 		}
 	}
 	if (m == NULL) {
 		if (vm_domain_alloc_fail(vmd, NULL, req))
 			goto again;
 		return (NULL);
 	}
 
 found:
 	vm_page_dequeue(m);
 	vm_page_alloc_check(m);
 
 	/*
 	 * Consumers should not rely on a useful default pindex value.
 	 */
 	m->pindex = 0xdeadc0dedeadc0de;
 	m->flags = (m->flags & PG_ZERO) | flags;
 	m->a.flags = 0;
 	m->oflags = VPO_UNMANAGED;
 	m->busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0) {
 		vm_wire_add(1);
 		m->ref_count = 1;
 	}
 
 	if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 
 	return (m);
 }
 
 #if VM_NRESERVLEVEL > 1
 #define	VM_NOFREE_IMPORT_ORDER	(VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER)
 #elif VM_NRESERVLEVEL > 0
 #define	VM_NOFREE_IMPORT_ORDER	VM_LEVEL_0_ORDER
 #else
 #define	VM_NOFREE_IMPORT_ORDER	8
 #endif
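 
 /*
  * Illustrative arithmetic: with the fallback order of 8 above and 4KB
  * pages, each NOFREE import pulls 1 << 8 == 256 pages (1MB) out of the
  * free lists at once; vm_page_alloc_nofree_domain() then hands that
  * chunk out one page at a time via vmd_nofreeq.
  */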
 
 /*
  * Allocate a single NOFREE page.
  *
  * This routine hands out NOFREE pages from higher-order
  * physical memory blocks in order to reduce memory fragmentation.
  * When the NOFREE chunk for a given domain is used up, the routine
  * will try to fetch a new one from the free lists and discard the
  * old one.
  */
 static vm_page_t
 vm_page_alloc_nofree_domain(int domain, int req)
 {
 	vm_page_t m;
 	struct vm_domain *vmd;
 	struct vm_nofreeq *nqp;
 
 	KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req));
 
 	vmd = VM_DOMAIN(domain);
 	nqp = &vmd->vmd_nofreeq;
 	vm_domain_free_lock(vmd);
 	if (nqp->offs >= (1 << VM_NOFREE_IMPORT_ORDER) || nqp->ma == NULL) {
 		if (!vm_domain_allocate(vmd, req,
 		    1 << VM_NOFREE_IMPORT_ORDER)) {
 			vm_domain_free_unlock(vmd);
 			return (NULL);
 		}
 		nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
 		    VM_NOFREE_IMPORT_ORDER);
 		if (nqp->ma == NULL) {
 			vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER);
 			vm_domain_free_unlock(vmd);
 			return (NULL);
 		}
 		nqp->offs = 0;
 	}
 	m = &nqp->ma[nqp->offs++];
 	vm_domain_free_unlock(vmd);
 	VM_CNT_ADD(v_nofree_count, 1);
 
 	return (m);
 }
 
 vm_page_t
 vm_page_alloc_noobj(int req)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_noobj_domain(domain, req);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
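 
 /*
  * An illustrative (hypothetical) vm_page_alloc_noobj() call: allocate a
  * wired page with no backing object, sleeping until one is available.
  * Unlike vm_page_alloc(), VM_ALLOC_ZERO here guarantees a zeroed page:
  *
  *	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
  *	    VM_ALLOC_WAITOK);
  */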
 
 vm_page_t
 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
 		    high, alignment, boundary, memattr);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
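 
 /*
  * An illustrative (hypothetical) vm_page_alloc_noobj_contig() call:
  * sixteen contiguous, wired, zeroed pages below 4GB with no particular
  * alignment or boundary constraint, failing rather than sleeping:
  *
  *	m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
  *	    VM_ALLOC_NOWAIT, 16, 0, (vm_paddr_t)1 << 32, PAGE_SIZE, 0,
  *	    VM_MEMATTR_DEFAULT);
  */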
 
 vm_page_t
 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 	vm_page_t m, m_ret;
 	u_int flags;
 
 #define	VPANC_FLAGS	(VPAN_FLAGS | VM_ALLOC_NORECLAIM)
 	KASSERT((req & ~VPANC_FLAGS) == 0,
 	    ("invalid request %#x", req));
 	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
 	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
 	    ("invalid request %#x", req));
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
 	while ((m_ret = vm_page_find_contig_domain(domain, req, npages,
 	    low, high, alignment, boundary)) == NULL) {
 		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req))
 			return (NULL);
 	}
 
 	/*
 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
 	 */
 	flags = PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	if ((req & VM_ALLOC_WIRED) != 0)
 		vm_wire_add(npages);
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		vm_page_dequeue(m);
 		vm_page_alloc_check(m);
 
 		/*
 		 * Consumers should not rely on a useful default pindex value.
 		 */
 		m->pindex = 0xdeadc0dedeadc0de;
 		m->a.flags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
 		m->busy_lock = VPB_UNBUSIED;
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = VPO_UNMANAGED;
 
 		/*
 		 * Zero the page before updating any mappings since the page is
 		 * not yet shared with any devices which might require the
 		 * non-default memory attribute.  pmap_page_set_memattr()
 		 * flushes data caches before returning.
 		 */
 		if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
 	}
 	return (m_ret);
 }
 
 /*
  * Check a page that has been freshly dequeued from a freelist.
  */
 static void
 vm_page_alloc_check(vm_page_t m)
 {
 
 	KASSERT(m->object == NULL, ("page %p has object", m));
 	KASSERT(m->a.queue == PQ_NONE &&
 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("page %p has unexpected queue %d, flags %#x",
 	    m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
 	KASSERT(m->ref_count == 0, ("page %p has references", m));
 	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
 	KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
 	pmap_vm_page_alloc_check(m);
 }
 
 static int
 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
 	int i;
 
 	pgcache = arg;
 	vmd = VM_DOMAIN(pgcache->domain);
 
 	/*
 	 * The page daemon should avoid creating extra memory pressure since its
 	 * main purpose is to replenish the store of free pages.
 	 */
 	if (vmd->vmd_severeset || curproc == pageproc ||
 	    !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
 	vm_domain_free_lock(vmd);
 	i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
 	    (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
 		vm_domain_freecnt_inc(vmd, cnt - i);
 
 	return (i);
 }
 
 static void
 vm_page_zone_release(void *arg, void **store, int cnt)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
 	vm_page_t m;
 	int i;
 
 	pgcache = arg;
 	vmd = VM_DOMAIN(pgcache->domain);
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
 		m = (vm_page_t)store[i];
 		vm_phys_free_pages(m, 0);
 	}
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, cnt);
 }
 
 #define	VPSC_ANY	0	/* No restrictions. */
 #define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
 #define	VPSC_NOSUPER	2	/* Skip superpages. */
 
 /*
  *	vm_page_scan_contig:
  *
  *	Scan vm_page_array[] between the specified entries "m_start" and
  *	"m_end" for a run of contiguous physical pages that satisfy the
  *	specified conditions, and return the lowest page in the run.  The
  *	specified "alignment" determines the alignment of the lowest physical
  *	page in the run.  If the specified "boundary" is non-zero, then the
  *	run of physical pages cannot span a physical address that is a
  *	multiple of "boundary".
  *
  *	"m_end" is never dereferenced, so it need not point to a vm_page
  *	structure within vm_page_array[].
  *
  *	"npages" must be greater than zero.  "m_start" and "m_end" must not
  *	span a hole (or discontiguity) in the physical address space.  Both
  *	"alignment" and "boundary" must be a power of two.
  */
 static vm_page_t
 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
     u_long alignment, vm_paddr_t boundary, int options)
 {
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_run;
 #if VM_NRESERVLEVEL > 0
 	int level;
 #endif
 	int m_inc, order, run_ext, run_len;
 
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 	m_run = NULL;
 	run_len = 0;
 	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
 		KASSERT((m->flags & PG_MARKER) == 0,
 		    ("page %p is PG_MARKER", m));
 		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
 		    ("fictitious page %p has invalid ref count", m));
 
 		/*
 		 * If the current page would be the start of a run, check its
 		 * physical address against the end, alignment, and boundary
 		 * conditions.  If it doesn't satisfy these conditions, either
 		 * terminate the scan or advance to the next page that
 		 * satisfies the failed condition.
 		 */
 		if (run_len == 0) {
 			KASSERT(m_run == NULL, ("m_run != NULL"));
 			if (m + npages > m_end)
 				break;
 			pa = VM_PAGE_TO_PHYS(m);
 			if (!vm_addr_align_ok(pa, alignment)) {
 				m_inc = atop(roundup2(pa, alignment) - pa);
 				continue;
 			}
 			if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
 				m_inc = atop(roundup2(pa, boundary) - pa);
 				continue;
 			}
 		} else
 			KASSERT(m_run != NULL, ("m_run == NULL"));
 
 retry:
 		m_inc = 1;
 		if (vm_page_wired(m))
 			run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 		else if ((level = vm_reserv_level(m)) >= 0 &&
 		    (options & VPSC_NORESERV) != 0) {
 			run_ext = 0;
 			/* Advance to the end of the reservation. */
 			pa = VM_PAGE_TO_PHYS(m);
 			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
 			    pa);
 		}
 #endif
 		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
 			/*
 			 * The page is considered eligible for relocation if
 			 * and only if it could be laundered or reclaimed by
 			 * the page daemon.
 			 */
 			VM_OBJECT_RLOCK(object);
 			if (object != m->object) {
 				VM_OBJECT_RUNLOCK(object);
 				goto retry;
 			}
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if ((object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_VNODE) {
 				run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 			} else if ((options & VPSC_NOSUPER) != 0 &&
 			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
 				run_ext = 0;
 				/* Advance to the end of the superpage. */
 				pa = VM_PAGE_TO_PHYS(m);
 				m_inc = atop(roundup2(pa + 1,
 				    vm_reserv_size(level)) - pa);
 #endif
 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
 			    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
 				/*
 				 * The page is allocated but eligible for
 				 * relocation.  Extend the current run by one
 				 * page.
 				 */
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
 				KASSERT((m->oflags & (VPO_SWAPINPROG |
 				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 				    ("page %p has unexpected oflags", m));
 				/* Don't care: PGA_NOSYNC. */
 				run_ext = 1;
 			} else
 				run_ext = 0;
 			VM_OBJECT_RUNLOCK(object);
 #if VM_NRESERVLEVEL > 0
 		} else if (level >= 0) {
 			/*
 			 * The page is reserved but not yet allocated.  In
 			 * other words, it is still free.  Extend the current
 			 * run by one page.
 			 */
 			run_ext = 1;
 #endif
 		} else if ((order = m->order) < VM_NFREEORDER) {
 			/*
 			 * The page is enqueued in the physical memory
 			 * allocator's free page queues.  Moreover, it is the
 			 * first page in a power-of-two-sized run of
 			 * contiguous free pages.  Add these pages to the end
 			 * of the current run, and jump ahead.
 			 */
 			run_ext = 1 << order;
 			m_inc = 1 << order;
 		} else {
 			/*
 			 * Skip the page for one of the following reasons: (1)
 			 * It is enqueued in the physical memory allocator's
 			 * free page queues.  However, it is not the first
 			 * page in a run of contiguous free pages.  (This case
 			 * rarely occurs because the scan is performed in
 			 * ascending order.) (2) It is not reserved, and it is
 			 * transitioning from free to allocated.  (Conversely,
 			 * the transition from allocated to free for managed
 			 * pages is blocked by the page busy lock.) (3) It is
 			 * allocated but not contained by an object and not
 			 * wired, e.g., allocated by Xen's balloon driver.
 			 */
 			run_ext = 0;
 		}
 
 		/*
 		 * Extend or reset the current run of pages.
 		 */
 		if (run_ext > 0) {
 			if (run_len == 0)
 				m_run = m;
 			run_len += run_ext;
 		} else {
 			if (run_len > 0) {
 				m_run = NULL;
 				run_len = 0;
 			}
 		}
 	}
 	if (run_len >= npages)
 		return (m_run);
 	return (NULL);
 }
 
 /*
  *	vm_page_reclaim_run:
  *
  *	Try to relocate each of the allocated virtual pages within the
  *	specified run of physical pages to a new physical address.  Free the
  *	physical pages underlying the relocated virtual pages.  A virtual page
  *	is relocatable if and only if it could be laundered or reclaimed by
  *	the page daemon.  Whenever possible, a virtual page is relocated to a
  *	physical address above "high".
  *
  *	Returns 0 if every physical page within the run was already free or
  *	just freed by a successful relocation.  Otherwise, returns a non-zero
  *	value indicating why the last attempt to relocate a virtual page was
  *	unsuccessful.
  *
  *	"req_class" must be an allocation class.
  */
 static int
 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
 	struct vm_domain *vmd;
 	struct spglist free;
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_end, m_new;
 	int error, order, req;
 
 	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
 	    ("req_class is not an allocation class"));
 	SLIST_INIT(&free);
 	error = 0;
 	m = m_run;
 	m_end = m_run + npages;
 	for (; error == 0 && m < m_end; m++) {
 		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
 		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));
 
 		/*
 		 * Racily check for wirings.  Races are handled once the object
 		 * lock is held and the page is unmapped.
 		 */
 		if (vm_page_wired(m))
 			error = EBUSY;
 		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
 			/*
 			 * The page is relocated if and only if it could be
 			 * laundered or reclaimed by the page daemon.
 			 */
 			VM_OBJECT_WLOCK(object);
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if (m->object != object ||
 			    ((object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_VNODE))
 				error = EINVAL;
 			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
 			else if (vm_page_queue(m) != PQ_NONE &&
 			    vm_page_tryxbusy(m) != 0) {
 				if (vm_page_wired(m)) {
 					vm_page_xunbusy(m);
 					error = EBUSY;
 					goto unlock;
 				}
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
 				KASSERT(m->oflags == 0,
 				    ("page %p has unexpected oflags", m));
 				/* Don't care: PGA_NOSYNC. */
 				if (!vm_page_none_valid(m)) {
 					/*
 					 * First, try to allocate a new page
 					 * that is above "high".  Failing
 					 * that, try to allocate a new page
 					 * that is below "m_run".  Allocate
 					 * the new page between the end of
 					 * "m_run" and "high" only as a last
 					 * resort.
 					 */
 					req = req_class;
 					if ((m->flags & PG_NODUMP) != 0)
 						req |= VM_ALLOC_NODUMP;
 					if (trunc_page(high) !=
 					    ~(vm_paddr_t)PAGE_MASK) {
 						m_new =
 						    vm_page_alloc_noobj_contig(
 						    req, 1, round_page(high),
 						    ~(vm_paddr_t)0, PAGE_SIZE,
 						    0, VM_MEMATTR_DEFAULT);
 					} else
 						m_new = NULL;
 					if (m_new == NULL) {
 						pa = VM_PAGE_TO_PHYS(m_run);
 						m_new =
 						    vm_page_alloc_noobj_contig(
 						    req, 1, 0, pa - 1,
 						    PAGE_SIZE, 0,
 						    VM_MEMATTR_DEFAULT);
 					}
 					if (m_new == NULL) {
 						pa += ptoa(npages);
 						m_new =
 						    vm_page_alloc_noobj_contig(
 						    req, 1, pa, high, PAGE_SIZE,
 						    0, VM_MEMATTR_DEFAULT);
 					}
 					if (m_new == NULL) {
 						vm_page_xunbusy(m);
 						error = ENOMEM;
 						goto unlock;
 					}
 
 					/*
 					 * Unmap the page and check for new
 					 * wirings that may have been acquired
 					 * through a pmap lookup.
 					 */
 					if (object->ref_count != 0 &&
 					    !vm_page_try_remove_all(m)) {
 						vm_page_xunbusy(m);
 						vm_page_free(m_new);
 						error = EBUSY;
 						goto unlock;
 					}
 
 					/*
 					 * Replace "m" with the new page.  For
 					 * vm_page_replace(), "m" must be busy
 					 * and dequeued.  Finally, change "m"
 					 * as if vm_page_free() was called.
 					 */
 					m_new->a.flags = m->a.flags &
 					    ~PGA_QUEUE_STATE_MASK;
 					KASSERT(m_new->oflags == VPO_UNMANAGED,
 					    ("page %p is managed", m_new));
 					m_new->oflags = 0;
 					pmap_copy_page(m, m_new);
 					m_new->valid = m->valid;
 					m_new->dirty = m->dirty;
 					m->flags &= ~PG_ZERO;
 					vm_page_dequeue(m);
 					if (vm_page_replace_hold(m_new, object,
 					    m->pindex, m) &&
-					    vm_page_free_prep(m))
+					    vm_page_free_prep(m, true))
 						SLIST_INSERT_HEAD(&free, m,
 						    plinks.s.ss);
 
 					/*
 					 * The new page must be deactivated
 					 * before the object is unlocked.
 					 */
 					vm_page_deactivate(m_new);
 				} else {
 					m->flags &= ~PG_ZERO;
 					vm_page_dequeue(m);
-					if (vm_page_free_prep(m))
+					if (vm_page_free_prep(m, true))
 						SLIST_INSERT_HEAD(&free, m,
 						    plinks.s.ss);
 					KASSERT(m->dirty == 0,
 					    ("page %p is dirty", m));
 				}
 			} else
 				error = EBUSY;
 unlock:
 			VM_OBJECT_WUNLOCK(object);
 		} else {
 			MPASS(vm_page_domain(m) == domain);
 			vmd = VM_DOMAIN(domain);
 			vm_domain_free_lock(vmd);
 			order = m->order;
 			if (order < VM_NFREEORDER) {
 				/*
 				 * The page is enqueued in the physical memory
 				 * allocator's free page queues.  Moreover, it
 				 * is the first page in a power-of-two-sized
 				 * run of contiguous free pages.  Jump ahead
 				 * to the last page within that run, and
 				 * continue from there.
 				 */
 				m += (1 << order) - 1;
 			}
 #if VM_NRESERVLEVEL > 0
 			else if (vm_reserv_is_page_free(m))
 				order = 0;
 #endif
 			vm_domain_free_unlock(vmd);
 			if (order == VM_NFREEORDER)
 				error = EINVAL;
 		}
 	}
 	if ((m = SLIST_FIRST(&free)) != NULL) {
 		int cnt;
 
 		vmd = VM_DOMAIN(domain);
 		cnt = 0;
 		vm_domain_free_lock(vmd);
 		do {
 			MPASS(vm_page_domain(m) == domain);
 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 			vm_phys_free_pages(m, 0);
 			cnt++;
 		} while ((m = SLIST_FIRST(&free)) != NULL);
 		vm_domain_free_unlock(vmd);
 		vm_domain_freecnt_inc(vmd, cnt);
 	}
 	return (error);
 }
 
 #define	NRUNS	16
 
 #define	RUN_INDEX(count, nruns)	((count) % (nruns))
 
 #define	MIN_RECLAIM	8
 
 /*
  *	vm_page_reclaim_contig:
  *
  *	Reclaim allocated, contiguous physical memory satisfying the specified
  *	conditions by relocating the virtual pages using that physical memory.
  *	Returns 0 if reclamation is successful, ERANGE if the specified domain
  *	can't possibly satisfy the reclamation request, or ENOMEM if not
  *	currently able to reclaim the requested number of pages.  Since
  *	relocation requires the allocation of physical pages, reclamation may
  *	fail with ENOMEM due to a shortage of free pages.  When reclamation
  *	fails in this manner, callers are expected to perform vm_wait() before
  *	retrying a failed allocation operation, e.g., vm_page_alloc_contig().
  *
  *	The caller must always specify an allocation class through "req".
  *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
  *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	The optional allocation flags are ignored.
  *
  *	"npages" must be greater than zero.  Both "alignment" and "boundary"
  *	must be a power of two.
  */
 int
 vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     int desired_runs)
 {
 	struct vm_domain *vmd;
 	vm_page_t bounds[2], m_run, _m_runs[NRUNS], *m_runs;
 	u_long count, minalign, reclaimed;
 	int error, i, min_reclaim, nruns, options, req_class;
 	int segind, start_segind;
 	int ret;
 
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 
 	ret = ENOMEM;
 
 	/*
 	 * If the caller wants to reclaim multiple runs, try to allocate
 	 * space to store the runs.  If that fails, fall back to the old
 	 * behavior of just reclaiming MIN_RECLAIM pages.
 	 */
 	if (desired_runs > 1)
 		m_runs = malloc((NRUNS + desired_runs) * sizeof(*m_runs),
 		    M_TEMP, M_NOWAIT);
 	else
 		m_runs = NULL;
 
 	if (m_runs == NULL) {
 		m_runs = _m_runs;
 		nruns = NRUNS;
 	} else {
 		nruns = NRUNS + desired_runs - 1;
 	}
 	min_reclaim = MAX(desired_runs * npages, MIN_RECLAIM);
 
 	/*
 	 * The caller will attempt an allocation after some runs have been
 	 * reclaimed and added to the vm_phys buddy lists.  Due to limitations
 	 * of vm_phys_alloc_contig(), round up the requested length to the next
 	 * power of two or maximum chunk size, and ensure that each run is
 	 * suitably aligned.
 	 */
 	minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1);
 	npages = roundup2(npages, minalign);
 	if (alignment < ptoa(minalign))
 		alignment = ptoa(minalign);
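 
 	/*
 	 * For example (illustrative, assuming VM_NFREEORDER > 3): a
 	 * request for npages == 5 gives flsl(4) == 3 and minalign == 8,
 	 * so the request is rounded up to 8 pages and the alignment to
 	 * at least 8 * PAGE_SIZE.
 	 */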
 
 	/*
 	 * The page daemon is allowed to dig deeper into the free page list.
 	 */
 	req_class = req & VM_ALLOC_CLASS_MASK;
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
 	start_segind = vm_phys_lookup_segind(low);
 
 	/*
 	 * Return if the number of free pages cannot satisfy the requested
 	 * allocation.
 	 */
 	vmd = VM_DOMAIN(domain);
 	count = vmd->vmd_free_count;
 	if (count < npages + vmd->vmd_free_reserved || (count < npages +
 	    vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
 	    (count < npages && req_class == VM_ALLOC_INTERRUPT))
 		goto done;
 
 	/*
 	 * Scan up to three times, relaxing the restrictions ("options") on
 	 * the reclamation of reservations and superpages each time.
 	 */
 	for (options = VPSC_NORESERV;;) {
 		bool phys_range_exists = false;
 
 		/*
 		 * Find the highest runs that satisfy the given constraints
 		 * and restrictions, and record them in "m_runs".
 		 */
 		count = 0;
 		segind = start_segind;
 		while ((segind = vm_phys_find_range(bounds, segind, domain,
 		    npages, low, high)) != -1) {
 			phys_range_exists = true;
 			while ((m_run = vm_page_scan_contig(npages, bounds[0],
 			    bounds[1], alignment, boundary, options))) {
 				bounds[0] = m_run + npages;
 				m_runs[RUN_INDEX(count, nruns)] = m_run;
 				count++;
 			}
 			segind++;
 		}
 
 		if (!phys_range_exists) {
 			ret = ERANGE;
 			goto done;
 		}
 
 		/*
 		 * Reclaim the highest runs in LIFO (descending) order until
 		 * the number of reclaimed pages, "reclaimed", is at least
 		 * "min_reclaim".  Reset "reclaimed" each time because each
 		 * reclamation is idempotent, and runs will (likely) recur
 		 * from one scan to the next as restrictions are relaxed.
 		 */
 		reclaimed = 0;
 		for (i = 0; count > 0 && i < nruns; i++) {
 			count--;
 			m_run = m_runs[RUN_INDEX(count, nruns)];
 			error = vm_page_reclaim_run(req_class, domain, npages,
 			    m_run, high);
 			if (error == 0) {
 				reclaimed += npages;
 				if (reclaimed >= min_reclaim) {
 					ret = 0;
 					goto done;
 				}
 			}
 		}
 
 		/*
 		 * Either relax the restrictions on the next scan or return if
 		 * the last scan had no restrictions.
 		 */
 		if (options == VPSC_NORESERV)
 			options = VPSC_NOSUPER;
 		else if (options == VPSC_NOSUPER)
 			options = VPSC_ANY;
 		else if (options == VPSC_ANY) {
 			if (reclaimed != 0)
 				ret = 0;
 			goto done;
 		}
 	}
 done:
 	if (m_runs != _m_runs)
 		free(m_runs, M_TEMP);
 	return (ret);
 }
 
 int
 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	return (vm_page_reclaim_contig_domain_ext(domain, req, npages, low, high,
 	    alignment, boundary, 1));
 }
 
 int
 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_domainset_iter di;
 	int domain, ret, status;
 
 	ret = ERANGE;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		status = vm_page_reclaim_contig_domain(domain, req, npages, low,
 		    high, alignment, boundary);
 		if (status == 0)
 			return (0);
 		else if (status == ERANGE)
 			vm_domainset_iter_ignore(&di, domain);
 		else {
 			KASSERT(status == ENOMEM, ("Unrecognized error %d "
 			    "from vm_page_reclaim_contig_domain()", status));
 			ret = ENOMEM;
 		}
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (ret);
 }
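 
 /*
  * An illustrative (hypothetical) recovery loop built around
  * vm_page_reclaim_contig(); every identifier below ("obj", "idx", "req",
  * "npages", "low", "high", "align", "bound", "m", "error") is a
  * placeholder:
  *
  *	for (;;) {
  *		VM_OBJECT_WLOCK(obj);
  *		m = vm_page_alloc_contig(obj, idx, req, npages, low, high,
  *		    align, bound, VM_MEMATTR_DEFAULT);
  *		VM_OBJECT_WUNLOCK(obj);
  *		if (m != NULL)
  *			break;
  *		error = vm_page_reclaim_contig(req, npages, low, high,
  *		    align, bound);
  *		if (error == ERANGE)
  *			return (ENOMEM);	/* Can never be satisfied. */
  *		if (error == ENOMEM)
  *			vm_wait(obj);
  *	}
  */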
 
 /*
  * Set the domain in the appropriate page level domainset.
  */
 void
 vm_domain_set(struct vm_domain *vmd)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	if (!vmd->vmd_minset && vm_paging_min(vmd)) {
 		vmd->vmd_minset = 1;
 		DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
 	}
 	if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 1;
 		DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 /*
  * Clear the domain from the appropriate page level domainset.
  */
 void
 vm_domain_clear(struct vm_domain *vmd)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
 		vmd->vmd_minset = 0;
 		DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
 		if (vm_min_waiters != 0) {
 			vm_min_waiters = 0;
 			wakeup(&vm_min_domains);
 		}
 	}
 	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 0;
 		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
 		if (vm_severe_waiters != 0) {
 			vm_severe_waiters = 0;
 			wakeup(&vm_severe_domains);
 		}
 	}
 
 	/*
 	 * If the pageout daemon needs pages, then tell it that there are
 	 * some free.
 	 */
 	if (vmd->vmd_pageout_pages_needed &&
 	    vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
 		wakeup(&vmd->vmd_pageout_pages_needed);
 		vmd->vmd_pageout_pages_needed = 0;
 	}
 
 	/* See comments in vm_wait_doms(). */
 	if (vm_pageproc_waiters) {
 		vm_pageproc_waiters = 0;
 		wakeup(&vm_pageproc_waiters);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 /*
  * Wait for free pages to exceed the min threshold globally.
  */
 void
 vm_wait_min(void)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	while (vm_page_count_min()) {
 		vm_min_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 /*
  * Wait for free pages to exceed the severe threshold globally.
  */
 void
 vm_wait_severe(void)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	while (vm_page_count_severe()) {
 		vm_severe_waiters++;
 		msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
 		    "vmwait", 0);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 u_int
 vm_wait_count(void)
 {
 
 	return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
 }
 
 int
 vm_wait_doms(const domainset_t *wdoms, int mflags)
 {
 	int error;
 
 	error = 0;
 
 	/*
 	 * We use racy wakeup synchronization to avoid expensive global
 	 * locking for the pageproc when sleeping with a non-specific vm_wait.
 	 * To handle this, we only sleep for one tick in this instance.  It
 	 * is expected that most allocations for the pageproc will come from
 	 * kmem or vm_page_grab* which will use the more specific and
 	 * race-free vm_wait_domain().
 	 */
 	if (curproc == pageproc) {
 		mtx_lock(&vm_domainset_lock);
 		vm_pageproc_waiters++;
 		error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
 		    PVM | PDROP | mflags, "pageprocwait", 1);
 	} else {
 		/*
 		 * XXX Ideally we would wait only until the allocation could
 		 * be satisfied.  This condition can cause new allocators to
 		 * consume all freed pages while old allocators wait.
 		 */
 		mtx_lock(&vm_domainset_lock);
 		if (vm_page_count_min_set(wdoms)) {
 			if (pageproc == NULL)
 				panic("vm_wait in early boot");
 			vm_min_waiters++;
 			error = msleep(&vm_min_domains, &vm_domainset_lock,
 			    PVM | PDROP | mflags, "vmwait", 0);
 		} else
 			mtx_unlock(&vm_domainset_lock);
 	}
 	return (error);
 }
 
 /*
  *	vm_wait_domain:
  *
  *	Sleep until free pages are available for allocation.
  *	- Called in various places after failed memory allocations.
  */
 void
 vm_wait_domain(int domain)
 {
 	struct vm_domain *vmd;
 	domainset_t wdom;
 
 	vmd = VM_DOMAIN(domain);
 	vm_domain_free_assert_unlocked(vmd);
 
 	if (curproc == pageproc) {
 		mtx_lock(&vm_domainset_lock);
 		if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
 			vmd->vmd_pageout_pages_needed = 1;
 			msleep(&vmd->vmd_pageout_pages_needed,
 			    &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
 		} else
 			mtx_unlock(&vm_domainset_lock);
 	} else {
 		DOMAINSET_ZERO(&wdom);
 		DOMAINSET_SET(vmd->vmd_domain, &wdom);
 		vm_wait_doms(&wdom, 0);
 	}
 }
 
 static int
 vm_wait_flags(vm_object_t obj, int mflags)
 {
 	struct domainset *d;
 
 	d = NULL;
 
 	/*
 	 * Carefully fetch pointers only once: the struct domainset
 	 * itself is immutable but the pointer might change.
 	 */
 	if (obj != NULL)
 		d = obj->domain.dr_policy;
 	if (d == NULL)
 		d = curthread->td_domain.dr_policy;
 
 	return (vm_wait_doms(&d->ds_mask, mflags));
 }
 
 /*
  *	vm_wait:
  *
  *	Sleep until free pages are available for allocation in the
  *	affinity domains of the obj.  If obj is NULL, the domain set
  *	for the calling thread is used.
  *	Called in various places after failed memory allocations.
  */
 void
 vm_wait(vm_object_t obj)
 {
 	(void)vm_wait_flags(obj, 0);
 }
 
 int
 vm_wait_intr(vm_object_t obj)
 {
 	return (vm_wait_flags(obj, PCATCH));
 }
 
 /*
  *	vm_domain_alloc_fail:
  *
  *	Called when a page allocation function fails.  Informs the
  *	pagedaemon and performs the requested wait.  The object lock, if
  *	any, must be held on entry and is held on return; the domain free
  *	lock must not be held.  Returns an error when a retry is
  *	necessary.
  */
 static int
 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
 {
 
 	vm_domain_free_assert_unlocked(vmd);
 
 	atomic_add_int(&vmd->vmd_pageout_deficit,
 	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
 	if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
 		if (object != NULL) 
 			VM_OBJECT_WUNLOCK(object);
 		vm_wait_domain(vmd->vmd_domain);
 		if (object != NULL) 
 			VM_OBJECT_WLOCK(object);
 		if (req & VM_ALLOC_WAITOK)
 			return (EAGAIN);
 	}
 
 	return (0);
 }
 
 /*
  *	vm_waitpfault:
  *
  *	Sleep until free pages are available for allocation.
  *	- Called only in vm_fault so that processes page faulting
  *	  can be easily tracked.
  *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
  *	  processes will be able to grab memory first.  Do not change
  *	  this balance without careful testing first.
  */
 void
 vm_waitpfault(struct domainset *dset, int timo)
 {
 
 	/*
 	 * XXX Ideally we would wait only until the allocation could
 	 * be satisfied.  This condition can cause new allocators to
 	 * consume all freed pages while old allocators wait.
 	 */
 	mtx_lock(&vm_domainset_lock);
 	if (vm_page_count_min_set(&dset->ds_mask)) {
 		vm_min_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
 		    "pfault", timo);
 	} else
 		mtx_unlock(&vm_domainset_lock);
 }
 
 static struct vm_pagequeue *
 _vm_page_pagequeue(vm_page_t m, uint8_t queue)
 {
 
 	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
 }
 
 #ifdef INVARIANTS
 static struct vm_pagequeue *
 vm_page_pagequeue(vm_page_t m)
 {
 
 	return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
 }
 #endif
 
 static __always_inline bool
 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
 {
 	vm_page_astate_t tmp;
 
 	tmp = *old;
 	do {
 		if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
 			return (true);
 		counter_u64_add(pqstate_commit_retries, 1);
 	} while (old->_bits == tmp._bits);
 
 	return (false);
 }
 
 /*
  * Do the work of committing a queue state update that moves the page out of
  * its current queue.
  */
 static bool
 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m,
     vm_page_astate_t *old, vm_page_astate_t new)
 {
 	vm_page_t next;
 
 	vm_pagequeue_assert_locked(pq);
 	KASSERT(vm_page_pagequeue(m) == pq,
 	    ("%s: queue %p does not match page %p", __func__, pq, m));
 	KASSERT(old->queue != PQ_NONE && new.queue != old->queue,
 	    ("%s: invalid queue indices %d %d",
 	    __func__, old->queue, new.queue));
 
 	/*
 	 * Once the queue index of the page changes there is nothing
 	 * synchronizing with further updates to the page's physical
 	 * queue state.  Therefore we must speculatively remove the page
 	 * from the queue now and be prepared to roll back if the queue
 	 * state update fails.  If the page is not physically enqueued then
 	 * we just update its queue index.
 	 */
 	if ((old->flags & PGA_ENQUEUED) != 0) {
 		new.flags &= ~PGA_ENQUEUED;
 		next = TAILQ_NEXT(m, plinks.q);
 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 		vm_pagequeue_cnt_dec(pq);
 		if (!vm_page_pqstate_fcmpset(m, old, new)) {
 			if (next == NULL)
 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 			else
 				TAILQ_INSERT_BEFORE(next, m, plinks.q);
 			vm_pagequeue_cnt_inc(pq);
 			return (false);
 		} else {
 			return (true);
 		}
 	} else {
 		return (vm_page_pqstate_fcmpset(m, old, new));
 	}
 }
 
 static bool
 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old,
     vm_page_astate_t new)
 {
 	struct vm_pagequeue *pq;
 	vm_page_astate_t as;
 	bool ret;
 
 	pq = _vm_page_pagequeue(m, old->queue);
 
 	/*
 	 * The queue field and PGA_ENQUEUED flag are stable only so long as the
 	 * corresponding page queue lock is held.
 	 */
 	vm_pagequeue_lock(pq);
 	as = vm_page_astate_load(m);
 	if (__predict_false(as._bits != old->_bits)) {
 		*old = as;
 		ret = false;
 	} else {
 		ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new);
 	}
 	vm_pagequeue_unlock(pq);
 	return (ret);
 }
 
 /*
  * Commit a queue state update that enqueues or requeues a page.
  */
 static bool
 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m,
     vm_page_astate_t *old, vm_page_astate_t new)
 {
 	struct vm_domain *vmd;
 
 	vm_pagequeue_assert_locked(pq);
 	KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
 	    ("%s: invalid queue indices %d %d",
 	    __func__, old->queue, new.queue));
 
 	new.flags |= PGA_ENQUEUED;
 	if (!vm_page_pqstate_fcmpset(m, old, new))
 		return (false);
 
 	if ((old->flags & PGA_ENQUEUED) != 0)
 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 	else
 		vm_pagequeue_cnt_inc(pq);
 
 	/*
 	 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.  In particular, if
 	 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be
 	 * applied, even if it was set first.
 	 */
 	if ((old->flags & PGA_REQUEUE_HEAD) != 0) {
 		vmd = vm_pagequeue_domain(m);
 		KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE],
 		    ("%s: invalid page queue for page %p", __func__, m));
 		TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
 	} else {
 		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 	}
 	return (true);
 }
 
 /*
  * Commit a queue state update that encodes a request for a deferred queue
  * operation.
  */
 static bool
 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old,
     vm_page_astate_t new)
 {
 
 	KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
 	    ("%s: invalid state, queue %d flags %x",
 	    __func__, new.queue, new.flags));
 
 	if (old->_bits != new._bits &&
 	    !vm_page_pqstate_fcmpset(m, old, new))
 		return (false);
 	vm_page_pqbatch_submit(m, new.queue);
 	return (true);
 }
 
 /*
  * A generic queue state update function.  This handles more cases than the
  * specialized functions above.
  */
 bool
 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
 {
 
 	if (old->_bits == new._bits)
 		return (true);
 
 	if (old->queue != PQ_NONE && new.queue != old->queue) {
 		if (!vm_page_pqstate_commit_dequeue(m, old, new))
 			return (false);
 		if (new.queue != PQ_NONE)
 			vm_page_pqbatch_submit(m, new.queue);
 	} else {
 		if (!vm_page_pqstate_fcmpset(m, old, new))
 			return (false);
 		if (new.queue != PQ_NONE &&
 		    ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0)
 			vm_page_pqbatch_submit(m, new.queue);
 	}
 	return (true);
 }
 
 /*
  * Apply deferred queue state updates to a page.
  */
 static inline void
 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
 {
 	vm_page_astate_t new, old;
 
 	CRITICAL_ASSERT(curthread);
 	vm_pagequeue_assert_locked(pq);
 	KASSERT(queue < PQ_COUNT,
 	    ("%s: invalid queue index %d", __func__, queue));
 	KASSERT(pq == _vm_page_pagequeue(m, queue),
 	    ("%s: page %p does not belong to queue %p", __func__, m, pq));
 
 	for (old = vm_page_astate_load(m);;) {
 		if (__predict_false(old.queue != queue ||
 		    (old.flags & PGA_QUEUE_OP_MASK) == 0)) {
 			counter_u64_add(queue_nops, 1);
 			break;
 		}
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("%s: page %p is unmanaged", __func__, m));
 
 		new = old;
 		if ((old.flags & PGA_DEQUEUE) != 0) {
 			new.flags &= ~PGA_QUEUE_OP_MASK;
 			new.queue = PQ_NONE;
 			if (__predict_true(_vm_page_pqstate_commit_dequeue(pq,
 			    m, &old, new))) {
 				counter_u64_add(queue_ops, 1);
 				break;
 			}
 		} else {
 			new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
 			if (__predict_true(_vm_page_pqstate_commit_requeue(pq,
 			    m, &old, new))) {
 				counter_u64_add(queue_ops, 1);
 				break;
 			}
 		}
 	}
 }
 
 static void
 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
     uint8_t queue)
 {
 	int i;
 
 	for (i = 0; i < bq->bq_cnt; i++)
 		vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);
 	vm_batchqueue_init(bq);
 }
 
 /*
  *	vm_page_pqbatch_submit:		[ internal use only ]
  *
  *	Enqueue a page in the specified page queue's batched work queue.
  *	The caller must have encoded the requested operation in the page
  *	structure's a.flags field.
  */
 void
 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
 	int domain, slots_remaining;
 
 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 
 	domain = vm_page_domain(m);
 	critical_enter();
 	bq = DPCPU_PTR(pqbatch[domain][queue]);
 	slots_remaining = vm_batchqueue_insert(bq, m);
 	if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) {
 		/* Keep building the bq. */
 		critical_exit();
 		return;
 	} else if (slots_remaining > 0) {
 		/* Try to process the bq if we can get the lock. */
 		pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
 		if (vm_pagequeue_trylock(pq)) {
 			vm_pqbatch_process(pq, bq, queue);
 			vm_pagequeue_unlock(pq);
 		}
 		critical_exit();
 		return;
 	}
 	critical_exit();
 
 	/* If we make it here, the bq is full, so wait for the lock. */
 
 	pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
 	vm_pagequeue_lock(pq);
 	critical_enter();
 	bq = DPCPU_PTR(pqbatch[domain][queue]);
 	vm_pqbatch_process(pq, bq, queue);
 	vm_pqbatch_process_page(pq, m, queue);
 	vm_pagequeue_unlock(pq);
 	critical_exit();
 }
 
 /*
  *	vm_page_pqbatch_drain:		[ internal use only ]
  *
  *	Force all per-CPU page queue batch queues to be drained.  This is
  *	intended for use in severe memory shortages, to ensure that pages
  *	do not remain stuck in the batch queues.
  */
 void
 vm_page_pqbatch_drain(void)
 {
 	struct thread *td;
 	struct vm_domain *vmd;
 	struct vm_pagequeue *pq;
 	int cpu, domain, queue;
 
 	td = curthread;
 	CPU_FOREACH(cpu) {
 		thread_lock(td);
 		sched_bind(td, cpu);
 		thread_unlock(td);
 
 		for (domain = 0; domain < vm_ndomains; domain++) {
 			vmd = VM_DOMAIN(domain);
 			for (queue = 0; queue < PQ_COUNT; queue++) {
 				pq = &vmd->vmd_pagequeues[queue];
 				vm_pagequeue_lock(pq);
 				critical_enter();
 				vm_pqbatch_process(pq,
 				    DPCPU_PTR(pqbatch[domain][queue]), queue);
 				critical_exit();
 				vm_pagequeue_unlock(pq);
 			}
 		}
 	}
 	thread_lock(td);
 	sched_unbind(td);
 	thread_unlock(td);
 }
 
 /*
  *	vm_page_dequeue_deferred:	[ internal use only ]
  *
  *	Request removal of the given page from its current page
  *	queue.  Physical removal from the queue may be deferred
  *	indefinitely.
  */
 void
 vm_page_dequeue_deferred(vm_page_t m)
 {
 	vm_page_astate_t new, old;
 
 	old = vm_page_astate_load(m);
 	do {
 		if (old.queue == PQ_NONE) {
 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
 			    ("%s: page %p has unexpected queue state",
 			    __func__, m));
 			break;
 		}
 		new = old;
 		new.flags |= PGA_DEQUEUE;
 	} while (!vm_page_pqstate_commit_request(m, &old, new));
 }
 
 /*
  *	vm_page_dequeue:
  *
  *	Remove the page from whichever page queue it's in, if any, before
  *	returning.
  */
 void
 vm_page_dequeue(vm_page_t m)
 {
 	vm_page_astate_t new, old;
 
 	old = vm_page_astate_load(m);
 	do {
 		if (old.queue == PQ_NONE) {
 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
 			    ("%s: page %p has unexpected queue state",
 			    __func__, m));
 			break;
 		}
 		new = old;
 		new.flags &= ~PGA_QUEUE_OP_MASK;
 		new.queue = PQ_NONE;
 	} while (!vm_page_pqstate_commit_dequeue(m, &old, new));
 }
 
 /*
  * Schedule the given page for insertion into the specified page queue.
  * Physical insertion of the page may be deferred indefinitely.
  */
 static void
 vm_page_enqueue(vm_page_t m, uint8_t queue)
 {
 
 	KASSERT(m->a.queue == PQ_NONE &&
 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("%s: page %p is already enqueued", __func__, m));
 	KASSERT(m->ref_count > 0,
 	    ("%s: page %p does not carry any references", __func__, m));
 
 	m->a.queue = queue;
 	if ((m->a.flags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
 	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
  *	vm_page_free_prep:
  *
  *	Prepares the given page to be put on the free list,
  *	disassociating it from any VM object. The caller may return
  *	the page to the free list only if this function returns true.
  *
  *	If the page belongs to an object, the object must be locked and the
  *	page must be xbusy.  Otherwise the page must not be busied.  A
  *	managed page must be unmapped.
  */
 static bool
-vm_page_free_prep(vm_page_t m)
+vm_page_free_prep(vm_page_t m, bool do_remove)
 {
 
 	/*
 	 * Synchronize with threads that have dropped a reference to this
 	 * page.
 	 */
 	atomic_thread_fence_acq();
 
 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
 	if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
 		uint64_t *p;
 		int i;
 		p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 		for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
 			KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
 			    m, i, (uintmax_t)*p));
 	}
 #endif
 	KASSERT((m->flags & PG_NOFREE) == 0,
 	    ("%s: attempting to free a PG_NOFREE page", __func__));
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_prep: freeing mapped page %p", m));
 		KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
 		    ("vm_page_free_prep: mapping flags set in page %p", m));
 	} else {
 		KASSERT(m->a.queue == PQ_NONE,
 		    ("vm_page_free_prep: unmanaged page %p is queued", m));
 	}
 	VM_CNT_INC(v_tfree);
 
 	if (m->object != NULL) {
 		KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
 		    ((m->object->flags & OBJ_UNMANAGED) != 0),
 		    ("vm_page_free_prep: managed flag mismatch for page %p",
 		    m));
 		vm_page_assert_xbusied(m);
 
 		/*
 		 * The object reference can be released without an atomic
 		 * operation.
 		 */
 		KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
 		    m->ref_count == VPRC_OBJREF,
 		    ("vm_page_free_prep: page %p has unexpected ref_count %u",
 		    m, m->ref_count));
-		vm_page_object_remove(m);
+		if (do_remove)
+			vm_page_radix_remove(m);
+		vm_page_remove_radixdone(m);
 		m->ref_count -= VPRC_OBJREF;
 	} else
 		vm_page_assert_unbusied(m);
 
 	vm_page_busy_free(m);
 
 	/*
 	 * Fictitious pages are never placed on the free lists; there is
 	 * nothing more to do.
 	 */
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		KASSERT(m->ref_count == 1,
 		    ("fictitious page %p is referenced", m));
 		KASSERT(m->a.queue == PQ_NONE,
 		    ("fictitious page %p is queued", m));
 		return (false);
 	}
 
 	/*
 	 * Pages need not be dequeued before they are returned to the physical
 	 * memory allocator, but they must at least be marked for a deferred
 	 * dequeue.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_dequeue_deferred(m);
 
 	m->valid = 0;
 	vm_page_undirty(m);
 
 	if (m->ref_count != 0)
 		panic("vm_page_free_prep: page %p has references", m);
 
 	/*
 	 * Restore the default memory attribute to the page.
 	 */
 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
 
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Determine whether the page belongs to a reservation.  If the page was
 	 * allocated from a per-CPU cache, it cannot belong to a reservation, so
 	 * as an optimization, we avoid the check in that case.
 	 */
 	if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
 		return (false);
 #endif
 
 	return (true);
 }
 
-/*
- *	vm_page_free_toq:
- *
- *	Returns the given page to the free list, disassociating it
- *	from any VM object.
- *
- *	The object must be locked.  The page must be exclusively busied if it
- *	belongs to an object.
- */
 static void
-vm_page_free_toq(vm_page_t m)
+vm_page_free_toq_impl(vm_page_t m, bool do_remove)
 {
 	struct vm_domain *vmd;
 	uma_zone_t zone;
 
-	if (!vm_page_free_prep(m))
+	if (!vm_page_free_prep(m, do_remove))
 		return;
 
 	vmd = vm_pagequeue_domain(m);
 	zone = vmd->vmd_pgcache[m->pool].zone;
 	if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
 		uma_zfree(zone, m);
 		return;
 	}
 	vm_domain_free_lock(vmd);
 	vm_phys_free_pages(m, 0);
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, 1);
 }
 
+/*
+ *	vm_page_free_toq:
+ *
+ *	Returns the given page to the free list, disassociating it
+ *	from any VM object.
+ *
+ *	The object must be locked.  The page must be exclusively busied if it
+ *	belongs to an object.
+ */
+static void
+vm_page_free_toq(vm_page_t m)
+{
+	vm_page_free_toq_impl(m, true);
+}
+
 /*
  *	vm_page_free_pages_toq:
  *
  *	Returns a list of pages to the free list, disassociating each
  *	page from any VM object.  In other words, this is equivalent to
  *	calling vm_page_free_toq() for each page in the list.
  */
 int
 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
 {
 	vm_page_t m;
 	int count;
 
 	if (SLIST_EMPTY(free))
 		return (0);
 
 	count = 0;
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		count++;
 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
 		vm_page_free_toq(m);
 	}
 
 	if (update_wire_count)
 		vm_wire_sub(count);
 	return (count);
 }
 
 /*
  * Mark this page as wired down.  For managed pages, this prevents reclamation
  * by the page daemon, or when the containing object, if any, is destroyed.
  */
 void
 vm_page_wire(vm_page_t m)
 {
 	u_int old;
 
 #ifdef INVARIANTS
 	if (m->object != NULL && !vm_page_busied(m) &&
 	    !vm_object_busied(m->object))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 #endif
 	KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
 	    VPRC_WIRE_COUNT(m->ref_count) >= 1,
 	    ("vm_page_wire: fictitious page %p has zero wirings", m));
 
 	old = atomic_fetchadd_int(&m->ref_count, 1);
 	KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
 	    ("vm_page_wire: counter overflow for page %p", m));
 	if (VPRC_WIRE_COUNT(old) == 0) {
 		if ((m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_DEQUEUE);
 		vm_wire_add(1);
 	}
 }
 
 /*
  * Attempt to wire a mapped page following a pmap lookup of that page.
  * This may fail if a thread is concurrently tearing down mappings of the page.
  * The transient failure is acceptable because it translates to a
  * failure of the caller, pmap_extract_and_hold(), which should then be
  * followed by the vm_fault() fallback; see, e.g., vm_fault_quick_hold_pages().
  */
 bool
 vm_page_wire_mapped(vm_page_t m)
 {
 	u_int old;
 
 	old = atomic_load_int(&m->ref_count);
 	do {
 		KASSERT(old > 0,
 		    ("vm_page_wire_mapped: wiring unreferenced page %p", m));
 		if ((old & VPRC_BLOCKED) != 0)
 			return (false);
 	} while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
 
 	if (VPRC_WIRE_COUNT(old) == 0) {
 		if ((m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_DEQUEUE);
 		vm_wire_add(1);
 	}
 	return (true);
 }
 
 /*
  * Release a wiring reference to a managed page.  If the page still belongs to
  * an object, update its position in the page queues to reflect the reference.
  * If the wiring was the last reference to the page, free the page.
  */
 static void
 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
 {
 	u_int old;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("%s: page %p is unmanaged", __func__, m));
 
 	/*
 	 * Update LRU state before releasing the wiring reference.
 	 * Use a release store when updating the reference count to
 	 * synchronize with vm_page_free_prep().
 	 */
 	old = atomic_load_int(&m->ref_count);
 	do {
 		u_int count;
 
 		KASSERT(VPRC_WIRE_COUNT(old) > 0,
 		    ("vm_page_unwire: wire count underflow for page %p", m));
 
 		count = old & ~VPRC_BLOCKED;
 		if (count > VPRC_OBJREF + 1) {
 			/*
 			 * The page has at least one other wiring reference.  An
 			 * earlier iteration of this loop may have called
 			 * vm_page_release_toq() and cleared PGA_DEQUEUE, so
 			 * re-set it if necessary.
 			 */
 			if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
 				vm_page_aflag_set(m, PGA_DEQUEUE);
 		} else if (count == VPRC_OBJREF + 1) {
 			/*
 			 * This is the last wiring.  Clear PGA_DEQUEUE and
 			 * update the page's queue state to reflect the
 			 * reference.  If the page does not belong to an object
 			 * (i.e., the VPRC_OBJREF bit is clear), we only need to
 			 * clear leftover queue state.
 			 */
 			vm_page_release_toq(m, nqueue, noreuse);
 		} else if (count == 1) {
 			vm_page_aflag_clear(m, PGA_DEQUEUE);
 		}
 	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
 
 	if (VPRC_WIRE_COUNT(old) == 1) {
 		vm_wire_sub(1);
 		if (old == 1)
 			vm_page_free(m);
 	}
 }
 
 /*
  * Release one wiring of the specified page, potentially allowing it to be
  * paged out.
  *
  * Only managed pages belonging to an object can be paged out.  If the number
  * of wirings transitions to zero and the page is eligible for page out, then
  * the page is added to the specified paging queue.  If the released wiring
  * represented the last reference to the page, the page is freed.
  */
 void
 vm_page_unwire(vm_page_t m, uint8_t nqueue)
 {
 
 	KASSERT(nqueue < PQ_COUNT,
 	    ("vm_page_unwire: invalid queue %u request for page %p",
 	    nqueue, m));
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		if (vm_page_unwire_noq(m) && m->ref_count == 0)
 			vm_page_free(m);
 		return;
 	}
 	vm_page_unwire_managed(m, nqueue, false);
 }
 
 /*
  * Unwire a page without (re-)inserting it into a page queue.  It is up
  * to the caller to enqueue, requeue, or free the page as appropriate.
  * In most cases involving managed pages, vm_page_unwire() should be used
  * instead.
  */
 bool
 vm_page_unwire_noq(vm_page_t m)
 {
 	u_int old;
 
 	old = vm_page_drop(m, 1);
 	KASSERT(VPRC_WIRE_COUNT(old) != 0,
 	    ("%s: counter underflow for page %p", __func__,  m));
 	KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
 	    ("%s: missing ref on fictitious page %p", __func__, m));
 
 	if (VPRC_WIRE_COUNT(old) > 1)
 		return (false);
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_aflag_clear(m, PGA_DEQUEUE);
 	vm_wire_sub(1);
 	return (true);
 }
 
 /*
  * Ensure that the page ends up in the specified page queue.  If the page is
  * active or being moved to the active queue, ensure that its act_count is
  * at least ACT_INIT but do not otherwise mess with it.
  */
 static __always_inline void
 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
 {
 	vm_page_astate_t old, new;
 
 	KASSERT(m->ref_count > 0,
 	    ("%s: page %p does not carry any references", __func__, m));
 	KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
 	    ("%s: invalid flags %x", __func__, nflag));
 
 	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
 		return;
 
 	old = vm_page_astate_load(m);
 	do {
 		if ((old.flags & PGA_DEQUEUE) != 0)
 			break;
 		new = old;
 		new.flags &= ~PGA_QUEUE_OP_MASK;
 		if (nqueue == PQ_ACTIVE)
 			new.act_count = max(old.act_count, ACT_INIT);
 		if (old.queue == nqueue) {
 			/*
 			 * There is no need to requeue pages already in the
 			 * active queue.
 			 */
 			if (nqueue != PQ_ACTIVE ||
 			    (old.flags & PGA_ENQUEUED) == 0)
 				new.flags |= nflag;
 		} else {
 			new.flags |= nflag;
 			new.queue = nqueue;
 		}
 	} while (!vm_page_pqstate_commit(m, &old, new));
 }
 
 /*
  * Put the specified page on the active list (if appropriate).
  */
 void
 vm_page_activate(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
 }
 
 /*
  * Move the specified page to the tail of the inactive queue, or requeue
  * the page if it is already in the inactive queue.
  */
 void
 vm_page_deactivate(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
 }
 
 void
 vm_page_deactivate_noreuse(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
 }
 
 /*
  * Put a page in the laundry, or requeue it if it is already there.
  */
 void
 vm_page_launder(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
 }
 
 /*
  * Put a page in the PQ_UNSWAPPABLE holding queue.
  */
 void
 vm_page_unswappable(vm_page_t m)
 {
 
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("page %p already unswappable", m));
 
 	vm_page_dequeue(m);
 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
 }
 
 /*
  * Release a page back to the page queues in preparation for unwiring.
  */
 static void
 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
 {
 	vm_page_astate_t old, new;
 	uint16_t nflag;
 
 	/*
 	 * Use a check of the valid bits to determine whether we should
 	 * accelerate reclamation of the page.  The object lock might not be
 	 * held here, in which case the check is racy.  At worst we will either
 	 * accelerate reclamation of a valid page and violate LRU, or
 	 * unnecessarily defer reclamation of an invalid page.
 	 *
 	 * If we were asked not to cache the page, place it near the head of
 	 * the inactive queue so that it is reclaimed sooner.
 	 */
 	if (noreuse || vm_page_none_valid(m)) {
 		nqueue = PQ_INACTIVE;
 		nflag = PGA_REQUEUE_HEAD;
 	} else {
 		nflag = PGA_REQUEUE;
 	}
 
 	old = vm_page_astate_load(m);
 	do {
 		new = old;
 
 		/*
 		 * If the page is already in the active queue and we are not
 		 * trying to accelerate reclamation, simply mark it as
 		 * referenced and avoid any queue operations.
 		 */
 		new.flags &= ~PGA_QUEUE_OP_MASK;
 		if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE &&
 		    (old.flags & PGA_ENQUEUED) != 0)
 			new.flags |= PGA_REFERENCED;
 		else {
 			new.flags |= nflag;
 			new.queue = nqueue;
 		}
 	} while (!vm_page_pqstate_commit(m, &old, new));
 }
 
 /*
  * Unwire a page and either attempt to free it or re-add it to the page queues.
  */
 void
 vm_page_release(vm_page_t m, int flags)
 {
 	vm_object_t object;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("vm_page_release: page %p is unmanaged", m));
 
 	if ((flags & VPR_TRYFREE) != 0) {
 		for (;;) {
 			object = atomic_load_ptr(&m->object);
 			if (object == NULL)
 				break;
 			/* Depends on type-stability. */
 			if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
 				break;
 			if (object == m->object) {
 				vm_page_release_locked(m, flags);
 				VM_OBJECT_WUNLOCK(object);
 				return;
 			}
 			VM_OBJECT_WUNLOCK(object);
 		}
 	}
 	vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
 }
 
 /* See vm_page_release(). */
 void
 vm_page_release_locked(vm_page_t m, int flags)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("vm_page_release_locked: page %p is unmanaged", m));
 
 	if (vm_page_unwire_noq(m)) {
 		if ((flags & VPR_TRYFREE) != 0 &&
 		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
 		    m->dirty == 0 && vm_page_tryxbusy(m)) {
 			/*
 			 * An unlocked lookup may have wired the page before the
 			 * busy lock was acquired, in which case the page must
 			 * not be freed.
 			 */
 			if (__predict_true(!vm_page_wired(m))) {
 				vm_page_free(m);
 				return;
 			}
 			vm_page_xunbusy(m);
 		} else {
 			vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
 		}
 	}
 }
 
 static bool
 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
 {
 	u_int old;
 
 	KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
 	    ("vm_page_try_blocked_op: page %p has no object", m));
 	KASSERT(vm_page_busied(m),
 	    ("vm_page_try_blocked_op: page %p is not busy", m));
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	old = atomic_load_int(&m->ref_count);
 	do {
 		KASSERT(old != 0,
 		    ("vm_page_try_blocked_op: page %p has no references", m));
 		KASSERT((old & VPRC_BLOCKED) == 0,
 		    ("vm_page_try_blocked_op: page %p blocks wirings", m));
 		if (VPRC_WIRE_COUNT(old) != 0)
 			return (false);
 	} while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));
 
 	(op)(m);
 
 	/*
 	 * If the object is read-locked, new wirings may be created via an
 	 * object lookup.
 	 */
 	old = vm_page_drop(m, VPRC_BLOCKED);
 	KASSERT(!VM_OBJECT_WOWNED(m->object) ||
 	    old == (VPRC_BLOCKED | VPRC_OBJREF),
 	    ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
 	    old, m));
 	return (true);
 }
 
 /*
  * Atomically check for wirings and remove all mappings of the page.
  */
 bool
 vm_page_try_remove_all(vm_page_t m)
 {
 
 	return (vm_page_try_blocked_op(m, pmap_remove_all));
 }
 
 /*
  * Atomically check for wirings and remove all writeable mappings of the page.
  */
 bool
 vm_page_try_remove_write(vm_page_t m)
 {
 
 	return (vm_page_try_blocked_op(m, pmap_remove_write));
 }
 
 /*
  * vm_page_advise
  *
  * 	Apply the specified advice to the given page.
  */
 void
 vm_page_advise(vm_page_t m, int advice)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	vm_page_assert_xbusied(m);
 
 	if (advice == MADV_FREE)
 		/*
 		 * Mark the page clean.  This will allow the page to be freed
 		 * without first paging it out.  MADV_FREE pages are often
 		 * quickly reused by malloc(3), so we do not do anything that
 		 * would result in a page fault on a later access.
 		 */
 		vm_page_undirty(m);
 	else if (advice != MADV_DONTNEED) {
 		if (advice == MADV_WILLNEED)
 			vm_page_activate(m);
 		return;
 	}
 
 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
 		vm_page_dirty(m);
 
 	/*
 	 * Clear any references to the page.  Otherwise, the page daemon will
 	 * immediately reactivate the page.
 	 */
 	vm_page_aflag_clear(m, PGA_REFERENCED);
 
 	/*
 	 * Place clean pages near the head of the inactive queue rather than
 	 * the tail, thus defeating the queue's LRU operation and ensuring that
 	 * the page will be reused quickly.  Dirty pages not already in the
 	 * laundry are moved there.
 	 */
 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
 	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
 
 /*
  *	vm_page_grab_release
  *
  *	Helper routine for grab functions to release busy on return.
  */
 static inline void
 vm_page_grab_release(vm_page_t m, int allocflags)
 {
 
 	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
 		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
 			vm_page_sunbusy(m);
 		else
 			vm_page_xunbusy(m);
 	}
 }
 
 /*
  *	vm_page_grab_sleep
  *
  *	Sleep for busy according to VM_ALLOC_ parameters.  Returns true
  *	if the caller should retry and false otherwise.
  *
  *	If the object is locked on entry, it will be unlocked when false is
  *	returned; when true is returned it is still locked, although it may
  *	have been dropped and reacquired.
  */
 static bool
 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
     const char *wmesg, int allocflags, bool locked)
 {
 
 	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 		return (false);
 
 	/*
 	 * Reference the page before unlocking and sleeping so that
 	 * the page daemon is less likely to reclaim it.
 	 */
 	if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
 		vm_page_reference(m);
 
 	if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
 	    locked)
 		VM_OBJECT_WLOCK(object);
 	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
 		return (false);
 
 	return (true);
 }
 
 /*
  * Assert that the grab flags are valid.
  */
 static inline void
 vm_page_grab_check(int allocflags)
 {
 
 	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_WIRED) != 0,
 	    ("vm_page_grab*: the pages must be busied or wired"));
 
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 }
 
 /*
  * Calculate the page allocation flags for grab.
  */
 static inline int
 vm_page_grab_pflags(int allocflags)
 {
 	int pflags;
 
 	pflags = allocflags &
 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
 	    VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY);
 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 		pflags |= VM_ALLOC_WAITFAIL;
 	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
 		pflags |= VM_ALLOC_SBUSY;
 
 	return (pflags);
 }
 
 /*
  * Grab a page, waiting until we are woken up due to the page changing
  * state.  We keep on waiting as long as the page continues to exist in
  * the object.  If the page doesn't exist, first allocate it and then
  * conditionally zero it.
  *
  * This routine may sleep.
  *
  * The object must be locked on entry.  The lock will, however, be released
  * and reacquired if the routine sleeps.
  */
 vm_page_t
 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_grab_check(allocflags);
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		if (!vm_page_tryacquire(m, allocflags)) {
 			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true))
 				goto retrylookup;
 			return (NULL);
 		}
 		goto out;
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (NULL);
 	m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
 	if (m == NULL) {
 		if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
 			return (NULL);
 		goto retrylookup;
 	}
 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 
 out:
 	vm_page_grab_release(m, allocflags);
 
 	return (m);
 }
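 
 /*
  * Usage sketch (illustrative only; "obj" and "idx" are placeholder names):
  * grab a wired page at the given index, allocating and zero-filling it if
  * it is not already resident.  With these flags the routine sleeps rather
  * than failing, and the page is returned exclusively busied.
  *
  *	VM_OBJECT_WLOCK(obj);
  *	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
  *	    VM_ALLOC_ZERO);
  *	VM_OBJECT_WUNLOCK(obj);
  *	... access the page's contents ...
  *	vm_page_xunbusy(m);
  *	vm_page_unwire(m, PQ_ACTIVE);
  */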
 
 /*
  * Attempt to validate a page, locklessly acquiring it if necessary, given an
  * (object, pindex) tuple and either an invalidated page or NULL.  The
  * resulting page will be validated against the identity tuple, and busied or
  * wired as requested.  A NULL return guarantees that the page was not present
  * in the radix tree at the time of the call, but callers must perform higher
  * level synchronization or retry the operation under a lock if they require
  * an atomic answer.  This is the only lock-free validation routine; other
  * routines can depend on the resulting page state.
  *
  * The return value PAGE_NOT_ACQUIRED indicates that the operation failed due
  * to caller flags.
  */
 #define PAGE_NOT_ACQUIRED ((vm_page_t)1)
 static vm_page_t
 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, vm_page_t m,
     int allocflags)
 {
 	if (m == NULL)
 		m = vm_page_lookup_unlocked(object, pindex);
 	for (; m != NULL; m = vm_page_lookup_unlocked(object, pindex)) {
 		if (vm_page_trybusy(m, allocflags)) {
 			if (m->object == object && m->pindex == pindex) {
 				if ((allocflags & VM_ALLOC_WIRED) != 0)
 					vm_page_wire(m);
 				vm_page_grab_release(m, allocflags);
 				break;
 			}
 			/* relookup. */
 			vm_page_busy_release(m);
 			cpu_spinwait();
 			continue;
 		}
 		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
 		    allocflags, false))
 			return (PAGE_NOT_ACQUIRED);
 	}
 	return (m);
 }
 
 /*
  * Try to locklessly grab a page and fall back to the object lock if NOCREAT
  * is not set.
  */
 vm_page_t
 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 
 	vm_page_grab_check(allocflags);
 	m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags);
 	if (m == PAGE_NOT_ACQUIRED)
 		return (NULL);
 	if (m != NULL)
 		return (m);
 
 	/*
 	 * The radix lockless lookup should never return false negative
 	 * errors.  If the user specifies NOCREAT they are guaranteed there
 	 * was no page present at the instant of the call.  A NOCREAT caller
 	 * must handle create races gracefully.
 	 */
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (NULL);
 
 	VM_OBJECT_WLOCK(object);
 	m = vm_page_grab(object, pindex, allocflags);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (m);
 }
 
 /*
  * Grab a page and make it valid, paging in if necessary.  Pages missing from
  * their pager are zero-filled and validated.  If a VM_ALLOC_COUNT is supplied
  * and the page is not valid, as many as VM_INITIAL_PAGEIN pages can be brought
  * in simultaneously.  Additional pages will be left on a paging queue but
  * will neither be wired nor busy regardless of allocflags.
  */
 int
 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
     int allocflags)
 {
 	vm_page_t m;
 	vm_page_t ma[VM_INITIAL_PAGEIN];
 	int after, i, pflags, rv;
 
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 	KASSERT((allocflags &
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
 	    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY);
 	pflags |= VM_ALLOC_WAITFAIL;
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		/*
 		 * If the page is fully valid it can only become invalid
 		 * with the object lock held.  If it is not valid it can
 		 * become valid with the busy lock held.  Therefore, we
 		 * may unnecessarily lock the exclusive busy here if we
 		 * race with I/O completion not using the object lock.
 		 * However, we will not end up with an invalid page and a
 		 * shared lock.
 		 */
 		if (!vm_page_trybusy(m,
 		    vm_page_all_valid(m) ? allocflags : 0)) {
 			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true);
 			goto retrylookup;
 		}
 		if (vm_page_all_valid(m))
 			goto out;
 		if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 			vm_page_busy_release(m);
 			*mp = NULL;
 			return (VM_PAGER_FAIL);
 		}
 	} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
 	} else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
 		if (!vm_pager_can_alloc_page(object, pindex)) {
 			*mp = NULL;
 			return (VM_PAGER_AGAIN);
 		}
 		goto retrylookup;
 	}
 
 	vm_page_assert_xbusied(m);
 	if (vm_pager_has_page(object, pindex, NULL, &after)) {
 		after = MIN(after, VM_INITIAL_PAGEIN);
 		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
 		after = MAX(after, 1);
 		ma[0] = m;
 		for (i = 1; i < after; i++) {
 			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
 				if (vm_page_any_valid(ma[i]) ||
 				    !vm_page_tryxbusy(ma[i]))
 					break;
 			} else {
 				ma[i] = vm_page_alloc(object, m->pindex + i,
 				    VM_ALLOC_NORMAL);
 				if (ma[i] == NULL)
 					break;
 			}
 		}
 		after = i;
 		vm_object_pip_add(object, after);
 		VM_OBJECT_WUNLOCK(object);
 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
 		VM_OBJECT_WLOCK(object);
 		vm_object_pip_wakeupn(object, after);
 		/* Pager may have replaced a page. */
 		m = ma[0];
 		if (rv != VM_PAGER_OK) {
 			for (i = 0; i < after; i++) {
 				if (!vm_page_wired(ma[i]))
 					vm_page_free(ma[i]);
 				else
 					vm_page_xunbusy(ma[i]);
 			}
 			*mp = NULL;
 			return (rv);
 		}
 		for (i = 1; i < after; i++)
 			vm_page_readahead_finish(ma[i]);
 		MPASS(vm_page_all_valid(m));
 	} else {
 		vm_page_zero_invalid(m, TRUE);
 	}
 out:
 	if ((allocflags & VM_ALLOC_WIRED) != 0)
 		vm_page_wire(m);
 	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
 		vm_page_busy_downgrade(m);
 	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
 		vm_page_busy_release(m);
 	*mp = m;
 	return (VM_PAGER_OK);
 }
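 
 /*
  * Usage sketch (illustrative only; "obj" and "idx" are placeholder names):
  * page in a single page, mapping any pager failure onto an errno chosen by
  * the caller.  On success the page is returned fully valid and exclusively
  * busied.
  *
  *	VM_OBJECT_WLOCK(obj);
  *	rv = vm_page_grab_valid(&m, obj, idx, VM_ALLOC_NORMAL);
  *	VM_OBJECT_WUNLOCK(obj);
  *	if (rv != VM_PAGER_OK)
  *		return (EIO);
  *	... examine or modify the page's contents ...
  *	vm_page_xunbusy(m);
  */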
 
 /*
  * Locklessly grab a valid page.  If the page is not valid or not yet
  * allocated, this will fall back to the object lock method.
  */
 int
 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
     vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 	int flags;
 	int error;
 
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
 	    "mismatch"));
 	KASSERT((allocflags &
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));
 
 	/*
 	 * Attempt a lockless lookup and busy.  We need at least an sbusy
 	 * before we can inspect the valid field and return a wired page.
 	 */
 	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
 	vm_page_grab_check(flags);
 	m = vm_page_acquire_unlocked(object, pindex, NULL, flags);
 	if (m == PAGE_NOT_ACQUIRED)
 		return (VM_PAGER_FAIL);
 	if (m != NULL) {
 		if (vm_page_all_valid(m)) {
 			if ((allocflags & VM_ALLOC_WIRED) != 0)
 				vm_page_wire(m);
 			vm_page_grab_release(m, allocflags);
 			*mp = m;
 			return (VM_PAGER_OK);
 		}
 		vm_page_busy_release(m);
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
 	}
 	VM_OBJECT_WLOCK(object);
 	error = vm_page_grab_valid(mp, object, pindex, allocflags);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (error);
 }
 
 /*
  * Return the specified range of pages from the given object.  For each
  * page offset within the range, if a page already exists within the object
  * at that offset and it is busy, then wait for it to change state.  If,
  * instead, the page doesn't exist, then allocate it.
  *
  * The caller must always specify an allocation class.
  *
  * allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs the pages
  *
  * The caller must always specify that the pages are to be busied and/or
  * wired.
  *
  * optional allocation flags:
  *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
  *	VM_ALLOC_NOBUSY		do not exclusively busy the page
  *	VM_ALLOC_NOWAIT		do not sleep
  *	VM_ALLOC_SBUSY		set page to sbusy state
  *	VM_ALLOC_WIRED		wire the pages
  *	VM_ALLOC_ZERO		zero and validate any invalid pages
  *
  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
  * may return a partial prefix of the requested range.
  */
 int
 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count)
 {
 	vm_page_t m, mpred;
 	int pflags;
 	int i;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
 	    ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed"));
 	KASSERT(count > 0,
 	    ("vm_page_grab_pages: invalid page count %d", count));
 	vm_page_grab_check(allocflags);
 
 	pflags = vm_page_grab_pflags(allocflags);
 	i = 0;
 retrylookup:
 	m = vm_page_mpred(object, pindex + i);
 	if (m == NULL || m->pindex != pindex + i) {
 		mpred = m;
 		m = NULL;
 	} else
 		mpred = TAILQ_PREV(m, pglist, listq);
 	for (; i < count; i++) {
 		if (m != NULL) {
 			if (!vm_page_tryacquire(m, allocflags)) {
 				if (vm_page_grab_sleep(object, m, pindex + i,
 				    "grbmaw", allocflags, true))
 					goto retrylookup;
 				break;
 			}
 		} else {
 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 				break;
 			m = vm_page_alloc_after(object, pindex + i,
 			    pflags | VM_ALLOC_COUNT(count - i), mpred);
 			if (m == NULL) {
 				if ((allocflags & (VM_ALLOC_NOWAIT |
 				    VM_ALLOC_WAITFAIL)) != 0)
 					break;
 				goto retrylookup;
 			}
 		}
 		if (vm_page_none_valid(m) &&
 		    (allocflags & VM_ALLOC_ZERO) != 0) {
 			if ((m->flags & PG_ZERO) == 0)
 				pmap_zero_page(m);
 			vm_page_valid(m);
 		}
 		vm_page_grab_release(m, allocflags);
 		ma[i] = mpred = m;
 		m = vm_page_next(m);
 	}
 	return (i);
 }
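 
 /*
  * Usage sketch (illustrative only; "obj", "base" and "ma" are placeholder
  * names): populate a run of eight exclusively busied, wired pages,
  * zero-filling and validating any that are invalid.
  *
  *	VM_OBJECT_WLOCK(obj);
  *	n = vm_page_grab_pages(obj, base,
  *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, 8);
  *	VM_OBJECT_WUNLOCK(obj);
  */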
 
 /*
  * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
  * and will fall back to the locked variant to handle allocation.
  */
 int
 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
     int allocflags, vm_page_t *ma, int count)
 {
 	vm_page_t m;
 	int flags;
 	int i;
 
 	KASSERT(count > 0,
 	    ("vm_page_grab_pages_unlocked: invalid page count %d", count));
 	vm_page_grab_check(allocflags);
 
 	/*
 	 * Modify flags for lockless acquire to hold the page until we
 	 * set it valid if necessary.
 	 */
 	flags = allocflags & ~VM_ALLOC_NOBUSY;
 	vm_page_grab_check(flags);
 	m = NULL;
 	for (i = 0; i < count; i++, pindex++) {
 		/*
 		 * We may see a false NULL here because the previous page has
 		 * been removed or just inserted and the list is loaded without
 		 * barriers.  Switch to radix to verify.
 		 */
 		if (m == NULL || QMD_IS_TRASHED(m) || m->pindex != pindex ||
 		    atomic_load_ptr(&m->object) != object) {
 			/*
 			 * This guarantees the result is instantaneously
 			 * correct.
 			 */
 			m = NULL;
 		}
 		m = vm_page_acquire_unlocked(object, pindex, m, flags);
 		if (m == PAGE_NOT_ACQUIRED)
 			return (i);
 		if (m == NULL)
 			break;
 		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
 			if ((m->flags & PG_ZERO) == 0)
 				pmap_zero_page(m);
 			vm_page_valid(m);
 		}
 		/* m will still be wired or busy according to flags. */
 		vm_page_grab_release(m, allocflags);
 		ma[i] = m;
 		m = TAILQ_NEXT(m, listq);
 	}
 	if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (i);
 	count -= i;
 	VM_OBJECT_WLOCK(object);
 	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (i);
 }
 
 /*
  * Mapping function for valid or dirty bits in a page.
  *
  * Inputs are required to range within a page.
  */
 vm_page_bits_t
 vm_page_bits(int base, int size)
 {
 	int first_bit;
 	int last_bit;
 
 	KASSERT(
 	    base + size <= PAGE_SIZE,
 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
 	);
 
 	if (size == 0)		/* handle degenerate case */
 		return (0);
 
 	first_bit = base >> DEV_BSHIFT;
 	last_bit = (base + size - 1) >> DEV_BSHIFT;
 
 	return (((vm_page_bits_t)2 << last_bit) -
 	    ((vm_page_bits_t)1 << first_bit));
 }
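 
 /*
  * Worked example (with the usual DEV_BSIZE of 512, DEV_BSHIFT is 9): for
  * base = 512 and size = 1024, first_bit = 1 and last_bit = 1535 >> 9 = 2,
  * so the result is (2 << 2) - (1 << 1) = 0x6, i.e., the bits covering the
  * second and third 512-byte blocks of the page.
  */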
 
 void
 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
 {
 
 #if PAGE_SIZE == 32768
 	atomic_set_64((uint64_t *)bits, set);
 #elif PAGE_SIZE == 16384
 	atomic_set_32((uint32_t *)bits, set);
 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
 	atomic_set_16((uint16_t *)bits, set);
 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
 	atomic_set_8((uint8_t *)bits, set);
 #else		/* PAGE_SIZE <= 8192 */
 	uintptr_t addr;
 	int shift;
 
 	addr = (uintptr_t)bits;
 	/*
 	 * Use a trick to perform a 32-bit atomic on the
 	 * containing aligned word, to not depend on the existence
 	 * of atomic_{set, clear}_{8, 16}.
 	 */
 	shift = addr & (sizeof(uint32_t) - 1);
 #if BYTE_ORDER == BIG_ENDIAN
 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
 #else
 	shift *= NBBY;
 #endif
 	addr &= ~(sizeof(uint32_t) - 1);
 	atomic_set_32((uint32_t *)addr, set << shift);
 #endif		/* PAGE_SIZE */
 }
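 
 /*
  * Worked example for the 32-bit fallback above (assuming 4 KB pages, so
  * vm_page_bits_t is a uint8_t covering eight 512-byte blocks): if the
  * bitmap byte sits at offset 3 within its aligned 32-bit word, then
  * shift = 3 * NBBY = 24 on a little-endian machine and
  * shift = (4 - 1 - 3) * NBBY = 0 on a big-endian one; either way,
  * "set << shift" lands exactly on that byte, so the 32-bit atomic
  * modifies only the intended bitmap.
  */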
 
 static inline void
 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
 {
 
 #if PAGE_SIZE == 32768
 	atomic_clear_64((uint64_t *)bits, clear);
 #elif PAGE_SIZE == 16384
 	atomic_clear_32((uint32_t *)bits, clear);
 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
 	atomic_clear_16((uint16_t *)bits, clear);
 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
 	atomic_clear_8((uint8_t *)bits, clear);
 #else		/* PAGE_SIZE <= 8192 */
 	uintptr_t addr;
 	int shift;
 
 	addr = (uintptr_t)bits;
 	/*
 	 * Use a trick to perform a 32-bit atomic on the
 	 * containing aligned word, to not depend on the existence
 	 * of atomic_{set, clear}_{8, 16}.
 	 */
 	shift = addr & (sizeof(uint32_t) - 1);
 #if BYTE_ORDER == BIG_ENDIAN
 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
 #else
 	shift *= NBBY;
 #endif
 	addr &= ~(sizeof(uint32_t) - 1);
 	atomic_clear_32((uint32_t *)addr, clear << shift);
 #endif		/* PAGE_SIZE */
 }
 
 static inline vm_page_bits_t
 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
 {
 #if PAGE_SIZE == 32768
 	uint64_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
 	return (old);
 #elif PAGE_SIZE == 16384
 	uint32_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
 	return (old);
 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
 	uint16_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
 	return (old);
 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
 	uint8_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
 	return (old);
 #else		/* PAGE_SIZE <= 4096*/
 	uintptr_t addr;
 	uint32_t old, new, mask;
 	int shift;
 
 	addr = (uintptr_t)bits;
 	/*
 	 * Use a trick to perform a 32-bit atomic on the
 	 * containing aligned word, to not depend on the existence
 	 * of atomic_{set, swap, clear}_{8, 16}.
 	 */
 	shift = addr & (sizeof(uint32_t) - 1);
 #if BYTE_ORDER == BIG_ENDIAN
 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
 #else
 	shift *= NBBY;
 #endif
 	addr &= ~(sizeof(uint32_t) - 1);
 	mask = VM_PAGE_BITS_ALL << shift;
 
 	old = *bits;
 	do {
 		new = old & ~mask;
 		new |= newbits << shift;
 	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
 	return (old >> shift);
 #endif		/* PAGE_SIZE */
 }
 
 /*
  *	vm_page_set_valid_range:
  *
  *	Sets portions of a page valid.  The arguments are expected
  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
  *	of any partial chunks touched by the range.  The invalid portion of
  *	such chunks will be zeroed.
  *
  *	(base + size) must be less than or equal to PAGE_SIZE.
  */
 void
 vm_page_set_valid_range(vm_page_t m, int base, int size)
 {
 	int endoff, frag;
 	vm_page_bits_t pagebits;
 
 	vm_page_assert_busied(m);
 	if (size == 0)	/* handle degenerate case */
 		return;
 
 	/*
 	 * If the base is not DEV_BSIZE aligned and the valid
 	 * bit is clear, we have to zero out a portion of the
 	 * first block.
 	 */
 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, frag, base - frag);
 
 	/*
 	 * If the ending offset is not DEV_BSIZE aligned and the
 	 * valid bit is clear, we have to zero out a portion of
 	 * the last block.
 	 */
 	endoff = base + size;
 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, endoff,
 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 
 	/*
 	 * Assert that no previously invalid block that is now being validated
 	 * is already dirty.
 	 */
 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
 	    ("vm_page_set_valid_range: page %p is dirty", m));
 
 	/*
 	 * Set valid bits inclusive of any overlap.
 	 */
 	pagebits = vm_page_bits(base, size);
 	if (vm_page_xbusied(m))
 		m->valid |= pagebits;
 	else
 		vm_page_bits_set(m, &m->valid, pagebits);
 }
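 
 /*
  * Worked example (DEV_BSIZE of 512): a call with base = 100 and size = 300
  * touches only the page's first block.  If that block's valid bit is
  * clear, bytes [0, 100) and [400, 512) are zeroed above, and
  * vm_page_bits(100, 300) evaluates to 0x1, so only the first block's
  * valid bit is set.
  */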
 
 /*
  * Set the page dirty bits and free the invalid swap space if
  * present.  Returns the previous dirty bits.
  */
 vm_page_bits_t
 vm_page_set_dirty(vm_page_t m)
 {
 	vm_page_bits_t old;
 
 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
 
 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
 		old = m->dirty;
 		m->dirty = VM_PAGE_BITS_ALL;
 	} else
 		old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
 	if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
 		vm_pager_page_unswapped(m);
 
 	return (old);
 }
 
 /*
  * Clear the given bits from the specified page's dirty field.
  */
 static __inline void
 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
 {
 
 	vm_page_assert_busied(m);
 
 	/*
 	 * If the page is xbusied and not write mapped, we are the
 	 * only thread that can modify dirty bits.  Otherwise, the pmap
 	 * layer can call vm_page_dirty() without holding a distinguished
 	 * lock.  The combination of page busy and atomic operations
 	 * suffices to guarantee consistency of the page dirty field.
 	 */
 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
 		m->dirty &= ~pagebits;
 	else
 		vm_page_bits_clear(m, &m->dirty, pagebits);
 }
 
 /*
  *	vm_page_set_validclean:
  *
  *	Sets portions of a page valid and clean.  The arguments are expected
  *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
  *	of any partial chunks touched by the range.  The invalid portion of
  *	such chunks will be zeroed.
  *
  *	(base + size) must be less than or equal to PAGE_SIZE.
  */
 void
 vm_page_set_validclean(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t oldvalid, pagebits;
 	int endoff, frag;
 
 	vm_page_assert_busied(m);
 	if (size == 0)	/* handle degenerate case */
 		return;
 
 	/*
 	 * If the base is not DEV_BSIZE aligned and the valid
 	 * bit is clear, we have to zero out a portion of the
 	 * first block.
 	 */
 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, frag, base - frag);
 
 	/*
 	 * If the ending offset is not DEV_BSIZE aligned and the
 	 * valid bit is clear, we have to zero out a portion of
 	 * the last block.
 	 */
 	endoff = base + size;
 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, endoff,
 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 
 	/*
 	 * Set valid, clear dirty bits.  If validating the entire
 	 * page we can safely clear the pmap modify bit.  We also
 	 * use this opportunity to clear the PGA_NOSYNC flag.  If a process
 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
 	 * be set again.
 	 *
 	 * We set valid bits inclusive of any overlap, but we can only
 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
 	 * the range.
 	 */
 	oldvalid = m->valid;
 	pagebits = vm_page_bits(base, size);
 	if (vm_page_xbusied(m))
 		m->valid |= pagebits;
 	else
 		vm_page_bits_set(m, &m->valid, pagebits);
 #if 0	/* NOT YET */
 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
 		frag = DEV_BSIZE - frag;
 		base += frag;
 		size -= frag;
 		if (size < 0)
 			size = 0;
 	}
 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 #endif
 	if (base == 0 && size == PAGE_SIZE) {
 		/*
 		 * The page can only be modified within the pmap if it is
 		 * mapped, and it can only be mapped if it was previously
 		 * fully valid.
 		 */
 		if (oldvalid == VM_PAGE_BITS_ALL)
 			/*
 			 * Perform the pmap_clear_modify() first.  Otherwise,
 			 * a concurrent pmap operation, such as
 			 * pmap_protect(), could clear a modification in the
 			 * pmap and set the dirty field on the page before
 			 * pmap_clear_modify() had begun and after the dirty
 			 * field was cleared here.
 			 */
 			pmap_clear_modify(m);
 		m->dirty = 0;
 		vm_page_aflag_clear(m, PGA_NOSYNC);
 	} else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
 		m->dirty &= ~pagebits;
 	else
 		vm_page_clear_dirty_mask(m, pagebits);
 }
 
 void
 vm_page_clear_dirty(vm_page_t m, int base, int size)
 {
 
 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
 }
 
 /*
  *	vm_page_set_invalid:
  *
  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
  *	valid and dirty bits for the affected areas are cleared.
  */
 void
 vm_page_set_invalid(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t bits;
 	vm_object_t object;
 
 	/*
 	 * The object lock is required so that pages can't be mapped
 	 * read-only while we're in the process of invalidating them.
 	 */
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_assert_busied(m);
 
 	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
 	    size >= object->un_pager.vnp.vnp_size)
 		bits = VM_PAGE_BITS_ALL;
 	else
 		bits = vm_page_bits(base, size);
 	if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
 		pmap_remove_all(m);
 	KASSERT((bits == 0 && vm_page_all_valid(m)) ||
 	    !pmap_page_is_mapped(m),
 	    ("vm_page_set_invalid: page %p is mapped", m));
 	if (vm_page_xbusied(m)) {
 		m->valid &= ~bits;
 		m->dirty &= ~bits;
 	} else {
 		vm_page_bits_clear(m, &m->valid, bits);
 		vm_page_bits_clear(m, &m->dirty, bits);
 	}
 }
 
 /*
  *	vm_page_invalid:
  *
  *	Invalidates the entire page.  The page must be busy, unmapped, and
  *	the enclosing object must be locked.  The object lock protects
  *	against concurrent read-only pmap enter, which is done without
  *	busying the page.
  */
 void
 vm_page_invalid(vm_page_t m)
 {
 
 	vm_page_assert_busied(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	MPASS(!pmap_page_is_mapped(m));
 
 	if (vm_page_xbusied(m))
 		m->valid = 0;
 	else
 		vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
 }
 
 /*
  * vm_page_zero_invalid()
  *
  *	The kernel assumes that the invalid portions of a page contain
  *	garbage, but such pages can be mapped into memory by user code.
  *	When this occurs, we must zero out the non-valid portions of the
  *	page so user code sees what it expects.
  *
  *	Pages are most often semi-valid when the end of a file is mapped
  *	into memory and the file's size is not page aligned.
  */
 void
 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 {
 	int b;
 	int i;
 
 	/*
 	 * Scan the valid bits looking for invalid sections that
 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
 	 * valid bit may be set) have already been zeroed by
 	 * vm_page_set_validclean().
 	 */
 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
 		    (m->valid & ((vm_page_bits_t)1 << i))) {
 			if (i > b) {
 				pmap_zero_page_area(m,
 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 			}
 			b = i + 1;
 		}
 	}
 
 	/*
 	 * setvalid is TRUE when we can safely set the zeroed areas
 	 * as being valid.  We can do this if there are no cache consistency
 	 * issues, e.g., it is OK to do with UFS, but not OK to do with NFS.
 	 */
 	if (setvalid)
 		vm_page_valid(m);
 }
 
 /*
  *	vm_page_is_valid:
  *
  *	Is (partial) page valid?  Note that in the degenerate case where
  *	size == 0, this will return FALSE if the page is entirely invalid,
  *	and TRUE otherwise.
  *
  *	Some callers invoke this routine without the busy lock held and
  *	handle races via higher level locks.  Typical callers should
  *	hold a busy lock to prevent invalidation.
  */
 int
 vm_page_is_valid(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t bits;
 
 	bits = vm_page_bits(base, size);
 	return (vm_page_any_valid(m) && (m->valid & bits) == bits);
 }
 
 /*
  * Returns true if all of the specified predicates are true for the entire
  * (super)page and false otherwise.
  */
 bool
 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m)
 {
 	vm_object_t object;
 	int i, npages;
 
 	object = m->object;
 	if (skip_m != NULL && skip_m->object != object)
 		return (false);
 	VM_OBJECT_ASSERT_LOCKED(object);
 	KASSERT(psind <= m->psind,
 	    ("psind %d > psind %d of m %p", psind, m->psind, m));
 	npages = atop(pagesizes[psind]);
 
 	/*
 	 * The physically contiguous pages that make up a superpage, i.e., a
 	 * page with a page size index ("psind") greater than zero, will
 	 * occupy adjacent entries in vm_page_array[].
 	 */
 	for (i = 0; i < npages; i++) {
 		/* Always test object consistency, including "skip_m". */
 		if (m[i].object != object)
 			return (false);
 		if (&m[i] == skip_m)
 			continue;
 		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
 			return (false);
 		if ((flags & PS_ALL_DIRTY) != 0) {
 			/*
 			 * Calling vm_page_test_dirty() or pmap_is_modified()
 			 * might stop this case from spuriously returning
 			 * "false".  However, that would require a write lock
 			 * on the object containing "m[i]".
 			 */
 			if (m[i].dirty != VM_PAGE_BITS_ALL)
 				return (false);
 		}
 		if ((flags & PS_ALL_VALID) != 0 &&
 		    m[i].valid != VM_PAGE_BITS_ALL)
 			return (false);
 	}
 	return (true);
 }
 
 /*
  * Set the page's dirty bits if the page is modified.
  */
 void
 vm_page_test_dirty(vm_page_t m)
 {
 
 	vm_page_assert_busied(m);
 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
 		vm_page_dirty(m);
 }
 
 void
 vm_page_valid(vm_page_t m)
 {
 
 	vm_page_assert_busied(m);
 	if (vm_page_xbusied(m))
 		m->valid = VM_PAGE_BITS_ALL;
 	else
 		vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
 }
 
 void
 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
 {
 
 	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
 }
 
 void
 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
 {
 
 	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
 }
 
 int
 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
 {
 
 	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
 }
 
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void
 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
 {
 
 	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
 }
 
 void
 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
 {
 
 	mtx_assert_(vm_page_lockptr(m), a, file, line);
 }
 #endif
 
 #ifdef INVARIANTS
 void
 vm_page_object_busy_assert(vm_page_t m)
 {
 
 	/*
 	 * Certain of the page's fields may only be modified by the
 	 * holder of a page or object busy.
 	 */
 	if (m->object != NULL && !vm_page_busied(m))
 		VM_OBJECT_ASSERT_BUSY(m->object);
 }
 
 void
 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
 {
 
 	if ((bits & PGA_WRITEABLE) == 0)
 		return;
 
 	/*
 	 * The PGA_WRITEABLE flag can only be set if the page is
 	 * managed, is exclusively busied or the object is locked.
 	 * Currently, this flag is only set by pmap_enter().
 	 */
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("PGA_WRITEABLE on unmanaged page"));
 	if (!vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_BUSY(m->object);
 }
 #endif
 
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
 
 #include <ddb/ddb.h>
 
 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE)
 {
 
 	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
 	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
 	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
 	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
 	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
 	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
 	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
 	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
 	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
 }
 
 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE)
 {
 	int dom;
 
 	db_printf("pq_free %d\n", vm_free_count());
 	for (dom = 0; dom < vm_ndomains; dom++) {
 		db_printf(
     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
 		    dom,
 		    vm_dom[dom].vmd_page_count,
 		    vm_dom[dom].vmd_free_count,
 		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
 		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
 		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
 		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
 	}
 }
 
 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
 {
 	vm_page_t m;
 	boolean_t phys, virt;
 
 	if (!have_addr) {
 		db_printf("show pginfo addr\n");
 		return;
 	}
 
 	phys = strchr(modif, 'p') != NULL;
 	virt = strchr(modif, 'v') != NULL;
 	if (virt)
 		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
 	else if (phys)
 		m = PHYS_TO_VM_PAGE(addr);
 	else
 		m = (vm_page_t)addr;
 	db_printf(
     "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n"
     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
 	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
 	    m->a.queue, m->ref_count, m->a.flags, m->oflags,
 	    m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
 }
 #endif /* DDB */
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 893608bcacf1..613896e77dd9 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -1,1040 +1,1042 @@
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  *	Resident memory system definitions.
  */
 
 #ifndef	_VM_PAGE_
 #define	_VM_PAGE_
 
 #include <vm/pmap.h>
 #include <vm/_vm_phys.h>
 
 /*
  *	Management of resident (logical) pages.
  *
  *	A small structure is kept for each resident
  *	page, indexed by page number.  Each structure
  *	is an element of several collections:
  *
  *		A radix tree used to quickly
  *		perform object/offset lookups
  *
  *		A list of all pages for a given object,
  *		so they can be quickly deactivated at
  *		time of deallocation.
  *
  *		An ordered list of pages due for pageout.
  *
  *	In addition, the structure contains the object
  *	and offset to which this page belongs (for pageout),
  *	and sundry status bits.
  *
  *	In general, operations on this structure's mutable fields are
  *	synchronized using one of, or a combination of, several locks.  If a
  *	field is annotated with two of these locks, then holding either is
  *	sufficient for read access but both are required for write access.
  *	The queue lock for a page depends on the value of its queue field and is
  *	described in detail below.
  *
  *	The following annotations are possible:
  *	(A) the field must be accessed using atomic(9) and may require
  *	    additional synchronization.
  *	(B) the page busy lock.
  *	(C) the field is immutable.
  *	(F) the per-domain lock for the free queues.
  *	(M) Machine dependent, defined by pmap layer.
  *	(O) the object that the page belongs to.
  *	(Q) the page's queue lock.
  *
  *	The busy lock is an embedded reader-writer lock that protects the
  *	page's contents and identity (i.e., its <object, pindex> tuple) as
  *	well as certain valid/dirty modifications.  To avoid bloating the
  *	page structure, the busy lock lacks some of the features available in
  *	the kernel's general-purpose synchronization primitives.  As a result,
  *	busy lock ordering rules are not verified, lock recursion is not
  *	detected, and an attempt to xbusy a busy page or sbusy an xbusy page
  *	will trigger a panic rather than causing the thread to block.
  *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
  *	state changes, after which the caller must re-lookup the page and
  *	re-evaluate its state.  vm_page_busy_acquire() will block until
  *	the lock is acquired.
  *
  *	The valid field is protected by the page busy lock (B) and object
  *	lock (O).  Transitions from invalid to valid are generally done
  *	via I/O or zero filling and do not require the object lock.
  *	These must be protected with the busy lock to prevent page-in or
  *	creation races.  Page invalidation generally happens as a result
  *	of truncate or msync.  When invalidated, pages must not be present
  *	in pmap and must hold the object lock to prevent concurrent
  *	speculative read-only mappings that do not require busy.  I/O
  *	routines may check for validity without a lock if they are prepared
  *	to handle invalidation races with higher level locks (vnode) or are
  *	unconcerned with races so long as they hold a reference to prevent
  *	recycling.  When a valid bit is set while holding a shared busy
  *	lock, (A) atomic operations are used to protect against concurrent
  *	modification.
  *
  *	In contrast, the synchronization of accesses to the page's
  *	dirty field is a mix of machine dependent (M) and busy (B).  In
  *	the machine-independent layer, the page busy must be held to
  *	operate on the field.  However, the pmap layer is permitted to
  *	set all bits within the field without holding that lock.  If the
  *	underlying architecture does not support atomic read-modify-write
  *	operations on the field's type, then the machine-independent
  *	layer uses a 32-bit atomic on the aligned 32-bit word that
  *	contains the dirty field.  In the machine-independent layer,
  *	the implementation of read-modify-write operations on the
  *	field is encapsulated in vm_page_clear_dirty_mask().  An
  *	exclusive busy lock combined with pmap_remove_{write/all}() is the
  *	only way to ensure a page can not become dirty.  I/O generally
  *	removes the page from pmap to ensure exclusive access and atomic
  *	writes.
  *
  *	The ref_count field tracks references to the page.  References that
  *	prevent the page from being reclaimable are called wirings and are
  *	counted in the low bits of ref_count.  The containing object's
  *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
  *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
  *	atomically check for wirings and prevent new wirings via
  *	pmap_extract_and_hold().  When a page belongs to an object, it may be
  *	wired only when the object is locked, or the page is busy, or by
  *	pmap_extract_and_hold().  As a result, if the object is locked and the
  *	page is not busy (or is exclusively busied by the current thread), and
  *	the page is unmapped, its wire count will not increase.  The ref_count
  *	field is updated using atomic operations in most cases, except when it
  *	is known that no other references to the page exist, such as in the page
  *	allocator.  A page may be present in the page queues, or even actively
  *	scanned by the page daemon, without an explicitly counted reference.
  *	The page daemon must therefore handle the possibility of a concurrent
  *	free of the page.
  *
  *	The queue state of a page consists of the queue and act_count fields of
  *	its atomically updated state, and the subset of atomic flags specified
  *	by PGA_QUEUE_STATE_MASK.  The queue field contains the page's page queue
  *	index, or PQ_NONE if it does not belong to a page queue.  To modify the
  *	queue field, the page queue lock corresponding to the old value must be
  *	held, unless that value is PQ_NONE, in which case the queue index must
  *	be updated using an atomic RMW operation.  There is one exception to
  *	this rule: the page daemon may transition the queue field from
  *	PQ_INACTIVE to PQ_NONE immediately prior to freeing the page during an
  *	inactive queue scan.  At that point the page is already dequeued and no
  *	other references to that vm_page structure can exist.  The PGA_ENQUEUED
  *	flag, when set, indicates that the page structure is physically inserted
  *	into the queue corresponding to the page's queue index, and may only be
  *	set or cleared with the corresponding page queue lock held.
  *
  *	To avoid contention on page queue locks, page queue operations (enqueue,
  *	dequeue, requeue) are batched using fixed-size per-CPU queues.  A
  *	deferred operation is requested by setting one of the flags in
  *	PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.  When a
  *	queue is full, an attempt to insert a new entry will lock the page
  *	queues and trigger processing of the pending entries.  The
  *	type-stability of vm_page structures is crucial to this scheme since the
  *	processing of entries in a given batch queue may be deferred
  *	indefinitely.  In particular, a page may be freed with pending batch
  *	queue entries.  The page queue operation flags must be set using atomic
  *	RMW operations.
  */
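
 /*
  * Illustrative usage sketch for the busy lock described above, assuming a
  * hypothetical object "obj" and index "idx"; the VM_OBJECT_WLOCK() and
  * VM_OBJECT_WUNLOCK() macros come from vm/vm_object.h:
  *
  *	vm_page_t m;
  *
  *	VM_OBJECT_WLOCK(obj);
  *	m = vm_page_lookup(obj, idx);
  *	if (m != NULL && vm_page_tryxbusy(m) != 0) {
  *		... the page's identity and contents are now stable ...
  *		vm_page_xunbusy(m);
  *	}
  *	VM_OBJECT_WUNLOCK(obj);
  */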
 
 #if PAGE_SIZE == 4096
 #define VM_PAGE_BITS_ALL 0xffu
 typedef uint8_t vm_page_bits_t;
 #elif PAGE_SIZE == 8192
 #define VM_PAGE_BITS_ALL 0xffffu
 typedef uint16_t vm_page_bits_t;
 #elif PAGE_SIZE == 16384
 #define VM_PAGE_BITS_ALL 0xffffffffu
 typedef uint32_t vm_page_bits_t;
 #elif PAGE_SIZE == 32768
 #define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
 typedef uint64_t vm_page_bits_t;
 #endif
 
 typedef union vm_page_astate {
 	struct {
 		uint16_t flags;
 		uint8_t	queue;
 		uint8_t act_count;
 	};
 	uint32_t _bits;
 } vm_page_astate_t;
 
 struct vm_page {
 	union {
 		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
 		struct {
 			SLIST_ENTRY(vm_page) ss; /* private slists */
 		} s;
 		struct {
 			u_long p;
 			u_long v;
 		} memguard;
 		struct {
 			void *slab;
 			void *zone;
 		} uma;
 	} plinks;
 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
 	vm_object_t object;		/* which object am I in (O) */
 	vm_pindex_t pindex;		/* offset into object (O,P) */
 	vm_paddr_t phys_addr;		/* physical address of page (C) */
 	struct md_page md;		/* machine dependent stuff */
 	u_int ref_count;		/* page references (A) */
 	u_int busy_lock;		/* busy owners lock (A) */
 	union vm_page_astate a;		/* state accessed atomically (A) */
 	uint8_t order;			/* index of the buddy queue (F) */
 	uint8_t pool;			/* vm_phys freepool index (F) */
 	uint8_t flags;			/* page PG_* flags (P) */
 	uint8_t oflags;			/* page VPO_* flags (O) */
 	int8_t psind;			/* pagesizes[] index (O) */
 	int8_t segind;			/* vm_phys segment index (C) */
 	/* NOTE that these must support one bit per DEV_BSIZE in a page */
 	/* so, on normal X86 kernels, they must be at least 8 bits wide */
 	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
 	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
 };
 
 /*
  * Special bits used in the ref_count field.
  *
  * ref_count is normally used to count wirings that prevent the page from being
  * reclaimed, but also supports several special types of references that do not
  * prevent reclamation.  Accesses to the ref_count field must be atomic unless
  * the page is unallocated.
  *
  * VPRC_OBJREF is the reference held by the containing object.  It can be set or
  * cleared only when the corresponding object's write lock is held.
  *
  * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
  * attempting to tear down all mappings of a given page.  The page busy lock and
  * object write lock must both be held in order to set or clear this bit.
  */
 #define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
 #define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
 #define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
 #define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))
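
 /*
  * Worked example of the encoding above: a hypothetical ref_count value of
  * (VPRC_OBJREF | 2) describes a page holding its containing object's
  * reference plus two wirings; VPRC_WIRE_COUNT(VPRC_OBJREF | 2) evaluates
  * to 2, so the page is considered wired.
  */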
 
 /*
  * Page flags stored in oflags:
  *
  * Access to these page flags is synchronized by the lock on the object
  * containing the page (O).
  *
  * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
  * 	 indicates that the page is not under PV management but
  * 	 otherwise should be treated as a normal page.  Pages not
  * 	 under PV management cannot be paged out via the
  * 	 object/vm_page_t because there is no knowledge of their pte
  * 	 mappings, and such pages are also not on any PQ queue.
  *
  */
 #define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
 #define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
 #define	VPO_UNMANAGED	0x04		/* no PV management for page */
 #define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
 
 /*
  * Busy page implementation details.
  * The algorithm is derived largely from the rwlock(9) and sx(9) lock
  * implementations, but support for owner identity is omitted because of
  * size constraints.  Checks for lock recursion are therefore not possible,
  * and the effectiveness of the lock assertions is somewhat reduced.
  */
 #define	VPB_BIT_SHARED		0x01
 #define	VPB_BIT_EXCLUSIVE	0x02
 #define	VPB_BIT_WAITERS		0x04
 #define	VPB_BIT_FLAGMASK						\
 	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)
 
 #define	VPB_SHARERS_SHIFT	3
 #define	VPB_SHARERS(x)							\
 	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
 #define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
 #define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)
 
 #define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
 #ifdef INVARIANTS
 #define	VPB_CURTHREAD_EXCLUSIVE						\
 	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
 #else
 #define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
 #endif
 
 #define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
 
 /* Freed lock blocks both shared and exclusive. */
 #define	VPB_FREED		(0xffffffff - VPB_BIT_SHARED)
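
 /*
  * Worked example of the busy_lock encoding above: VPB_UNBUSIED is
  * VPB_SHARERS_WORD(0), i.e., 0x01 (the shared bit with a sharer count of
  * zero), while a page shared busied by two threads has the value
  * VPB_SHARERS_WORD(2) == 0x11, for which VPB_SHARERS() returns 2.
  */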
 
 #define	PQ_NONE		255
 #define	PQ_INACTIVE	0
 #define	PQ_ACTIVE	1
 #define	PQ_LAUNDRY	2
 #define	PQ_UNSWAPPABLE	3
 #define	PQ_COUNT	4
 
 #ifndef VM_PAGE_HAVE_PGLIST
 TAILQ_HEAD(pglist, vm_page);
 #define VM_PAGE_HAVE_PGLIST
 #endif
 SLIST_HEAD(spglist, vm_page);
 
 #ifdef _KERNEL
 extern vm_page_t bogus_page;
 #endif	/* _KERNEL */
 
 extern struct mtx_padalign pa_lock[];
 
 #if defined(__arm__)
 #define	PDRSHIFT	PDR_SHIFT
 #elif !defined(PDRSHIFT)
 #define PDRSHIFT	21
 #endif
 
 #define	pa_index(pa)	((pa) >> PDRSHIFT)
 #define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
 #define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
 #define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
 #define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
 #define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
 #define	PA_UNLOCK_COND(pa) 			\
 	do {		   			\
 		if ((pa) != 0) {		\
 			PA_UNLOCK((pa));	\
 			(pa) = 0;		\
 		}				\
 	} while (0)
 
 #define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
 
 #if defined(KLD_MODULE) && !defined(KLD_TIED)
 #define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
 #define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
 #define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
 #else	/* !KLD_MODULE */
 #define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
 #define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
 #define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
 #define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
 #endif
 #if defined(INVARIANTS)
 #define	vm_page_assert_locked(m)		\
     vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
 #define	vm_page_lock_assert(m, a)		\
     vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
 #else
 #define	vm_page_assert_locked(m)
 #define	vm_page_lock_assert(m, a)
 #endif
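
 /*
  * Illustrative expansion of the macros above for code compiled into the
  * kernel (i.e., not a standalone KLD):
  *
  *	vm_page_lock(m) -> mtx_lock(PA_LOCKPTR(VM_PAGE_TO_PHYS(m)))
  *
  * i.e., the page lock is the pa_lock[] mutex selected by hashing the
  * page's physical address.
  */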
 
 /*
  * The vm_page's aflags are updated using atomic operations.  To set or clear
  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
  * must be used.  Neither these flags nor these functions are part of the KBI.
  *
  * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
  * both the MI and MD VM layers.  However, kernel loadable modules should not
  * directly set this flag.  They should call vm_page_reference() instead.
  *
  * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
  * When it does so, the object must be locked, or the page must be
  * exclusive busied.  The MI VM layer must never access this flag
  * directly.  Instead, it should call pmap_page_is_write_mapped().
  *
  * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
  * at least one executable mapping.  It is not consumed by the MI VM layer.
  *
  * PGA_NOSYNC must be set and cleared with the page busy lock held.
  *
  * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
  * from a page queue, respectively.  It determines whether the plinks.q field
  * of the page is valid.  To set or clear this flag, the page's "queue" field must
  * be a valid queue index, and the corresponding page queue lock must be held.
  *
  * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
  * queue, and cleared when the dequeue request is processed.  A page may
  * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
  * is requested after the page is scheduled to be enqueued but before it is
  * actually inserted into the page queue.
  *
  * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
  * in its page queue.
  *
  * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
  * the inactive queue, thus bypassing LRU.
  *
  * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using an
  * atomic RMW operation to ensure that the "queue" field is a valid queue index,
  * and the corresponding page queue lock must be held when clearing any of the
  * flags.
  *
  * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
  * when the context that dirties the page does not have the object write lock
  * held.
  */
 #define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
 #define	PGA_REFERENCED	0x0002		/* page has been referenced */
 #define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
 #define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
 #define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
 #define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
 #define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
 #define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
 #define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
 #define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */
 
 #define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
 #define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
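
 /*
  * Illustrative sketch: machine-dependent pmap code typically records a
  * hardware reference bit by atomically setting PGA_REFERENCED with the
  * vm_page_aflag_set() helper defined later in this file:
  *
  *	vm_page_aflag_set(m, PGA_REFERENCED);
  *
  * whereas kernel loadable modules should call vm_page_reference()
  * instead, as noted above.
  */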
 
 /*
  * Page flags.  Updates to these flags are not synchronized, and thus they must
  * be set during page allocation or free to avoid races.
  *
  * The PG_PCPU_CACHE flag is set at allocation time if the page was
  * allocated from a per-CPU cache.  It is cleared the next time that the
  * page is allocated from the physical memory allocator.
  */
 #define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
 #define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
 #define	PG_ZERO		0x04		/* page is zeroed */
 #define	PG_MARKER	0x08		/* special queue marker page */
 #define	PG_NODUMP	0x10		/* don't include this page in a dump */
 #define	PG_NOFREE	0x20		/* page should never be freed. */
 
 /*
  * Misc constants.
  */
 #define ACT_DECLINE		1
 #define ACT_ADVANCE		3
 #define ACT_INIT		5
 #define ACT_MAX			64
 
 #ifdef _KERNEL
 
 #include <sys/kassert.h>
 #include <machine/atomic.h>
 struct pctrie_iter;
 
 /*
  * Each pageable resident page falls into one of five lists:
  *
  *	free
  *		Available for allocation now.
  *
  *	inactive
  *		Low activity, candidates for reclamation.
  *		This list is approximately LRU ordered.
  *
  *	laundry
  *		This is the list of pages that should be
  *		paged out next.
  *
  *	unswappable
  *		Dirty anonymous pages that cannot be paged
  *		out because no swap device is configured.
  *
  *	active
  *		Pages that are "active", i.e., they have been
  *		recently referenced.
  *
  */
 
 extern vm_page_t vm_page_array;		/* First resident page in table */
 extern long vm_page_array_size;		/* number of vm_page_t's */
 extern long first_page;			/* first physical page number */
 
 #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
 
 /*
  * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
  * page to which the given physical address belongs. The correct vm_page_t
  * object is returned for addresses that are not page-aligned.
  */
 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
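
 /*
  * Illustrative example: for a physical address "pa" that is covered by the
  * vm_page array, PHYS_TO_VM_PAGE() and VM_PAGE_TO_PHYS() round-trip up to
  * page alignment:
  *
  *	vm_page_t m = PHYS_TO_VM_PAGE(pa);
  *	KASSERT(VM_PAGE_TO_PHYS(m) == trunc_page(pa), ("mismatched page"));
  */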
 
 /*
  * Page allocation parameters for the functions vm_page_alloc(),
  * vm_page_grab(), vm_page_alloc_contig(), and vm_page_alloc_freelist().
  * Some functions support only a subset of the flags and ignore the
  * others; see the flags legend.
  *
  * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
  * and the vm_page_grab*() functions.  See these functions for details.
  *
  * Bits 0 - 1 define class.
  * Bits 2 - 15 dedicated for flags.
  * Legend:
  * (a) - vm_page_alloc() supports the flag.
  * (c) - vm_page_alloc_contig() supports the flag.
  * (g) - vm_page_grab() supports the flag.
  * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
  * (p) - vm_page_grab_pages() supports the flag.
  * Bits above 15 define the count of additional pages that the caller
  * intends to allocate.
  */
 #define VM_ALLOC_NORMAL		0
 #define VM_ALLOC_INTERRUPT	1
 #define VM_ALLOC_SYSTEM		2
 #define	VM_ALLOC_CLASS_MASK	3
 #define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
 #define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
 #define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
 #define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
 #define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
 #define	VM_ALLOC_NOFREE		0x0100	/* (an) Page will never be released */
 #define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
 #define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
 #define	VM_ALLOC_AVAIL1		0x0800
 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
 #define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
 #define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
 #define	VM_ALLOC_COUNT_MAX	0xffff
 #define	VM_ALLOC_COUNT_SHIFT	16
 #define	VM_ALLOC_COUNT_MASK	(VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
 #define	VM_ALLOC_COUNT(count)	({				\
 	KASSERT((count) <= VM_ALLOC_COUNT_MAX,			\
 	    ("%s: invalid VM_ALLOC_COUNT value", __func__));	\
 	(count) << VM_ALLOC_COUNT_SHIFT;			\
 })
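
 /*
  * Illustrative sketch of composing the request flags above, assuming a
  * hypothetical object "obj" (write-locked) and index "pindex":
  *
  *	m = vm_page_alloc(obj, pindex,
  *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO |
  *	    VM_ALLOC_NOWAIT);
  *	if (m == NULL)
  *		return (ENOMEM);
  *	if ((m->flags & PG_ZERO) == 0)
  *		pmap_zero_page(m);
  *
  * For vm_page_alloc(), VM_ALLOC_ZERO only expresses a preference for a
  * pre-zeroed page; the caller checks PG_ZERO and zeroes the page itself
  * when necessary.
  */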
 
 #ifdef M_NOWAIT
 static inline int
 malloc2vm_flags(int malloc_flags)
 {
 	int pflags;
 
 	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
 	    (malloc_flags & M_NOWAIT) != 0,
 	    ("M_USE_RESERVE requires M_NOWAIT"));
 	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
 	    VM_ALLOC_SYSTEM;
 	if ((malloc_flags & M_ZERO) != 0)
 		pflags |= VM_ALLOC_ZERO;
 	if ((malloc_flags & M_NODUMP) != 0)
 		pflags |= VM_ALLOC_NODUMP;
 	if ((malloc_flags & M_NOWAIT))
 		pflags |= VM_ALLOC_NOWAIT;
 	if ((malloc_flags & M_WAITOK))
 		pflags |= VM_ALLOC_WAITOK;
 	if ((malloc_flags & M_NORECLAIM))
 		pflags |= VM_ALLOC_NORECLAIM;
 	if ((malloc_flags & M_NEVERFREED))
 		pflags |= VM_ALLOC_NOFREE;
 	return (pflags);
 }
 #endif
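
 /*
  * Illustrative sketch: allocation paths that accept malloc(9) M_* flags
  * typically translate them once and then add their own requirements, e.g.
  * with a hypothetical caller-supplied "flags" mask:
  *
  *	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
  *	m = vm_page_alloc_noobj(pflags);
  */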
 
 /*
  * Predicates supported by vm_page_ps_test():
  *
  *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
  *	However, it can be spuriously false when the (super)page has become
  *	dirty in the pmap but that information has not been propagated to the
  *	machine-independent layer.
  */
 #define	PS_ALL_DIRTY	0x1
 #define	PS_ALL_VALID	0x2
 #define	PS_NONE_BUSY	0x4
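
 /*
  * Illustrative example: a caller examining the superpage containing "m"
  * might verify that every base page is fully valid and unbusied with
  *
  *	vm_page_ps_test(m, psind, PS_ALL_VALID | PS_NONE_BUSY, NULL);
  *
  * where "psind" is the hypothetical pagesizes[] index of interest.
  */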
 
 bool vm_page_busy_acquire(vm_page_t m, int allocflags);
 void vm_page_busy_downgrade(vm_page_t m);
 int vm_page_busy_tryupgrade(vm_page_t m);
 bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
 void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
     vm_pindex_t pindex, const char *wmesg, int allocflags);
 void vm_page_free(vm_page_t m);
+void vm_page_iter_free(struct pctrie_iter *);
 void vm_page_free_zero(vm_page_t m);
 
 void vm_page_activate (vm_page_t);
 void vm_page_advise(vm_page_t m, int advice);
 vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
     vm_page_t);
 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
     vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 vm_page_t vm_page_alloc_noobj(int);
 vm_page_t vm_page_alloc_noobj_domain(int, int);
 vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);
 int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
     int allocflags, vm_page_t *ma, int count);
 int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
     int allocflags);
 int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
     vm_pindex_t pindex, int allocflags);
 void vm_page_deactivate(vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_deferred(vm_page_t m);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_iter_lookup_ge(struct pctrie_iter *, vm_pindex_t);
 void vm_page_free_invalid(vm_page_t);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
 void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool);
 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
 void vm_page_invalid(vm_page_t m);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
 void vm_page_iter_init(struct pctrie_iter *, vm_object_t);
 void vm_page_iter_limit_init(struct pctrie_iter *, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_iter_lookup(struct pctrie_iter *, vm_pindex_t);
 vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 void vm_page_pqbatch_drain(void);
 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
 bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
     vm_page_astate_t new);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
 void vm_page_readahead_finish(vm_page_t m);
 int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     int desired_runs);
 void vm_page_reference(vm_page_t m);
 #define	VPR_TRYFREE	0x01
 #define	VPR_NOREUSE	0x02
 void vm_page_release(vm_page_t m, int flags);
 void vm_page_release_locked(vm_page_t m, int flags);
 vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
 bool vm_page_remove(vm_page_t);
+bool vm_page_iter_remove(struct pctrie_iter *);
 bool vm_page_remove_xbusy(vm_page_t);
-int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
+int vm_page_rename(struct pctrie_iter *, vm_object_t, vm_pindex_t);
 void vm_page_replace(vm_page_t mnew, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mold);
 int vm_page_sbusied(vm_page_t m);
 vm_page_bits_t vm_page_set_dirty(vm_page_t m);
 void vm_page_set_valid_range(vm_page_t m, int base, int size);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
 void vm_page_sunbusy(vm_page_t m);
 bool vm_page_try_remove_all(vm_page_t m);
 bool vm_page_try_remove_write(vm_page_t m);
 int vm_page_trysbusy(vm_page_t m);
 int vm_page_tryxbusy(vm_page_t m);
 void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unswappable(vm_page_t m);
 void vm_page_unwire(vm_page_t m, uint8_t queue);
 bool vm_page_unwire_noq(vm_page_t m);
 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_wire(vm_page_t);
 bool vm_page_wire_mapped(vm_page_t m);
 void vm_page_xunbusy_hard(vm_page_t m);
 void vm_page_xunbusy_hard_unchecked(vm_page_t m);
 void vm_page_set_validclean (vm_page_t, int, int);
 void vm_page_clear_dirty(vm_page_t, int, int);
 void vm_page_set_invalid(vm_page_t, int, int);
 void vm_page_valid(vm_page_t m);
 int vm_page_is_valid(vm_page_t, int, int);
 void vm_page_test_dirty(vm_page_t);
 vm_page_bits_t vm_page_bits(int base, int size);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 int vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);
 
 void vm_page_dirty_KBI(vm_page_t m);
 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
 void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
 int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
 void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
 #endif
 
 #define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)
 
 #define	vm_page_assert_busied(m)					\
 	KASSERT(vm_page_busied(m),					\
 	    ("vm_page_assert_busied: page %p not busy @ %s:%d", \
 	    (m), __FILE__, __LINE__))
 
 #define	vm_page_assert_sbusied(m)					\
 	KASSERT(vm_page_sbusied(m),					\
 	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
 	    (m), __FILE__, __LINE__))
 
 #define	vm_page_assert_unbusied(m)					\
 	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
 	    VPB_CURTHREAD_EXCLUSIVE,					\
 	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
 	     " by me (%p) @ %s:%d",					\
 	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));
 
 #define	vm_page_assert_xbusied_unchecked(m) do {			\
 	KASSERT(vm_page_xbusied(m),					\
 	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
 	    (m), __FILE__, __LINE__));					\
 } while (0)
 #define	vm_page_assert_xbusied(m) do {					\
 	vm_page_assert_xbusied_unchecked(m);				\
 	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
 	    VPB_CURTHREAD_EXCLUSIVE,					\
 	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
 	     " by me (%p) @ %s:%d",					\
 	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));	\
 } while (0)
 
 #define	vm_page_busied(m)						\
 	(vm_page_busy_fetch(m) != VPB_UNBUSIED)
 
 #define	vm_page_xbusied(m)						\
 	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)
 
 #define	vm_page_busy_freed(m)						\
 	(vm_page_busy_fetch(m) == VPB_FREED)
 
 /* Note: page m's lock must not be owned by the caller. */
 #define	vm_page_xunbusy(m) do {						\
 	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
 	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
 		vm_page_xunbusy_hard(m);				\
 } while (0)
 #define	vm_page_xunbusy_unchecked(m) do {				\
 	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
 	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
 		vm_page_xunbusy_hard_unchecked(m);			\
 } while (0)
 
 #ifdef INVARIANTS
 void vm_page_object_busy_assert(vm_page_t m);
 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
 	vm_page_assert_pga_writeable(m, bits)
 /*
  * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
  * operation is a no-op since ownership is not tracked.  In particular
  * this macro does not provide any synchronization with the previous owner.
  */
 #define	vm_page_xbusy_claim(m) do {					\
 	u_int _busy_lock;						\
 									\
 	vm_page_assert_xbusied_unchecked((m));				\
 	do {								\
 		_busy_lock = vm_page_busy_fetch(m);			\
 	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
 	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
 } while (0)
 #else
 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
 #define	vm_page_xbusy_claim(m)
 #endif
 
 #if BYTE_ORDER == BIG_ENDIAN
 #define	VM_PAGE_AFLAG_SHIFT	16
 #else
 #define	VM_PAGE_AFLAG_SHIFT	0
 #endif
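
 /*
  * Worked example of the shift above: "flags" is the first 16-bit member of
  * union vm_page_astate, so a flag value of 0x0001 occupies the low-order
  * 16 bits of the containing 32-bit word on a little-endian machine (shift
  * 0) and the high-order 16 bits on a big-endian machine (shift 16).
  * vm_page_aflag_set() and vm_page_aflag_clear() below rely on this to
  * update the flags with plain 32-bit atomics.
  */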
 
 /*
  *	Load a snapshot of a page's 32-bit atomic state.
  */
 static inline vm_page_astate_t
 vm_page_astate_load(vm_page_t m)
 {
 	vm_page_astate_t a;
 
 	a._bits = atomic_load_32(&m->a._bits);
 	return (a);
 }
 
 /*
  *	Atomically compare and set a page's atomic state.
  */
 static inline bool
 vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
 {
 
 	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
 	    ("%s: invalid head requeue request for page %p", __func__, m));
 	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
 	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
 	KASSERT(new._bits != old->_bits,
 	    ("%s: bits are unchanged", __func__));
 
 	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
 }
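
 /*
  * Illustrative sketch of the usual update pattern built on the two helpers
  * above; the choice of PGA_REFERENCED is only an example:
  *
  *	vm_page_astate_t old, new;
  *
  *	old = vm_page_astate_load(m);
  *	do {
  *		if ((old.flags & PGA_REFERENCED) != 0)
  *			break;
  *		new = old;
  *		new.flags |= PGA_REFERENCED;
  *	} while (!vm_page_astate_fcmpset(m, &old, new));
  *
  * On failure, vm_page_astate_fcmpset() refreshes "old", so the loop
  * re-evaluates the latest snapshot before retrying.
  */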
 
 /*
  *	Clear the given bits in the specified page.
  */
 static inline void
 vm_page_aflag_clear(vm_page_t m, uint16_t bits)
 {
 	uint32_t *addr, val;
 
 	/*
 	 * Access the whole 32-bit word containing the aflags field with an
 	 * atomic update.  Parallel non-atomic updates to the other fields
 	 * within this word are handled properly by the atomic update.
 	 */
 	addr = (void *)&m->a;
 	val = bits << VM_PAGE_AFLAG_SHIFT;
 	atomic_clear_32(addr, val);
 }
 
 /*
  *	Set the given bits in the specified page.
  */
 static inline void
 vm_page_aflag_set(vm_page_t m, uint16_t bits)
 {
 	uint32_t *addr, val;
 
 	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
 
 	/*
 	 * Access the whole 32-bit word containing the aflags field with an
 	 * atomic update.  Parallel non-atomic updates to the other fields
 	 * within this word are handled properly by the atomic update.
 	 */
 	addr = (void *)&m->a;
 	val = bits << VM_PAGE_AFLAG_SHIFT;
 	atomic_set_32(addr, val);
 }
 
 /*
  *	vm_page_dirty:
  *
  *	Set all bits in the page's dirty field.
  *
  *	The object containing the specified page must be locked if the
  *	call is made from the machine-independent layer.
  *
  *	See vm_page_clear_dirty_mask().
  */
 static __inline void
 vm_page_dirty(vm_page_t m)
 {
 
 	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
 #if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
 	vm_page_dirty_KBI(m);
 #else
 	m->dirty = VM_PAGE_BITS_ALL;
 #endif
 }
 
 /*
  *	vm_page_undirty:
  *
  *	Set page to not be dirty.  Note: does not clear pmap modify bits
  */
 static __inline void
 vm_page_undirty(vm_page_t m)
 {
 
 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
 	m->dirty = 0;
 }
 
 static inline uint8_t
 _vm_page_queue(vm_page_astate_t as)
 {
 
 	if ((as.flags & PGA_DEQUEUE) != 0)
 		return (PQ_NONE);
 	return (as.queue);
 }
 
 /*
  *	vm_page_queue:
  *
  *	Return the index of the queue containing m.
  */
 static inline uint8_t
 vm_page_queue(vm_page_t m)
 {
 
 	return (_vm_page_queue(vm_page_astate_load(m)));
 }
 
 static inline bool
 vm_page_active(vm_page_t m)
 {
 
 	return (vm_page_queue(m) == PQ_ACTIVE);
 }
 
 static inline bool
 vm_page_inactive(vm_page_t m)
 {
 
 	return (vm_page_queue(m) == PQ_INACTIVE);
 }
 
 static inline bool
 vm_page_in_laundry(vm_page_t m)
 {
 	uint8_t queue;
 
 	queue = vm_page_queue(m);
 	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
 }
 
 static inline void
 vm_page_clearref(vm_page_t m)
 {
 	u_int r;
 
 	r = m->ref_count;
 	while (atomic_fcmpset_int(&m->ref_count, &r, r & (VPRC_BLOCKED |
 	    VPRC_OBJREF)) == 0)
 		;
 }
 
 /*
  *	vm_page_drop:
  *
  *	Release a reference to a page and return the old reference count.
  */
 static inline u_int
 vm_page_drop(vm_page_t m, u_int val)
 {
 	u_int old;
 
 	/*
 	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
 	 * page structure are visible before it is freed.
 	 */
 	atomic_thread_fence_rel();
 	old = atomic_fetchadd_int(&m->ref_count, -val);
 	KASSERT(old != VPRC_BLOCKED,
 	    ("vm_page_drop: page %p has an invalid refcount value", m));
 	return (old);
 }
 
 /*
  *	vm_page_wired:
  *
  *	Perform a racy check to determine whether a reference prevents the page
  *	from being reclaimable.  If the page's object is locked, and the page is
  *	unmapped and exclusively busied by the current thread, no new wirings
  *	may be created.
  */
 static inline bool
 vm_page_wired(vm_page_t m)
 {
 
 	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
 }
 
 static inline bool
 vm_page_all_valid(vm_page_t m)
 {
 
 	return (m->valid == VM_PAGE_BITS_ALL);
 }
 
 static inline bool
 vm_page_any_valid(vm_page_t m)
 {
 
 	return (m->valid != 0);
 }
 
 static inline bool
 vm_page_none_valid(vm_page_t m)
 {
 
 	return (m->valid == 0);
 }
 
 static inline int
 vm_page_domain(vm_page_t m __numa_used)
 {
 #ifdef NUMA
 	int domn, segind;
 
 	segind = m->segind;
 	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
 	domn = vm_phys_segs[segind].domain;
 	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
 	return (domn);
 #else
 	return (0);
 #endif
 }
 
 #endif				/* _KERNEL */
 #endif				/* !_VM_PAGE_ */