diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index fc88296f754c..205848489644 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -1,310 +1,310 @@
 /*-
  * SPDX-License-Identifier: BSD-4-Clause
  *
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 2003 Peter Wemm
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by the University of
  *	California, Berkeley and its contributors.
  * 4. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	from: @(#)vmparam.h	5.9 (Berkeley) 5/12/91
  * $FreeBSD$
  */
 
 #ifdef __i386__
 #include <i386/vmparam.h>
 #else /* !__i386__ */
 
 #ifndef _MACHINE_VMPARAM_H_
 #define	_MACHINE_VMPARAM_H_ 1
 
 /*
  * Machine dependent constants for AMD64.
  */
 
 /*
  * Virtual memory related constants, all in bytes
  */
 #define	MAXTSIZ		(32768UL*1024*1024)	/* max text size */
 #ifndef DFLDSIZ
 #define	DFLDSIZ		(32768UL*1024*1024)	/* initial data size limit */
 #endif
 #ifndef MAXDSIZ
 #define	MAXDSIZ		(32768UL*1024*1024)	/* max data size */
 #endif
 #ifndef	DFLSSIZ
 #define	DFLSSIZ		(8UL*1024*1024)		/* initial stack size limit */
 #endif
 #ifndef	MAXSSIZ
 #define	MAXSSIZ		(512UL*1024*1024)	/* max stack size */
 #endif
 #ifndef SGROWSIZ
 #define	SGROWSIZ	(128UL*1024)		/* amount to grow stack */
 #endif
 
 /*
  * We provide a machine specific single page allocator through the use
  * of the direct mapped segment.  This uses 2MB pages for reduced
  * TLB pressure.
  */
 #if !defined(KASAN) && !defined(KMSAN)
 #define	UMA_MD_SMALL_ALLOC
 #endif
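 /*
  * When UMA_MD_SMALL_ALLOC is defined, UMA satisfies single-page slab
  * allocations through the machine-dependent uma_small_alloc() and
  * uma_small_free() routines, which on amd64 return direct-map addresses;
  * no kernel virtual address space has to be allocated, and the 2MB
  * direct-map mappings keep TLB pressure down.  The sanitizers are
  * excluded presumably because their shadow maps do not cover the
  * direct map.
  */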
 
 /*
  * The physical address space is densely populated.
  */
 #define	VM_PHYSSEG_DENSE
 
 /*
  * The number of PHYSSEG entries must be one greater than the number
  * of phys_avail entries because the phys_avail entry that spans the
  * largest physical address that is accessible by ISA DMA is split
  * into two PHYSSEG entries. 
  */
 #define	VM_PHYSSEG_MAX		63
 
 /*
  * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
  * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
  * the pool from which physical pages for page tables and small UMA
  * objects are allocated.
  */
 #define	VM_NFREEPOOL		2
 #define	VM_FREEPOOL_DEFAULT	0
 #define	VM_FREEPOOL_DIRECT	1
 
 /*
  * Create up to three free page lists: VM_FREELIST_DMA32 is for physical pages
  * that have physical addresses below 4G but are not accessible by ISA DMA,
  * and VM_FREELIST_LOWMEM is for physical pages that are accessible by ISA
  * DMA.
  */
 #define	VM_NFREELIST		3
 #define	VM_FREELIST_DEFAULT	0
 #define	VM_FREELIST_DMA32	1
 #define	VM_FREELIST_LOWMEM	2
 
 #define VM_LOWMEM_BOUNDARY	(16 << 20)	/* 16MB ISA DMA limit */
 
 /*
  * Create the DMA32 free list only if the number of physical pages above
  * physical address 4G is at least 16M, which amounts to 64GB of physical
  * memory.
  */
 #define	VM_DMA32_NPAGES_THRESHOLD	16777216
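 /*
  * For reference: 16,777,216 pages * 4096 bytes/page = 64GB, which is
  * where the "64GB" figure above comes from.
  */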
 
 /*
  * An allocation size of 16MB is supported in order to optimize the
  * use of the direct map by UMA.  Specifically, a cache line contains
  * at most 8 PDEs, collectively mapping 16MB of physical memory.  By
  * reducing the number of distinct 16MB "pages" that are used by UMA,
  * the physical memory allocator reduces the likelihood of both 2MB
  * page TLB misses and cache misses caused by 2MB page TLB misses.
  */
 #define	VM_NFREEORDER		13
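 /*
  * With VM_NFREEORDER equal to 13, the buddy allocator's largest order
  * is 12, i.e. 2^12 = 4096 contiguous 4KB pages = 16MB, matching the
  * 8 PDE * 2MB cache line argument above.
  */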
 
 /*
  * Enable superpage reservations: 1 level.
  */
 #ifndef	VM_NRESERVLEVEL
 #define	VM_NRESERVLEVEL		1
 #endif
 
 /*
  * Level 0 reservations consist of 512 pages.
  */
 #ifndef	VM_LEVEL_0_ORDER
 #define	VM_LEVEL_0_ORDER	9
 #endif
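 /*
  * 2^9 = 512 base pages of 4KB each is exactly 2MB, so one level 0
  * reservation corresponds to one potential 2MB superpage mapping.
  */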
 
 #ifdef	SMP
 #define	PA_LOCK_COUNT	256
 #endif
 
 /*
  * Kernel physical load address for non-UEFI boot and for legacy UEFI loader.
  * Newer UEFI loaders load the kernel anywhere below 4G, with memory
  * allocated by boot services.
  * The load address needs to be aligned on a 2MB superpage boundary.
  */
 #ifndef KERNLOAD
 #define	KERNLOAD	0x200000
 #endif
 
 /*
  * Virtual addresses of things.  Derived from the page directory and
  * page table indexes from pmap.h for precision.
  *
  * 0x0000000000000000 - 0x00007fffffffffff   user map
  * 0x0000800000000000 - 0xffff7fffffffffff   does not exist (hole)
  * 0xffff800000000000 - 0xffff804020100fff   recursive page table (512GB slot)
  * 0xffff804020100fff - 0xffff807fffffffff   unused
  * 0xffff808000000000 - 0xffff847fffffffff   large map (can be tuned up)
  * 0xffff848000000000 - 0xfffff77fffffffff   unused (large map extends there)
  * 0xfffff60000000000 - 0xfffff7ffffffffff   2TB KMSAN origin map, optional
  * 0xfffff78000000000 - 0xfffff7bfffffffff   512GB KASAN shadow map, optional
  * 0xfffff80000000000 - 0xfffffbffffffffff   4TB direct map
  * 0xfffffc0000000000 - 0xfffffdffffffffff   2TB KMSAN shadow map, optional
  * 0xfffffe0000000000 - 0xffffffffffffffff   2TB kernel map
  *
  * Within the kernel map:
  *
  * 0xfffffe0000000000                        vm_page_array
  * 0xffffffff80000000                        KERNBASE
  */
 
 #define	VM_MIN_KERNEL_ADDRESS	KV4ADDR(KPML4BASE, 0, 0, 0)
 #define	VM_MAX_KERNEL_ADDRESS	KV4ADDR(KPML4BASE + NKPML4E - 1, \
 					NPDPEPG-1, NPDEPG-1, NPTEPG-1)
 
 #define	DMAP_MIN_ADDRESS	KV4ADDR(DMPML4I, 0, 0, 0)
 #define	DMAP_MAX_ADDRESS	KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0)
 
 #define	KASAN_MIN_ADDRESS	KV4ADDR(KASANPML4I, 0, 0, 0)
 #define	KASAN_MAX_ADDRESS	KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
 
 #define	KMSAN_SHAD_MIN_ADDRESS	KV4ADDR(KMSANSHADPML4I, 0, 0, 0)
 #define	KMSAN_SHAD_MAX_ADDRESS	KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E, \
 					0, 0, 0)
 
 #define	KMSAN_ORIG_MIN_ADDRESS	KV4ADDR(KMSANORIGPML4I, 0, 0, 0)
 #define	KMSAN_ORIG_MAX_ADDRESS	KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
 					0, 0, 0)
 
 #define	LARGEMAP_MIN_ADDRESS	KV4ADDR(LMSPML4I, 0, 0, 0)
 #define	LARGEMAP_MAX_ADDRESS	KV4ADDR(LMEPML4I + 1, 0, 0, 0)
 
 /*
  * Formally, the kernel mapping starts at KERNBASE, but the kernel linker
  * script leaves the first PDE reserved.  For legacy BIOS boot, the kernel
  * is loaded at KERNLOAD = 2M, and the initial kernel page table maps
  * physical memory from zero to KERNend starting at KERNBASE.
  *
  * KERNSTART is where the first actual kernel page is mapped, after
  * the compatibility mapping.
  */
 #define	KERNBASE		KV4ADDR(KPML4I, KPDPI, 0, 0)
 #define	KERNSTART		(KERNBASE + NBPDR)
 
 #define	UPT_MAX_ADDRESS		KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I)
 #define	UPT_MIN_ADDRESS		KV4ADDR(PML4PML4I, 0, 0, 0)
 
 #define	VM_MAXUSER_ADDRESS_LA57	UVADDR(NUPML5E, 0, 0, 0, 0)
 #define	VM_MAXUSER_ADDRESS_LA48	UVADDR(0, NUP4ML4E, 0, 0, 0)
 #define	VM_MAXUSER_ADDRESS	VM_MAXUSER_ADDRESS_LA57
 
 #define	SHAREDPAGE_LA57		(VM_MAXUSER_ADDRESS_LA57 - PAGE_SIZE)
 #define	SHAREDPAGE_LA48		(VM_MAXUSER_ADDRESS_LA48 - PAGE_SIZE)
 #define	USRSTACK_LA57		SHAREDPAGE_LA57
 #define	USRSTACK_LA48		SHAREDPAGE_LA48
 #define	USRSTACK		USRSTACK_LA48
 #define	PS_STRINGS_LA57		(USRSTACK_LA57 - sizeof(struct ps_strings))
 #define	PS_STRINGS_LA48		(USRSTACK_LA48 - sizeof(struct ps_strings))
 
 #define	VM_MAX_ADDRESS		UPT_MAX_ADDRESS
 #define	VM_MIN_ADDRESS		(0)
 
 /*
  * XXX Allowing dmaplimit == 0 is a temporary workaround for vt(4) efifb's
  * early use of PHYS_TO_DMAP before the mapping is actually setup. This works
  * because the result is not actually accessed until later, but the early
  * vt fb startup needs to be reworked.
  */
 #define	PHYS_IN_DMAP(pa)	(dmaplimit == 0 || (pa) < dmaplimit)
 #define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS &&		\
     (va) < (DMAP_MIN_ADDRESS + dmaplimit))
 
 #define	PMAP_HAS_DMAP	1
 #define	PHYS_TO_DMAP(x)	({						\
 	KASSERT(PHYS_IN_DMAP(x),					\
 	    ("physical address %#jx not covered by the DMAP",		\
 	    (uintmax_t)x));						\
 	(x) | DMAP_MIN_ADDRESS; })
 
 #define	DMAP_TO_PHYS(x)	({						\
 	KASSERT(VIRT_IN_DMAP(x),					\
 	    ("virtual address %#jx not covered by the DMAP",		\
 	    (uintmax_t)x));						\
 	(x) & ~DMAP_MIN_ADDRESS; })
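 /*
  * The bitwise OR and AND-NOT above behave like addition and subtraction
  * because DMAP_MIN_ADDRESS (0xfffff80000000000) has its low 43 bits
  * clear, while any offset within the 4TB (2^42 byte) direct map is
  * smaller than 2^43.
  */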
 
 /*
  * amd64 maps the page array into KVA so that it can be more easily
  * allocated on the correct memory domains.
  */
 #define	PMAP_HAS_PAGE_ARRAY	1
 
 /*
  * How many physical pages per kmem arena virtual page.
  */
 #ifndef VM_KMEM_SIZE_SCALE
 #define	VM_KMEM_SIZE_SCALE	(1)
 #endif
 
 /*
  * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
  * kernel map.
  */
 #ifndef VM_KMEM_SIZE_MAX
 #define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
     VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
 #endif
 
 /* initial pagein size of beginning of executable file */
 #ifndef VM_INITIAL_PAGEIN
 #define	VM_INITIAL_PAGEIN	16
 #endif
 
 #define	ZERO_REGION_SIZE	(2 * 1024 * 1024)	/* 2MB */
 
 /*
  * Use a fairly large batch size since we expect amd64 systems to have lots of
  * memory.
  */
-#define	VM_BATCHQUEUE_SIZE	31
+#define	VM_BATCHQUEUE_SIZE	63
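 /*
  * For scale: 63 page pointers of 8 bytes each occupy 504 bytes, so a
  * per-CPU batch (see struct vm_batchqueue in vm/vm_pagequeue.h) still
  * fits in 512 bytes, or eight 64-byte cache lines, while amortizing
  * each pagequeue lock acquisition over twice as many pages as the old
  * value of 31; the exact sizing rationale is presumed rather than
  * documented here.
  */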
 
 /*
  * The pmap can create non-transparent large page mappings.
  */
 #define	PMAP_HAS_LARGEPAGES	1
 
 /*
  * Need a page dump array for minidump.
  */
 #define MINIDUMP_PAGE_TRACKING	1
 
 #endif /* _MACHINE_VMPARAM_H_ */
 
 #endif /* __i386__ */
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
index 77457717a3fd..1b9873aede4a 100644
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -1,338 +1,338 @@
 /*-
  * SPDX-License-Identifier: BSD-4-Clause
  *
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by TooLs GmbH.
  * 4. The name of TooLs GmbH may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *	$NetBSD: vmparam.h,v 1.11 2000/02/11 19:25:16 thorpej Exp $
  * $FreeBSD$
  */
 
 #ifndef _MACHINE_VMPARAM_H_
 #define	_MACHINE_VMPARAM_H_
 
 #ifndef LOCORE
 #include <machine/md_var.h>
 #endif
 
 #define	USRSTACK	SHAREDPAGE
 
 #ifndef	MAXTSIZ
 #define	MAXTSIZ		(1*1024*1024*1024)		/* max text size */
 #endif
 
 #ifndef	DFLDSIZ
 #define	DFLDSIZ		(128*1024*1024)		/* default data size */
 #endif
 
 #ifndef	MAXDSIZ
 #ifdef __powerpc64__
 #define	MAXDSIZ		(32UL*1024*1024*1024)	/* max data size */
 #else
 #define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
 #endif
 #endif
 
 #ifndef	DFLSSIZ
 #define	DFLSSIZ		(8*1024*1024)		/* default stack size */
 #endif
 
 #ifndef	MAXSSIZ
 #ifdef __powerpc64__
 #define	MAXSSIZ		(512*1024*1024)		/* max stack size */
 #else
 #define	MAXSSIZ		(64*1024*1024)		/* max stack size */
 #endif
 #endif
 
 #ifdef AIM
 #define	VM_MAXUSER_ADDRESS32	0xfffff000
 #else
 #define	VM_MAXUSER_ADDRESS32	0x7ffff000
 #endif
 
 /*
  * Would like to have MAX addresses = 0, but this doesn't (currently) work
  */
 #ifdef __powerpc64__
 /*
  * Virtual addresses of things.  Derived from the page directory and
  * page table indexes from pmap.h for precision.
  *
  * kernel map should be able to start at 0xc008000000000000 -
  * but at least the functional simulator doesn't like it
  *
  * 0x0000000000000000 - 0x000fffffffffffff   user map
  * 0xc000000000000000 - 0xc007ffffffffffff   direct map
  * 0xc008000000000000 - 0xc00fffffffffffff   kernel map
  *
  */
 #define	VM_MIN_ADDRESS		0x0000000000000000
 #define	VM_MAXUSER_ADDRESS	0x000fffffc0000000
 #define	VM_MAX_ADDRESS		0xc00fffffffffffff
 #define	VM_MIN_KERNEL_ADDRESS	0xc008000000000000
 #define	VM_MAX_KERNEL_ADDRESS	0xc0080007ffffffff
 #define	VM_MAX_SAFE_KERNEL_ADDRESS	VM_MAX_KERNEL_ADDRESS
 #else
 #define	VM_MIN_ADDRESS		0
 #define	VM_MAXUSER_ADDRESS	VM_MAXUSER_ADDRESS32
 #define	VM_MAX_ADDRESS		0xffffffff
 #endif
 
 #define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
 
 #define	FREEBSD32_SHAREDPAGE	(VM_MAXUSER_ADDRESS32 - PAGE_SIZE)
 #define	FREEBSD32_USRSTACK	FREEBSD32_SHAREDPAGE
 
 #define	KERNBASE		0x00100100	/* start of kernel virtual */
 
 #ifdef AIM
 #ifndef __powerpc64__
 #define	VM_MIN_KERNEL_ADDRESS	((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
 #define	VM_MAX_SAFE_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH -1)
 #define	VM_MAX_KERNEL_ADDRESS	(VM_MIN_KERNEL_ADDRESS + 3*SEGMENT_LENGTH - 1)
 #endif
 
 /*
  * Use the direct-mapped BAT registers for UMA small allocs. This
  * takes pressure off the small amount of available KVA.
  */
 #define UMA_MD_SMALL_ALLOC
 
 #else /* Book-E */
 
 /* Use the direct map for UMA small allocs on powerpc64. */
 #ifdef __powerpc64__
 #define UMA_MD_SMALL_ALLOC
 #else
 #define	VM_MIN_KERNEL_ADDRESS		0xc0000000
 #define	VM_MAX_KERNEL_ADDRESS		0xffffefff
 #define	VM_MAX_SAFE_KERNEL_ADDRESS	VM_MAX_KERNEL_ADDRESS
 #endif
 
 #endif /* AIM/E500 */
 
 #if !defined(LOCORE)
 struct pmap_physseg {
 	struct pv_entry *pvent;
 	char *attrs;
 };
 #endif
 
 #ifdef __powerpc64__
 #define	VM_PHYSSEG_MAX		63	/* 1? */
 #else
 #define	VM_PHYSSEG_MAX		16	/* 1? */
 #endif
 
 #define	PHYS_AVAIL_SZ	256	/* Allows up to 16GB RAM on pSeries with a
 				 * logical memory block size of 64MB.
 				 * For more RAM, increase the LMB size or
 				 * this value.
 				 */
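 /*
  * For reference: 256 entries * 64MB per logical memory block = 16GB,
  * which is where the figure above comes from.
  */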
 
 /* XXX This is non-sensical.  Phys avail should hold contiguous regions. */
 #define	PHYS_AVAIL_ENTRIES	PHYS_AVAIL_SZ
 
 /*
  * The physical address space is densely populated on 32-bit systems,
  * but may not be on 64-bit ones.
  */
 #ifdef __powerpc64__
 #define	VM_PHYSSEG_SPARSE
 #else
 #define	VM_PHYSSEG_DENSE
 #endif
 
 /*
  * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
  * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
  * the pool from which physical pages for small UMA objects are
  * allocated.
  */
 #define	VM_NFREEPOOL		2
 #define	VM_FREEPOOL_DEFAULT	0
 #define	VM_FREEPOOL_DIRECT	1
 
 /*
  * Create one free page list.
  */
 #define	VM_NFREELIST		1
 #define	VM_FREELIST_DEFAULT	0
 
 #ifdef __powerpc64__
 /* The largest allocation size is 16MB. */
 #define	VM_NFREEORDER		13
 #else
 /* The largest allocation size is 4MB. */
 #define	VM_NFREEORDER		11
 #endif
 
 #ifndef	VM_NRESERVLEVEL
 #ifdef __powerpc64__
 /* Enable superpage reservations: 1 level. */
 #define	VM_NRESERVLEVEL		1
 #else
 /* Disable superpage reservations. */
 #define	VM_NRESERVLEVEL		0
 #endif
 #endif
 
 #ifndef	VM_LEVEL_0_ORDER
 /* Level 0 reservations consist of 512 (RPT) or 4096 (HPT) pages. */
 #define	VM_LEVEL_0_ORDER	vm_level_0_order
 #ifndef	__ASSEMBLER__
 extern	int vm_level_0_order;
 #endif
 #endif
 
 #ifndef	VM_LEVEL_0_ORDER_MAX
 #define	VM_LEVEL_0_ORDER_MAX	12
 #endif
 
 #ifdef __powerpc64__
 #ifdef	SMP
 #define	PA_LOCK_COUNT	256
 #endif
 #endif
 
 #ifndef VM_INITIAL_PAGEIN
 #define	VM_INITIAL_PAGEIN	16
 #endif
 
 #ifndef SGROWSIZ
 #define	SGROWSIZ	(128UL*1024)		/* amount to grow stack */
 #endif
 
 /*
  * How many physical pages per kmem arena virtual page.
  */
 #ifndef VM_KMEM_SIZE_SCALE
 #define	VM_KMEM_SIZE_SCALE	(3)
 #endif
 
 /*
  * Optional floor (in bytes) on the size of the kmem arena.
  */
 #ifndef VM_KMEM_SIZE_MIN
 #define	VM_KMEM_SIZE_MIN	(12 * 1024 * 1024)
 #endif
 
 /*
  * Optional ceiling (in bytes) on the size of the kmem arena: 40% of the
  * usable KVA space.
  */
 #ifndef VM_KMEM_SIZE_MAX
 #define VM_KMEM_SIZE_MAX	((VM_MAX_SAFE_KERNEL_ADDRESS - \
     VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
 #endif
 
 #ifdef __powerpc64__
 #define	ZERO_REGION_SIZE	(2 * 1024 * 1024)	/* 2MB */
 #else
 #define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */
 #endif
 
 /*
  * Use a fairly large batch size since we expect ppc64 systems to have lots of
  * memory.
  */
 #ifdef __powerpc64__
-#define	VM_BATCHQUEUE_SIZE	31
+#define	VM_BATCHQUEUE_SIZE	63
 #endif
 
 /*
  * On 32-bit OEA, the only purpose for which sf_buf is used is to implement
  * an opaque pointer required by the machine-independent parts of the kernel.
  * That pointer references the vm_page that is "mapped" by the sf_buf.  The
  * actual mapping is provided by the direct virtual-to-physical mapping.
  *
  * On OEA64 and Book-E, we need to do something a little more complicated. Use
  * the runtime-detected hw_direct_map to pick between the two cases. Our
  * friends in vm_machdep.c will do the same to ensure nothing gets confused.
  */
 #define	SFBUF
 #define	SFBUF_NOMD
 
 /*
  * We (usually) have a direct map of all physical memory, so provide
  * a macro to use to get the kernel VA address for a given PA. Check the
  * value of PMAP_HAS_DMAP before using.
  */
 #ifndef LOCORE
 #ifdef __powerpc64__
 #define	DMAP_BASE_ADDRESS	0xc000000000000000UL
 #define	DMAP_MIN_ADDRESS	DMAP_BASE_ADDRESS
 #define	DMAP_MAX_ADDRESS	0xc007ffffffffffffUL
 #else
 #define	DMAP_BASE_ADDRESS	0x00000000UL
 #define	DMAP_MAX_ADDRESS	0xbfffffffUL
 #endif
 #endif
 
 #if defined(__powerpc64__) || defined(BOOKE)
 /*
  * powerpc64 and Book-E will provide their own page array allocators.
  *
  * On AIM, this will allocate a single virtual array, with pages from the
  * correct memory domains.
  * On Book-E this will let us put the array in TLB1, removing the need for TLB
  * thrashing.
  *
  * VM_MIN_KERNEL_ADDRESS is just a dummy.  It will get set by the MMU driver.
  */
 #define	PA_MIN_ADDRESS		VM_MIN_KERNEL_ADDRESS
 #define	PMAP_HAS_PAGE_ARRAY	1
 #endif
 
 #if defined(__powerpc64__)
 /*
  * Need a page dump array for minidump.
  */
 #define MINIDUMP_PAGE_TRACKING	1
 #else
 /*
  * No minidump with 32-bit powerpc.
  */
 #define MINIDUMP_PAGE_TRACKING	0
 #endif
 
 #define	PMAP_HAS_DMAP	(hw_direct_map)
 #define PHYS_TO_DMAP(x) ({						\
 	KASSERT(hw_direct_map, ("Direct map not provided by PMAP"));	\
 	(x) | DMAP_BASE_ADDRESS; })
 #define DMAP_TO_PHYS(x) ({						\
 	KASSERT(hw_direct_map, ("Direct map not provided by PMAP"));	\
 	(x) &~ DMAP_BASE_ADDRESS; })
 
 /*
  * No non-transparent large page support in the pmap.
  */
 #define	PMAP_HAS_LARGEPAGES	0
 
 #endif /* _MACHINE_VMPARAM_H_ */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2b7bc6a5b66e..797207205f42 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,5609 +1,5622 @@
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
  */
 
 /*-
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  *	Resident memory management module.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/counter.h>
 #include <sys/domainset.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
 #include <sys/linker.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mman.h>
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sleepqueue.h>
 #include <sys/sbuf.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmmeter.h>
 #include <sys/vnode.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/vm_param.h>
 #include <vm/vm_domainset.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_pagequeue.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_radix.h>
 #include <vm/vm_reserv.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_dumpset.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
 
 #include <machine/md_var.h>
 
 struct vm_domain vm_dom[MAXMEMDOM];
 
 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
 
 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
 
 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
 /* The following fields are protected by the domainset lock. */
 domainset_t __exclusive_cache_line vm_min_domains;
 domainset_t __exclusive_cache_line vm_severe_domains;
 static int vm_min_waiters;
 static int vm_severe_waiters;
 static int vm_pageproc_waiters;
 
 static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "VM page statistics");
 
 static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
     CTLFLAG_RD, &pqstate_commit_retries,
     "Number of failed per-page atomic queue state updates");
 
 static COUNTER_U64_DEFINE_EARLY(queue_ops);
 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
     CTLFLAG_RD, &queue_ops,
     "Number of batched queue operations");
 
 static COUNTER_U64_DEFINE_EARLY(queue_nops);
 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
     CTLFLAG_RD, &queue_nops,
     "Number of batched queue operations with no effects");
 
 /*
  * bogus page -- for I/O to/from partially complete buffers,
  * or for paging into sparsely invalid regions.
  */
 vm_page_t bogus_page;
 
 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;
 
 struct bitset *vm_page_dump;
 long vm_page_dump_pages;
 
 static TAILQ_HEAD(, vm_page) blacklist_head;
 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
 
 static uma_zone_t fakepg_zone;
 
 static void vm_page_alloc_check(vm_page_t m);
 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
 static bool vm_page_free_prep(vm_page_t m);
 static void vm_page_free_toq(vm_page_t m);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
     vm_page_t mpred);
 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
     const uint16_t nflag);
 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
     vm_page_t m_run, vm_paddr_t high);
 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
     int req);
 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
     int flags);
 static void vm_page_zone_release(void *arg, void **store, int cnt);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
 static void
 vm_page_init(void *dummy)
 {
 
 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 }
 
 /*
  * The cache page zone is initialized later since we need to be able to allocate
  * pages before UMA is fully initialized.
  */
 static void
 vm_page_init_cache_zones(void *dummy __unused)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
 	int cache, domain, maxcache, pool;
 
 	maxcache = 0;
 	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &maxcache);
 	maxcache *= mp_ncpus;
 	for (domain = 0; domain < vm_ndomains; domain++) {
 		vmd = VM_DOMAIN(domain);
 		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
 			pgcache = &vmd->vmd_pgcache[pool];
 			pgcache->domain = domain;
 			pgcache->pool = pool;
 			pgcache->zone = uma_zcache_create("vm pgcache",
 			    PAGE_SIZE, NULL, NULL, NULL, NULL,
 			    vm_page_zone_import, vm_page_zone_release, pgcache,
 			    UMA_ZONE_VM);
 
 			/*
 			 * Limit each pool's zone to 0.1% of the pages in the
 			 * domain.
 			 */
 			cache = maxcache != 0 ? maxcache :
 			    vmd->vmd_page_count / 1000;
 			uma_zone_set_maxcache(pgcache->zone, cache);
 		}
 	}
 }
 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
 
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
 #ifdef CTASSERT
 CTASSERT(sizeof(u_long) >= 8);
 #endif
 #endif
 
 /*
  *	vm_set_page_size:
  *
  *	Sets the page size, perhaps based upon the memory
  *	size.  Must be called before any use of page-size
  *	dependent functions.
  */
 void
 vm_set_page_size(void)
 {
 	if (vm_cnt.v_page_size == 0)
 		vm_cnt.v_page_size = PAGE_SIZE;
 	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
 		panic("vm_set_page_size: page size not a power of two");
 }
 
 /*
  *	vm_page_blacklist_next:
  *
  *	Find the next entry in the provided string of blacklist
  *	addresses.  Entries are separated by space, comma, or newline.
  *	If an invalid integer is encountered then the rest of the
  *	string is skipped.  Updates the list pointer to the next
  *	character, or NULL if the string is exhausted or invalid.
  */
 static vm_paddr_t
 vm_page_blacklist_next(char **list, char *end)
 {
 	vm_paddr_t bad;
 	char *cp, *pos;
 
 	if (list == NULL || *list == NULL)
 		return (0);
 	if (**list == '\0') {
 		*list = NULL;
 		return (0);
 	}
 
 	/*
 	 * If there's no end pointer then the buffer is coming from
 	 * the kenv and we know it's null-terminated.
 	 */
 	if (end == NULL)
 		end = *list + strlen(*list);
 
 	/* Ensure that strtoq() won't walk off the end */
 	if (*end != '\0') {
 		if (*end == '\n' || *end == ' ' || *end  == ',')
 			*end = '\0';
 		else {
 			printf("Blacklist not terminated, skipping\n");
 			*list = NULL;
 			return (0);
 		}
 	}
 
 	for (pos = *list; *pos != '\0'; pos = cp) {
 		bad = strtoq(pos, &cp, 0);
 		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
 			if (bad == 0) {
 				if (++cp < end)
 					continue;
 				else
 					break;
 			}
 		} else
 			break;
 		if (*cp == '\0' || ++cp >= end)
 			*list = NULL;
 		else
 			*list = cp;
 		return (trunc_page(bad));
 	}
 	printf("Garbage in RAM blacklist, skipping\n");
 	*list = NULL;
 	return (0);
 }
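 /*
  * As parsed above, a blacklist is simply a list of physical addresses
  * separated by spaces, commas, or newlines; for example, a hypothetical
  * loader.conf setting might be:
  *
  *	vm.blacklist="0x12345000,0x23456000"
  *
  * Each address is truncated to a page boundary before the corresponding
  * page is pulled from the free lists.
  */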
 
 bool
 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	int ret;
 
 	m = vm_phys_paddr_to_vm_page(pa);
 	if (m == NULL)
 		return (true); /* page does not exist, no failure */
 
 	vmd = vm_pagequeue_domain(m);
 	vm_domain_free_lock(vmd);
 	ret = vm_phys_unfree_page(m);
 	vm_domain_free_unlock(vmd);
 	if (ret != 0) {
 		vm_domain_freecnt_inc(vmd, -1);
 		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
 		if (verbose)
 			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
 	}
 	return (ret);
 }
 
 /*
  *	vm_page_blacklist_check:
  *
  *	Iterate through the provided string of blacklist addresses, pulling
  *	each entry out of the physical allocator free list and putting it
  *	onto a list for reporting via the vm.page_blacklist sysctl.
  */
 static void
 vm_page_blacklist_check(char *list, char *end)
 {
 	vm_paddr_t pa;
 	char *next;
 
 	next = list;
 	while (next != NULL) {
 		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
 			continue;
 		vm_page_blacklist_add(pa, bootverbose);
 	}
 }
 
 /*
  *	vm_page_blacklist_load:
  *
  *	Search for a special module named "ram_blacklist".  It'll be a
  *	plain text file provided by the user via the loader directive
  *	of the same name.
  */
 static void
 vm_page_blacklist_load(char **list, char **end)
 {
 	void *mod;
 	u_char *ptr;
 	u_int len;
 
 	mod = NULL;
 	ptr = NULL;
 
 	mod = preload_search_by_type("ram_blacklist");
 	if (mod != NULL) {
 		ptr = preload_fetch_addr(mod);
 		len = preload_fetch_size(mod);
         }
 	*list = ptr;
 	if (ptr != NULL)
 		*end = ptr + len;
 	else
 		*end = NULL;
 	return;
 }
 
 static int
 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
 {
 	vm_page_t m;
 	struct sbuf sbuf;
 	int error, first;
 
 	first = 1;
 	error = sysctl_wire_old_buffer(req, 0);
 	if (error != 0)
 		return (error);
 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
 	TAILQ_FOREACH(m, &blacklist_head, listq) {
 		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
 		    (uintmax_t)m->phys_addr);
 		first = 0;
 	}
 	error = sbuf_finish(&sbuf);
 	sbuf_delete(&sbuf);
 	return (error);
 }
 
 /*
  * Initialize a dummy page for use in scans of the specified paging queue.
  * In principle, this function only needs to set the flag PG_MARKER.
  * Nonetheless, it write busies the page as a safety precaution.
  */
 void
 vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
 {
 
 	bzero(marker, sizeof(*marker));
 	marker->flags = PG_MARKER;
 	marker->a.flags = aflags;
 	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	marker->a.queue = queue;
 }
 
 static void
 vm_page_domain_init(int domain)
 {
 	struct vm_domain *vmd;
 	struct vm_pagequeue *pq;
 	int i;
 
 	vmd = VM_DOMAIN(domain);
 	bzero(vmd, sizeof(*vmd));
 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
 	    "vm inactive pagequeue";
 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
 	    "vm active pagequeue";
 	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
 	    "vm laundry pagequeue";
 	*__DECONST(const char **,
 	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
 	    "vm unswappable pagequeue";
 	vmd->vmd_domain = domain;
 	vmd->vmd_page_count = 0;
 	vmd->vmd_free_count = 0;
 	vmd->vmd_segs = 0;
 	vmd->vmd_oom = FALSE;
 	for (i = 0; i < PQ_COUNT; i++) {
 		pq = &vmd->vmd_pagequeues[i];
 		TAILQ_INIT(&pq->pq_pl);
 		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
 		    MTX_DEF | MTX_DUPOK);
 		pq->pq_pdpages = 0;
 		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
 	}
 	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
 	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
 
 	/*
 	 * inacthead is used to provide FIFO ordering for LRU-bypassing
 	 * insertions.
 	 */
 	vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
 	    &vmd->vmd_inacthead, plinks.q);
 
 	/*
 	 * The clock pages are used to implement active queue scanning without
 	 * requeues.  Scans start at clock[0], which is advanced after the scan
 	 * ends.  When the two clock hands meet, they are reset and scanning
 	 * resumes from the head of the queue.
 	 */
 	vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
 	vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
 	TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
 	    &vmd->vmd_clock[0], plinks.q);
 	TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
 	    &vmd->vmd_clock[1], plinks.q);
 }
 
 /*
  * Initialize a physical page in preparation for adding it to the free
  * lists.
  */
 void
 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
 {
 
 	m->object = NULL;
 	m->ref_count = 0;
 	m->busy_lock = VPB_FREED;
 	m->flags = m->a.flags = 0;
 	m->phys_addr = pa;
 	m->a.queue = PQ_NONE;
 	m->psind = 0;
 	m->segind = segind;
 	m->order = VM_NFREEORDER;
 	m->pool = VM_FREEPOOL_DEFAULT;
 	m->valid = m->dirty = 0;
 	pmap_page_init(m);
 }
 
 #ifndef PMAP_HAS_PAGE_ARRAY
 static vm_paddr_t
 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
 {
 	vm_paddr_t new_end;
 
 	/*
 	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
 	 * However, because this page is allocated from KVM, out-of-bounds
 	 * accesses using the direct map will not be trapped.
 	 */
 	*vaddr += PAGE_SIZE;
 
 	/*
 	 * Allocate physical memory for the page structures, and map it.
 	 */
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
 	vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
 	vm_page_array_size = page_range;
 
 	return (new_end);
 }
 #endif
 
 /*
  *	vm_page_startup:
  *
  *	Initializes the resident memory module.  Allocates physical memory for
  *	bootstrapping UMA and some data structures that are used to manage
  *	physical pages.  Initializes these structures, and populates the free
  *	page queues.
  */
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
 	struct vm_phys_seg *seg;
 	struct vm_domain *vmd;
 	vm_page_t m;
 	char *list, *listend;
 	vm_paddr_t end, high_avail, low_avail, new_end, size;
 	vm_paddr_t page_range __unused;
 	vm_paddr_t last_pa, pa, startp, endp;
 	u_long pagecount;
 #if MINIDUMP_PAGE_TRACKING
 	u_long vm_page_dump_size;
 #endif
 	int biggestone, i, segind;
 #ifdef WITNESS
 	vm_offset_t mapped;
 	int witness_size;
 #endif
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
 	long ii;
 #endif
 
 	vaddr = round_page(vaddr);
 
 	vm_phys_early_startup();
 	biggestone = vm_phys_avail_largest();
 	end = phys_avail[biggestone+1];
 
 	/*
 	 * Initialize the page and queue locks.
 	 */
 	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 	for (i = 0; i < vm_ndomains; i++)
 		vm_page_domain_init(i);
 
 	new_end = end;
 #ifdef WITNESS
 	witness_size = round_page(witness_startup_count());
 	new_end -= witness_size;
 	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
 	    VM_PROT_READ | VM_PROT_WRITE);
 	bzero((void *)mapped, witness_size);
 	witness_startup((void *)mapped);
 #endif
 
 #if MINIDUMP_PAGE_TRACKING
 	/*
 	 * Allocate a bitmap to indicate that a random physical page
 	 * needs to be included in a minidump.
 	 *
 	 * The amd64 port needs this to indicate which direct map pages
 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
 	 *
 	 * However, i386 still needs this workspace internally within the
 	 * minidump code.  In theory, they are not needed on i386, but are
 	 * included should the sf_buf code decide to use them.
 	 */
 	last_pa = 0;
 	vm_page_dump_pages = 0;
 	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
 		vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
 		    dump_avail[i] / PAGE_SIZE;
 		if (dump_avail[i + 1] > last_pa)
 			last_pa = dump_avail[i + 1];
 	}
 	vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
 	new_end -= vm_page_dump_size;
 	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
 	bzero((void *)vm_page_dump, vm_page_dump_size);
 #else
 	(void)last_pa;
 #endif
 #if defined(__aarch64__) || defined(__amd64__) || \
     defined(__riscv) || defined(__powerpc64__)
 	/*
 	 * Include the UMA bootstrap pages, witness pages and vm_page_dump
 	 * in a crash dump.  When pmap_map() uses the direct map, they are
 	 * not automatically included.
 	 */
 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
 		dump_add_page(pa);
 #endif
 	phys_avail[biggestone + 1] = new_end;
 #ifdef __amd64__
 	/*
 	 * Request that the physical pages underlying the message buffer be
 	 * included in a crash dump.  Since the message buffer is accessed
 	 * through the direct map, they are not automatically included.
 	 */
 	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
 	last_pa = pa + round_page(msgbufsize);
 	while (pa < last_pa) {
 		dump_add_page(pa);
 		pa += PAGE_SIZE;
 	}
 #endif
 	/*
 	 * Compute the number of pages of memory that will be available for
 	 * use, taking into account the overhead of a page structure per page.
 	 * In other words, solve
 	 *	"available physical memory" - round_page(page_range *
 	 *	    sizeof(struct vm_page)) = page_range * PAGE_SIZE 
 	 * for page_range.  
 	 */
 	low_avail = phys_avail[0];
 	high_avail = phys_avail[1];
 	for (i = 0; i < vm_phys_nsegs; i++) {
 		if (vm_phys_segs[i].start < low_avail)
 			low_avail = vm_phys_segs[i].start;
 		if (vm_phys_segs[i].end > high_avail)
 			high_avail = vm_phys_segs[i].end;
 	}
 	/* Skip the first chunk.  It is already accounted for. */
 	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
 		if (phys_avail[i] < low_avail)
 			low_avail = phys_avail[i];
 		if (phys_avail[i + 1] > high_avail)
 			high_avail = phys_avail[i + 1];
 	}
 	first_page = low_avail / PAGE_SIZE;
 #ifdef VM_PHYSSEG_SPARSE
 	size = 0;
 	for (i = 0; i < vm_phys_nsegs; i++)
 		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 		size += phys_avail[i + 1] - phys_avail[i];
 #elif defined(VM_PHYSSEG_DENSE)
 	size = high_avail - low_avail;
 #else
 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
 #endif
 
 #ifdef PMAP_HAS_PAGE_ARRAY
 	pmap_page_array_startup(size / PAGE_SIZE);
 	biggestone = vm_phys_avail_largest();
 	end = new_end = phys_avail[biggestone + 1];
 #else
 #ifdef VM_PHYSSEG_DENSE
 	/*
 	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
 	 * the overhead of a page structure per page only if vm_page_array is
 	 * allocated from the last physical memory chunk.  Otherwise, we must
 	 * allocate page structures representing the physical memory
 	 * underlying vm_page_array, even though they will not be used.
 	 */
 	if (new_end != high_avail)
 		page_range = size / PAGE_SIZE;
 	else
 #endif
 	{
 		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
 
 		/*
 		 * If the partial bytes remaining are large enough for
 		 * a page (PAGE_SIZE) without a corresponding
 		 * 'struct vm_page', then new_end will contain an
 		 * extra page after subtracting the length of the VM
 		 * page array.  Compensate by subtracting an extra
 		 * page from new_end.
 		 */
 		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
 			if (new_end == high_avail)
 				high_avail -= PAGE_SIZE;
 			new_end -= PAGE_SIZE;
 		}
 	}
 	end = new_end;
 	new_end = vm_page_array_alloc(&vaddr, end, page_range);
 #endif
 
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Allocate physical memory for the reservation management system's
 	 * data structures, and map it.
 	 */
 	new_end = vm_reserv_startup(&vaddr, new_end);
 #endif
 #if defined(__aarch64__) || defined(__amd64__) || \
     defined(__riscv) || defined(__powerpc64__)
 	/*
 	 * Include vm_page_array and vm_reserv_array in a crash dump.
 	 */
 	for (pa = new_end; pa < end; pa += PAGE_SIZE)
 		dump_add_page(pa);
 #endif
 	phys_avail[biggestone + 1] = new_end;
 
 	/*
 	 * Add physical memory segments corresponding to the available
 	 * physical pages.
 	 */
 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 		if (vm_phys_avail_size(i) != 0)
 			vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
 
 	/*
 	 * Initialize the physical memory allocator.
 	 */
 	vm_phys_init();
 
 	/*
 	 * Initialize the page structures and add every available page to the
 	 * physical memory allocator's free lists.
 	 */
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
 	for (ii = 0; ii < vm_page_array_size; ii++) {
 		m = &vm_page_array[ii];
 		vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
 		m->flags = PG_FICTITIOUS;
 	}
 #endif
 	vm_cnt.v_page_count = 0;
 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
 		seg = &vm_phys_segs[segind];
 		for (m = seg->first_page, pa = seg->start; pa < seg->end;
 		    m++, pa += PAGE_SIZE)
 			vm_page_init_page(m, pa, segind);
 
 		/*
 		 * Add the segment's pages that are covered by one of
 		 * phys_avail's ranges to the free lists.
 		 */
 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 			if (seg->end <= phys_avail[i] ||
 			    seg->start >= phys_avail[i + 1])
 				continue;
 
 			startp = MAX(seg->start, phys_avail[i]);
 			endp = MIN(seg->end, phys_avail[i + 1]);
 			pagecount = (u_long)atop(endp - startp);
 			if (pagecount == 0)
 				continue;
 
 			m = seg->first_page + atop(startp - seg->start);
 			vmd = VM_DOMAIN(seg->domain);
 			vm_domain_free_lock(vmd);
 			vm_phys_enqueue_contig(m, pagecount);
 			vm_domain_free_unlock(vmd);
 			vm_domain_freecnt_inc(vmd, pagecount);
 			vm_cnt.v_page_count += (u_int)pagecount;
 			vmd->vmd_page_count += (u_int)pagecount;
 			vmd->vmd_segs |= 1UL << segind;
 		}
 	}
 
 	/*
 	 * Remove blacklisted pages from the physical memory allocator.
 	 */
 	TAILQ_INIT(&blacklist_head);
 	vm_page_blacklist_load(&list, &listend);
 	vm_page_blacklist_check(list, listend);
 
 	list = kern_getenv("vm.blacklist");
 	vm_page_blacklist_check(list, NULL);
 
 	freeenv(list);
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Initialize the reservation management system.
 	 */
 	vm_reserv_init();
 #endif
 
 	return (vaddr);
 }
 
 void
 vm_page_reference(vm_page_t m)
 {
 
 	vm_page_aflag_set(m, PGA_REFERENCED);
 }
 
 /*
  *	vm_page_trybusy
  *
  *	Helper routine for grab functions to trylock busy.
  *
  *	Returns true on success and false on failure.
  */
 static bool
 vm_page_trybusy(vm_page_t m, int allocflags)
 {
 
 	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
 		return (vm_page_trysbusy(m));
 	else
 		return (vm_page_tryxbusy(m));
 }
 
 /*
  *	vm_page_tryacquire
  *
  *	Helper routine for grab functions to trylock busy and wire.
  *
  *	Returns true on success and false on failure.
  */
 static inline bool
 vm_page_tryacquire(vm_page_t m, int allocflags)
 {
 	bool locked;
 
 	locked = vm_page_trybusy(m, allocflags);
 	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
 		vm_page_wire(m);
 	return (locked);
 }
 
 /*
  *	vm_page_busy_acquire:
  *
  *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
  *	and drop the object lock if necessary.
  */
 bool
 vm_page_busy_acquire(vm_page_t m, int allocflags)
 {
 	vm_object_t obj;
 	bool locked;
 
 	/*
 	 * The page-specific object must be cached because page
 	 * identity can change during the sleep, causing the
 	 * re-lock of a different object.
 	 * It is assumed that a reference to the object is already
 	 * held by the callers.
 	 */
 	obj = atomic_load_ptr(&m->object);
 	for (;;) {
 		if (vm_page_tryacquire(m, allocflags))
 			return (true);
 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 			return (false);
 		if (obj != NULL)
 			locked = VM_OBJECT_WOWNED(obj);
 		else
 			locked = false;
 		MPASS(locked || vm_page_wired(m));
 		if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
 		    locked) && locked)
 			VM_OBJECT_WLOCK(obj);
 		if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
 			return (false);
 		KASSERT(m->object == obj || m->object == NULL,
 		    ("vm_page_busy_acquire: page %p does not belong to %p",
 		    m, obj));
 	}
 }
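 /*
  * A minimal usage sketch (assuming the standard object/page KPIs, not
  * taken from this change): with the object lock held, a caller that must
  * not sleep can pair a lookup with a non-blocking exclusive busy acquire:
  *
  *	VM_OBJECT_WLOCK(obj);
  *	m = vm_page_lookup(obj, pindex);
  *	if (m != NULL && vm_page_busy_acquire(m, VM_ALLOC_NOWAIT)) {
  *		... operate on the exclusively busied page ...
  *		vm_page_xunbusy(m);
  *	}
  *	VM_OBJECT_WUNLOCK(obj);
  *
  * Without VM_ALLOC_NOWAIT, vm_page_busy_acquire() may sleep and
  * transiently drop the object lock, as noted above.
  */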
 
 /*
  *	vm_page_busy_downgrade:
  *
  *	Downgrade an exclusive busy page into a single shared busy page.
  */
 void
 vm_page_busy_downgrade(vm_page_t m)
 {
 	u_int x;
 
 	vm_page_assert_xbusied(m);
 
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if (atomic_fcmpset_rel_int(&m->busy_lock,
 		    &x, VPB_SHARERS_WORD(1)))
 			break;
 	}
 	if ((x & VPB_BIT_WAITERS) != 0)
 		wakeup(m);
 }
 
 /*
  *	vm_page_busy_tryupgrade:
  *
  *	Attempt to upgrade a single shared busy into an exclusive busy.
  */
 int
 vm_page_busy_tryupgrade(vm_page_t m)
 {
 	u_int ce, x;
 
 	vm_page_assert_sbusied(m);
 
 	x = vm_page_busy_fetch(m);
 	ce = VPB_CURTHREAD_EXCLUSIVE;
 	for (;;) {
 		if (VPB_SHARERS(x) > 1)
 			return (0);
 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
 		    ("vm_page_busy_tryupgrade: invalid lock state"));
 		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
 		    ce | (x & VPB_BIT_WAITERS)))
 			continue;
 		return (1);
 	}
 }
 
 /*
  *	vm_page_sbusied:
  *
  *	Return a positive value if the page is shared busied, 0 otherwise.
  */
 int
 vm_page_sbusied(vm_page_t m)
 {
 	u_int x;
 
 	x = vm_page_busy_fetch(m);
 	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
 }
 
 /*
  *	vm_page_sunbusy:
  *
  *	Shared unbusy a page.
  */
 void
 vm_page_sunbusy(vm_page_t m)
 {
 	u_int x;
 
 	vm_page_assert_sbusied(m);
 
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		KASSERT(x != VPB_FREED,
 		    ("vm_page_sunbusy: Unlocking freed page."));
 		if (VPB_SHARERS(x) > 1) {
 			if (atomic_fcmpset_int(&m->busy_lock, &x,
 			    x - VPB_ONE_SHARER))
 				break;
 			continue;
 		}
 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
 		    ("vm_page_sunbusy: invalid lock state"));
 		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
 			continue;
 		if ((x & VPB_BIT_WAITERS) == 0)
 			break;
 		wakeup(m);
 		break;
 	}
 }
 
 /*
  *	vm_page_busy_sleep:
  *
  *	Sleep if the page is busy, using the page pointer as wchan.
  *	This is used to implement the hard-path of the busying mechanism.
  *
  *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
  *	will not sleep if the page is shared-busy.
  *
  *	The object lock must be held on entry.
  *
  *	Returns true if it slept and dropped the object lock, or false
  *	if there was no sleep and the lock is still held.
  */
 bool
 vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
 {
 	vm_object_t obj;
 
 	obj = m->object;
 	VM_OBJECT_ASSERT_LOCKED(obj);
 
 	return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
 	    true));
 }
 
 /*
  *	vm_page_busy_sleep_unlocked:
  *
  *	Sleep if the page is busy, using the page pointer as wchan.
  *	This is used to implement the hard-path of the busying mechanism.
  *
  *	If VM_ALLOC_IGN_SBUSY is specified in allocflags, the function
  *	will not sleep if the page is shared-busy.
  *
  *	The object lock must not be held on entry.  The operation will
  *	return if the page changes identity.
  */
 void
 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
     const char *wmesg, int allocflags)
 {
 	VM_OBJECT_ASSERT_UNLOCKED(obj);
 
 	(void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
 }
 
 /*
  *	_vm_page_busy_sleep:
  *
  *	Internal busy sleep function.  Verifies the page identity and
  *	lockstate against parameters.  Returns true if it sleeps and
  *	false otherwise.
  *
  *	allocflags uses VM_ALLOC_* flags to specify the lock required.
  *
  *	If locked is true the lock will be dropped for any true returns
  *	and held for any false returns.
  */
 static bool
 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
     const char *wmesg, int allocflags, bool locked)
 {
 	bool xsleep;
 	u_int x;
 
 	/*
 	 * If the object is busy we must wait for that to drain to zero
 	 * before trying the page again.
 	 */
 	if (obj != NULL && vm_object_busied(obj)) {
 		if (locked)
 			VM_OBJECT_DROP(obj);
 		vm_object_busy_wait(obj, wmesg);
 		return (true);
 	}
 
 	if (!vm_page_busied(m))
 		return (false);
 
 	xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
 	sleepq_lock(m);
 	x = vm_page_busy_fetch(m);
 	do {
 		/*
 		 * If the page changes objects or becomes unlocked we can
 		 * simply return.
 		 */
 		if (x == VPB_UNBUSIED ||
 		    (xsleep && (x & VPB_BIT_SHARED) != 0) ||
 		    m->object != obj || m->pindex != pindex) {
 			sleepq_release(m);
 			return (false);
 		}
 		if ((x & VPB_BIT_WAITERS) != 0)
 			break;
 	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
 	if (locked)
 		VM_OBJECT_DROP(obj);
 	DROP_GIANT();
 	sleepq_add(m, NULL, wmesg, 0, 0);
 	sleepq_wait(m, PVM);
 	PICKUP_GIANT();
 	return (true);
 }
 
 /*
  *	vm_page_trysbusy:
  *
  *	Try to shared busy a page.
  *	If the operation succeeds 1 is returned otherwise 0.
  *	The operation never sleeps.
  */
 int
 vm_page_trysbusy(vm_page_t m)
 {
 	vm_object_t obj;
 	u_int x;
 
 	obj = m->object;
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if ((x & VPB_BIT_SHARED) == 0)
 			return (0);
 		/*
 		 * Reduce the window for transient busies that will trigger
 		 * false negatives in vm_page_ps_test().
 		 */
 		if (obj != NULL && vm_object_busied(obj))
 			return (0);
 		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
 		    x + VPB_ONE_SHARER))
 			break;
 	}
 
 	/* Refetch the object now that we're guaranteed that it is stable. */
 	obj = m->object;
 	if (obj != NULL && vm_object_busied(obj)) {
 		vm_page_sunbusy(m);
 		return (0);
 	}
 	return (1);
 }
 
 /*
  *	vm_page_tryxbusy:
  *
  *	Try to exclusive busy a page.
  *	If the operation succeeds 1 is returned otherwise 0.
  *	The operation never sleeps.
  */
 int
 vm_page_tryxbusy(vm_page_t m)
 {
 	vm_object_t obj;
 
         if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
             VPB_CURTHREAD_EXCLUSIVE) == 0)
 		return (0);
 
 	obj = m->object;
 	if (obj != NULL && vm_object_busied(obj)) {
 		vm_page_xunbusy(m);
 		return (0);
 	}
 	return (1);
 }
 
 static void
 vm_page_xunbusy_hard_tail(vm_page_t m)
 {
 	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
 	/* Wake the waiter. */
 	wakeup(m);
 }
 
 /*
  *	vm_page_xunbusy_hard:
  *
  *	Called when unbusy has failed because there is a waiter.
  */
 void
 vm_page_xunbusy_hard(vm_page_t m)
 {
 	vm_page_assert_xbusied(m);
 	vm_page_xunbusy_hard_tail(m);
 }
 
 void
 vm_page_xunbusy_hard_unchecked(vm_page_t m)
 {
 	vm_page_assert_xbusied_unchecked(m);
 	vm_page_xunbusy_hard_tail(m);
 }
 
 static void
 vm_page_busy_free(vm_page_t m)
 {
 	u_int x;
 
 	atomic_thread_fence_rel();
 	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
 	if ((x & VPB_BIT_WAITERS) != 0)
 		wakeup(m);
 }
 
 /*
  *	vm_page_unhold_pages:
  *
  *	Unhold each of the pages that is referenced by the given array.
  */
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
 
 	for (; count != 0; count--) {
 		vm_page_unwire(*ma, PQ_ACTIVE);
 		ma++;
 	}
 }
 
 vm_page_t
 PHYS_TO_VM_PAGE(vm_paddr_t pa)
 {
 	vm_page_t m;
 
 #ifdef VM_PHYSSEG_SPARSE
 	m = vm_phys_paddr_to_vm_page(pa);
 	if (m == NULL)
 		m = vm_phys_fictitious_to_vm_page(pa);
 	return (m);
 #elif defined(VM_PHYSSEG_DENSE)
 	long pi;
 
 	pi = atop(pa);
 	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
 		m = &vm_page_array[pi - first_page];
 		return (m);
 	}
 	return (vm_phys_fictitious_to_vm_page(pa));
 #else
 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
 #endif
 }
 
 /*
  *	vm_page_getfake:
  *
  *	Create a fictitious page with the specified physical address and
  *	memory attribute.  The memory attribute is the only machine-
  *	dependent aspect of a fictitious page that must be initialized.
  */
 vm_page_t
 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
 {
 	vm_page_t m;
 
 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
 	vm_page_initfake(m, paddr, memattr);
 	return (m);
 }
 
 void
 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 {
 
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		/*
 		 * The page's memattr might have changed since the
 		 * previous initialization.  Update the pmap to the
 		 * new memattr.
 		 */
 		goto memattr;
 	}
 	m->phys_addr = paddr;
 	m->a.queue = PQ_NONE;
 	/* Fictitious pages don't use "segind". */
 	m->flags = PG_FICTITIOUS;
 	/* Fictitious pages don't use "order" or "pool". */
 	m->oflags = VPO_UNMANAGED;
 	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	/* Fictitious pages are unevictable. */
 	m->ref_count = 1;
 	pmap_page_init(m);
 memattr:
 	pmap_page_set_memattr(m, memattr);
 }
 
 /*
  *	vm_page_putfake:
  *
  *	Release a fictitious page.
  */
 void
 vm_page_putfake(vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
 	    ("vm_page_putfake: bad page %p", m));
 	vm_page_assert_xbusied(m);
 	vm_page_busy_free(m);
 	uma_zfree(fakepg_zone, m);
 }
 
 /*
  *	vm_page_updatefake:
  *
  *	Update the given fictitious page to the specified physical address and
  *	memory attribute.
  */
 void
 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 {
 
 	KASSERT((m->flags & PG_FICTITIOUS) != 0,
 	    ("vm_page_updatefake: bad page %p", m));
 	m->phys_addr = paddr;
 	pmap_page_set_memattr(m, memattr);
 }
 
 /*
  *	vm_page_free:
  *
  *	Free a page.
  */
 void
 vm_page_free(vm_page_t m)
 {
 
 	m->flags &= ~PG_ZERO;
 	vm_page_free_toq(m);
 }
 
 /*
  *	vm_page_free_zero:
  *
  *	Free a page to the zeroed-pages queue
  */
 void
 vm_page_free_zero(vm_page_t m)
 {
 
 	m->flags |= PG_ZERO;
 	vm_page_free_toq(m);
 }
 
 /*
  * Unbusy and handle the page queueing for a page from a getpages request that
  * was optionally read ahead or behind.
  */
 void
 vm_page_readahead_finish(vm_page_t m)
 {
 
 	/* We shouldn't put invalid pages on queues. */
 	KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));
 
 	/*
 	 * Since the page is not the one actually needed, whether it should
 	 * be activated or deactivated is not obvious.  Empirical results
 	 * have shown that deactivating the page is usually the best choice,
 	 * unless the page is wanted by another thread.
 	 */
 	if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
 		vm_page_activate(m);
 	else
 		vm_page_deactivate(m);
 	vm_page_xunbusy_unchecked(m);
 }
 
 /*
  * Destroy the identity of an invalid page and free it if possible.
  * This is intended to be used when reading a page from backing store fails.
  */
 void
 vm_page_free_invalid(vm_page_t m)
 {
 
 	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
 	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
 	KASSERT(m->object != NULL, ("page %p has no object", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 
 	/*
 	 * We may be attempting to free the page as part of the handling for an
 	 * I/O error, in which case the page was xbusied by a different thread.
 	 */
 	vm_page_xbusy_claim(m);
 
 	/*
 	 * If someone has wired this page while the object lock
 	 * was not held, then the thread that unwires is responsible
 	 * for freeing the page.  Otherwise just free the page now.
 	 * The wire count of this unmapped page cannot change while
 	 * we have the page xbusy and the page's object wlocked.
 	 */
 	if (vm_page_remove(m))
 		vm_page_free(m);
 }
 
 /*
  *	vm_page_dirty_KBI:		[ internal use only ]
  *
  *	Set all bits in the page's dirty field.
  *
  *	The object containing the specified page must be locked if the
  *	call is made from the machine-independent layer.
  *
  *	See vm_page_clear_dirty_mask().
  *
  *	This function should only be called by vm_page_dirty().
  */
 void
 vm_page_dirty_KBI(vm_page_t m)
 {
 
 	/* Refer to this operation by its public name. */
 	KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
 	m->dirty = VM_PAGE_BITS_ALL;
 }
 
 /*
  *	vm_page_insert:		[ internal use only ]
  *
  *	Inserts the given mem entry into the object and object list.
  *
  *	The object must be locked.
  */
 int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t mpred;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	mpred = vm_radix_lookup_le(&object->rtree, pindex);
 	return (vm_page_insert_after(m, object, pindex, mpred));
 }
 
 /*
  *	vm_page_insert_after:
  *
  *	Inserts the page "m" into the specified object at offset "pindex".
  *
  *	The page "mpred" must immediately precede the offset "pindex" within
  *	the specified object.
  *
  *	The object must be locked.
  */
 static int
 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mpred)
 {
 	vm_page_t msucc;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(m->object == NULL,
 	    ("vm_page_insert_after: page already inserted"));
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object,
 		    ("vm_page_insert_after: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < pindex,
 		    ("vm_page_insert_after: mpred doesn't precede pindex"));
 		msucc = TAILQ_NEXT(mpred, listq);
 	} else
 		msucc = TAILQ_FIRST(&object->memq);
 	if (msucc != NULL)
 		KASSERT(msucc->pindex > pindex,
 		    ("vm_page_insert_after: msucc doesn't succeed pindex"));
 
 	/*
 	 * Record the object/offset pair in this page.
 	 */
 	m->object = object;
 	m->pindex = pindex;
 	m->ref_count |= VPRC_OBJREF;
 
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
 	if (vm_radix_insert(&object->rtree, m)) {
 		m->object = NULL;
 		m->pindex = 0;
 		m->ref_count &= ~VPRC_OBJREF;
 		return (1);
 	}
 	vm_page_insert_radixdone(m, object, mpred);
 	vm_pager_page_inserted(object, m);
 	return (0);
 }
 
 /*
  *	vm_page_insert_radixdone:
  *
  *	Complete page "m" insertion into the specified object after the
  *	radix trie hooking.
  *
  *	The page "mpred" must precede the offset "m->pindex" within the
  *	specified object.
  *
  *	The object must be locked.
  */
 static void
 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object != NULL && m->object == object,
 	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
 	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object,
 		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < m->pindex,
 		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
 	}
 
 	if (mpred != NULL)
 		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
 	else
 		TAILQ_INSERT_HEAD(&object->memq, m, listq);
 
 	/*
 	 * Show that the object has one more resident page.
 	 */
 	object->resident_page_count++;
 
 	/*
 	 * Hold the vnode until the last page is released.
 	 */
 	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
 		vhold(object->handle);
 
 	/*
 	 * Since we are inserting a new and possibly dirty page,
 	 * update the object's generation count.
 	 */
 	if (pmap_page_is_write_mapped(m))
 		vm_object_set_writeable_dirty(object);
 }
 
 /*
  * Do the work to remove a page from its object.  The caller is responsible for
  * updating the page's fields to reflect this removal.
  */
 static void
 vm_page_object_remove(vm_page_t m)
 {
 	vm_object_t object;
 	vm_page_t mrem __diagused;
 
 	vm_page_assert_xbusied(m);
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
 	    ("page %p is missing its object ref", m));
 
 	/* Deferred free of swap space. */
 	if ((m->a.flags & PGA_SWAP_FREE) != 0)
 		vm_pager_page_unswapped(m);
 
 	vm_pager_page_removed(object, m);
 
 	m->object = NULL;
 	mrem = vm_radix_remove(&object->rtree, m->pindex);
 	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
 
 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
 	TAILQ_REMOVE(&object->memq, m, listq);
 
 	/*
 	 * And show that the object has one fewer resident page.
 	 */
 	object->resident_page_count--;
 
 	/*
 	 * The vnode may now be recycled.
 	 */
 	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
 		vdrop(object->handle);
 }
 
 /*
  *	vm_page_remove:
  *
  *	Removes the specified page from its containing object, but does not
  *	invalidate any backing storage.  Returns true if the object's reference
  *	was the last reference to the page, and false otherwise.
  *
  *	The object must be locked and the page must be exclusively busied.
  *	The exclusive busy will be released on return.  If this is not the
  *	final ref and the caller does not hold a wire reference it may not
  *	continue to access the page.
  */
 bool
 vm_page_remove(vm_page_t m)
 {
 	bool dropped;
 
 	dropped = vm_page_remove_xbusy(m);
 	vm_page_xunbusy(m);
 
 	return (dropped);
 }
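
 /*
  * Illustrative sketch (not part of this file): a caller that holds no
  * wiring of its own frees the page only when the object's reference was
  * the last one, mirroring vm_page_free_invalid() above.
  *
  *	if (vm_page_remove(m))
  *		vm_page_free(m);
  */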
 
 /*
  *	vm_page_remove_xbusy
  *
  *	Removes the page but leaves the xbusy held.  Returns true if this
  *	removed the final ref and false otherwise.
  */
 bool
 vm_page_remove_xbusy(vm_page_t m)
 {
 
 	vm_page_object_remove(m);
 	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
 }
 
 /*
  *	vm_page_lookup:
  *
  *	Returns the page associated with the object/offset
  *	pair specified; if none is found, NULL is returned.
  *
  *	The object must be locked.
  */
 vm_page_t
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	return (vm_radix_lookup(&object->rtree, pindex));
 }
 
 /*
  *	vm_page_lookup_unlocked:
  *
  *	Returns the page associated with the object/offset pair specified;
  *	if none is found, NULL is returned.  The page may no longer be
  *	present in the object at the time that this function returns.  Only
  *	useful for opportunistic checks such as inmem().
  */
 vm_page_t
 vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
 {
 
 	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
 }
 
 /*
  *	vm_page_relookup:
  *
  *	Returns a page that must already have been busied by
  *	the caller.  Used for bogus page replacement.
  */
 vm_page_t
 vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
 
 	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
 	KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
 	    m->object == object && m->pindex == pindex,
 	    ("vm_page_relookup: Invalid page %p", m));
 	return (m);
 }
 
 /*
  * This should only be used by lockless functions for releasing transient
  * incorrect acquires.  The page may have been freed after we acquired a
  * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
  * further to do.
  */
 static void
 vm_page_busy_release(vm_page_t m)
 {
 	u_int x;
 
 	x = vm_page_busy_fetch(m);
 	for (;;) {
 		if (x == VPB_FREED)
 			break;
 		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
 			if (atomic_fcmpset_int(&m->busy_lock, &x,
 			    x - VPB_ONE_SHARER))
 				break;
 			continue;
 		}
 		KASSERT((x & VPB_BIT_SHARED) != 0 ||
 		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
 		    ("vm_page_busy_release: %p xbusy not owned.", m));
 		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
 			continue;
 		if ((x & VPB_BIT_WAITERS) != 0)
 			wakeup(m);
 		break;
 	}
 }
 
 /*
  *	vm_page_find_least:
  *
  *	Returns the page associated with the object with least pindex
  *	greater than or equal to the parameter pindex, or NULL.
  *
  *	The object must be locked.
  */
 vm_page_t
 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
 	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
 		m = vm_radix_lookup_ge(&object->rtree, pindex);
 	return (m);
 }
 
 /*
  * Returns the given page's successor (by pindex) within the object if it is
  * resident; if none is found, NULL is returned.
  *
  * The object must be locked.
  */
 vm_page_t
 vm_page_next(vm_page_t m)
 {
 	vm_page_t next;
 
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
 		MPASS(next->object == m->object);
 		if (next->pindex != m->pindex + 1)
 			next = NULL;
 	}
 	return (next);
 }
 
 /*
  * Returns the given page's predecessor (by pindex) within the object if it is
  * resident; if none is found, NULL is returned.
  *
  * The object must be locked.
  */
 vm_page_t
 vm_page_prev(vm_page_t m)
 {
 	vm_page_t prev;
 
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
 		MPASS(prev->object == m->object);
 		if (prev->pindex != m->pindex - 1)
 			prev = NULL;
 	}
 	return (prev);
 }
 
 /*
  * Uses the page mnew as a replacement for an existing page at index
  * pindex which must be already present in the object.
  *
  * Both pages must be exclusively busied on enter.  The old page is
  * unbusied on exit.
  *
  * A return value of true means mold is now free.  If this is not the
  * final ref and the caller does not hold a wire reference it may not
  * continue to access the page.
  */
 static bool
 vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mold)
 {
 	vm_page_t mret __diagused;
 	bool dropped;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_assert_xbusied(mold);
 	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
 	    ("vm_page_replace: page %p already in object", mnew));
 
 	/*
 	 * This function mostly follows vm_page_insert() and
 	 * vm_page_remove() without the radix, object count and vnode
 	 * dance.  Consult those functions for further comments.
 	 */
 
 	mnew->object = object;
 	mnew->pindex = pindex;
 	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
 	mret = vm_radix_replace(&object->rtree, mnew);
 	KASSERT(mret == mold,
 	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
 	KASSERT((mold->oflags & VPO_UNMANAGED) ==
 	    (mnew->oflags & VPO_UNMANAGED),
 	    ("vm_page_replace: mismatched VPO_UNMANAGED"));
 
 	/* Keep the resident page list in sorted order. */
 	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
 	TAILQ_REMOVE(&object->memq, mold, listq);
 	mold->object = NULL;
 
 	/*
 	 * The object's resident_page_count does not change because we have
 	 * swapped one page for another, but the generation count should
 	 * change if the page is dirty.
 	 */
 	if (pmap_page_is_write_mapped(mnew))
 		vm_object_set_writeable_dirty(object);
 	dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
 	vm_page_xunbusy(mold);
 
 	return (dropped);
 }
 
 void
 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mold)
 {
 
 	vm_page_assert_xbusied(mnew);
 
 	if (vm_page_replace_hold(mnew, object, pindex, mold))
 		vm_page_free(mold);
 }
 
 /*
  *	vm_page_rename:
  *
  *	Move the given memory entry from its
  *	current object to the specified target object/offset.
  *
  *	Note: swap associated with the page must be invalidated by the move.  We
  *	      have to do this for several reasons:  (1) we aren't freeing the
  *	      page, (2) we are dirtying the page, (3) the VM system is probably
  *	      moving the page from object A to B, and will then later move
  *	      the backing store from A to B and we can't have a conflict.
  *
  *	Note: we *always* dirty the page.  It is necessary both for the
  *	      fact that we moved it, and because we may be invalidating
  *	      swap.
  *
  *	The objects must be locked.
  */
 int
 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 {
 	vm_page_t mpred;
 	vm_pindex_t opidx;
 
 	VM_OBJECT_ASSERT_WLOCKED(new_object);
 
 	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
 	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
 	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
 	    ("vm_page_rename: pindex already renamed"));
 
 	/*
 	 * Create a custom version of vm_page_insert() which does not depend
 	 * on mpred and can cheat on the implementation aspects of the
 	 * function.
 	 */
 	opidx = m->pindex;
 	m->pindex = new_pindex;
 	if (vm_radix_insert(&new_object->rtree, m)) {
 		m->pindex = opidx;
 		return (1);
 	}
 
 	/*
 	 * The operation cannot fail anymore.  The removal must happen before
 	 * the listq iterator is tainted.
 	 */
 	m->pindex = opidx;
 	vm_page_object_remove(m);
 
 	/* Return back to the new pindex to complete vm_page_insert(). */
 	m->pindex = new_pindex;
 	m->object = new_object;
 
 	vm_page_insert_radixdone(m, new_object, mpred);
 	vm_page_dirty(m);
 	vm_pager_page_inserted(new_object, m);
 	return (0);
 }
 
 /*
  *	vm_page_alloc:
  *
  *	Allocate and return a page that is associated with the specified
  *	object and offset pair.  By default, this page is exclusive busied.
  *
  *	The caller must always specify an allocation class.
  *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
  *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	optional allocation flags:
  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
  *				intends to allocate
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_SBUSY		shared busy the allocated page
  *	VM_ALLOC_WIRED		wire the allocated page
  *	VM_ALLOC_ZERO		prefer a zeroed page
  */
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
 
 	return (vm_page_alloc_after(object, pindex, req,
 	    vm_radix_lookup_le(&object->rtree, pindex)));
 }
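
 /*
  * Illustrative sketch (not part of this file): a typical caller holds the
  * object write lock, asks for a wired page and prefers a pre-zeroed one,
  * zeroing it manually when PG_ZERO was not inherited.  "object" and
  * "pindex" are assumptions made for the example only.
  *
  *	VM_OBJECT_WLOCK(object);
  *	m = vm_page_alloc(object, pindex,
  *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
  *	if (m != NULL) {
  *		if ((m->flags & PG_ZERO) == 0)
  *			pmap_zero_page(m);
  *		vm_page_xunbusy(m);
  *	}
  *	VM_OBJECT_WUNLOCK(object);
  */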
 
 vm_page_t
 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
     int req)
 {
 
 	return (vm_page_alloc_domain_after(object, pindex, domain, req,
 	    vm_radix_lookup_le(&object->rtree, pindex)));
 }
 
 /*
  * Allocate a page in the specified object with the given page index.  To
  * optimize insertion of the page into the object, the caller must also specify
  * the resident page in the object with largest index smaller than the given
  * page index, or NULL if no such page exists.
  */
 vm_page_t
 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
     int req, vm_page_t mpred)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
 		m = vm_page_alloc_domain_after(object, pindex, domain, req,
 		    mpred);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
 	return (m);
 }
 
 /*
  * Returns true if the number of free pages exceeds the minimum
  * for the request class and false otherwise.
  */
 static int
 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages)
 {
 	u_int limit, old, new;
 
 	if (req_class == VM_ALLOC_INTERRUPT)
 		limit = 0;
 	else if (req_class == VM_ALLOC_SYSTEM)
 		limit = vmd->vmd_interrupt_free_min;
 	else
 		limit = vmd->vmd_free_reserved;
 
 	/*
 	 * Attempt to reserve the pages.  Fail if we're below the limit.
 	 */
 	limit += npages;
 	old = vmd->vmd_free_count;
 	do {
 		if (old < limit)
 			return (0);
 		new = old - npages;
 	} while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
 
 	/* Wake the page daemon if we've crossed the threshold. */
 	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
 		pagedaemon_wakeup(vmd->vmd_domain);
 
 	/* Only update bitsets on transitions. */
 	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
 	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
 		vm_domain_set(vmd);
 
 	return (1);
 }
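
 /*
  * Worked example (assumed numbers): with vmd_free_reserved == 1024, a
  * VM_ALLOC_NORMAL request for npages == 4 succeeds only while
  * vmd_free_count >= 1028.  VM_ALLOC_SYSTEM lowers the floor to
  * vmd_interrupt_free_min, and VM_ALLOC_INTERRUPT may drive the free
  * count all the way to zero.
  */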
 
 int
 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
 {
 	int req_class;
 
 	/*
 	 * The page daemon is allowed to dig deeper into the free page list.
 	 */
 	req_class = req & VM_ALLOC_CLASS_MASK;
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 	return (_vm_domain_allocate(vmd, req_class, npages));
 }
 
 vm_page_t
 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
     int req, vm_page_t mpred)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	int flags;
 
 #define	VPA_FLAGS	(VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL |	\
 			 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY |		\
 			 VM_ALLOC_SBUSY | VM_ALLOC_WIRED |		\
 			 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | VM_ALLOC_COUNT_MASK)
 	KASSERT((req & ~VPA_FLAGS) == 0,
 	    ("invalid request %#x", req));
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
 	KASSERT(mpred == NULL || mpred->pindex < pindex,
 	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
 	    (uintmax_t)pindex));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	flags = 0;
 	m = NULL;
 	if (!vm_pager_can_alloc_page(object, pindex))
 		return (NULL);
 again:
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Can we allocate the page from a reservation?
 	 */
 	if (vm_object_reserv(object) &&
 	    (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
 	    NULL) {
 		goto found;
 	}
 #endif
 	vmd = VM_DOMAIN(domain);
 	if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) {
 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
 		    M_NOWAIT | M_NOVM);
 		if (m != NULL) {
 			flags |= PG_PCPU_CACHE;
 			goto found;
 		}
 	}
 	if (vm_domain_allocate(vmd, req, 1)) {
 		/*
 		 * If not, allocate it from the free page queues.
 		 */
 		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0);
 		vm_domain_free_unlock(vmd);
 		if (m == NULL) {
 			vm_domain_freecnt_inc(vmd, 1);
 #if VM_NRESERVLEVEL > 0
 			if (vm_reserv_reclaim_inactive(domain))
 				goto again;
 #endif
 		}
 	}
 	if (m == NULL) {
 		/*
 		 * Not allocatable, give up.
 		 */
 		if (vm_domain_alloc_fail(vmd, object, req))
 			goto again;
 		return (NULL);
 	}
 
 	/*
 	 * At this point we had better have found a good page.
 	 */
 found:
 	vm_page_dequeue(m);
 	vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
 	 */
 	flags |= m->flags & PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	m->flags = flags;
 	m->a.flags = 0;
 	m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 		m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	else if ((req & VM_ALLOC_SBUSY) != 0)
 		m->busy_lock = VPB_SHARERS_WORD(1);
 	else
 		m->busy_lock = VPB_UNBUSIED;
 	if (req & VM_ALLOC_WIRED) {
 		vm_wire_add(1);
 		m->ref_count = 1;
 	}
 	m->a.act_count = 0;
 
 	if (vm_page_insert_after(m, object, pindex, mpred)) {
 		if (req & VM_ALLOC_WIRED) {
 			vm_wire_sub(1);
 			m->ref_count = 0;
 		}
 		KASSERT(m->object == NULL, ("page %p has object", m));
 		m->oflags = VPO_UNMANAGED;
 		m->busy_lock = VPB_UNBUSIED;
 		/* Don't change PG_ZERO. */
 		vm_page_free_toq(m);
 		if (req & VM_ALLOC_WAITFAIL) {
 			VM_OBJECT_WUNLOCK(object);
 			vm_radix_wait();
 			VM_OBJECT_WLOCK(object);
 		}
 		return (NULL);
 	}
 
 	/* Ignore device objects; the pager sets "memattr" for them. */
 	if (object->memattr != VM_MEMATTR_DEFAULT &&
 	    (object->flags & OBJ_FICTITIOUS) == 0)
 		pmap_page_set_memattr(m, object->memattr);
 
 	return (m);
 }
 
 /*
  *	vm_page_alloc_contig:
  *
  *	Allocate a contiguous set of physical pages of the given size "npages"
  *	from the free lists.  All of the physical pages must be at or above
  *	the given physical address "low" and below the given physical address
  *	"high".  The given value "alignment" determines the alignment of the
  *	first physical page in the set.  If the given value "boundary" is
  *	non-zero, then the set of physical pages cannot cross any physical
  *	address boundary that is a multiple of that value.  Both "alignment"
  *	and "boundary" must be a power of two.
  *
  *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
  *	then the memory attribute setting for the physical pages is configured
  *	to the object's memory attribute setting.  Otherwise, the memory
  *	attribute setting for the physical pages is configured to "memattr",
  *	overriding the object's memory attribute setting.  However, if the
  *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
  *	memory attribute setting for the physical pages cannot be configured
  *	to VM_MEMATTR_DEFAULT.
  *
  *	The specified object may not contain fictitious pages.
  *
  *	The caller must always specify an allocation class.
  *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
  *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	optional allocation flags:
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_SBUSY		shared busy the allocated page
  *	VM_ALLOC_WIRED		wire the allocated page
  *	VM_ALLOC_ZERO		prefer a zeroed page
  */
 vm_page_t
 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 	do {
 		m = vm_page_alloc_contig_domain(object, pindex, domain, req,
 		    npages, low, high, alignment, boundary, memattr);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, object, &domain) == 0);
 
 	return (m);
 }
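
 /*
  * Illustrative sketch (not part of this file): with the object write
  * locked, a hypothetical caller allocates eight physically contiguous
  * pages below 4GB, aligned to 64KB, with no boundary restriction.
  * "object" and "pindex" are assumptions made for the example only.
  *
  *	m = vm_page_alloc_contig(object, pindex,
  *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED, 8, 0, (vm_paddr_t)1 << 32,
  *	    64 * 1024, 0, VM_MEMATTR_DEFAULT);
  */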
 
 static vm_page_t
 vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_domain *vmd;
 	vm_page_t m_ret;
 
 	/*
 	 * Can we allocate the pages without the number of free pages falling
 	 * below the lower bound for the allocation class?
 	 */
 	vmd = VM_DOMAIN(domain);
 	if (!vm_domain_allocate(vmd, req, npages))
 		return (NULL);
 	/*
 	 * Try to allocate the pages from the free page queues.
 	 */
 	vm_domain_free_lock(vmd);
 	m_ret = vm_phys_alloc_contig(domain, npages, low, high,
 	    alignment, boundary);
 	vm_domain_free_unlock(vmd);
 	if (m_ret != NULL)
 		return (m_ret);
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Try to break a reservation to allocate the pages.
 	 */
 	if ((req & VM_ALLOC_NORECLAIM) == 0) {
 		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
 	            high, alignment, boundary);
 		if (m_ret != NULL)
 			return (m_ret);
 	}
 #endif
 	vm_domain_freecnt_inc(vmd, npages);
 	return (NULL);
 }
 
 vm_page_t
 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
 
 #define	VPAC_FLAGS	(VPA_FLAGS | VM_ALLOC_NORECLAIM)
 	KASSERT((req & ~VPAC_FLAGS) == 0,
 	    ("invalid request %#x", req));
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
 	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
 	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
 	    ("invalid request %#x", req));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
 	    ("vm_page_alloc_contig: object %p has fictitious pages",
 	    object));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
 	mpred = vm_radix_lookup_le(&object->rtree, pindex);
 	KASSERT(mpred == NULL || mpred->pindex != pindex,
 	    ("vm_page_alloc_contig: pindex already allocated"));
 	for (;;) {
 #if VM_NRESERVLEVEL > 0
 		/*
 		 * Can we allocate the pages from a reservation?
 		 */
 		if (vm_object_reserv(object) &&
 		    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
 		    mpred, npages, low, high, alignment, boundary)) != NULL) {
 			break;
 		}
 #endif
 		if ((m_ret = vm_page_find_contig_domain(domain, req, npages,
 		    low, high, alignment, boundary)) != NULL)
 			break;
 		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), object, req))
 			return (NULL);
 	}
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		vm_page_dequeue(m);
 		vm_page_alloc_check(m);
 	}
 
 	/*
 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
 	 */
 	flags = PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 		busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	else if ((req & VM_ALLOC_SBUSY) != 0)
 		busy_lock = VPB_SHARERS_WORD(1);
 	else
 		busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0)
 		vm_wire_add(npages);
 	if (object->memattr != VM_MEMATTR_DEFAULT &&
 	    memattr == VM_MEMATTR_DEFAULT)
 		memattr = object->memattr;
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		m->a.flags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
 		m->busy_lock = busy_lock;
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = oflags;
 		if (vm_page_insert_after(m, object, pindex, mpred)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
 			KASSERT(m->object == NULL,
 			    ("page %p has object", m));
 			mpred = m;
 			for (m = m_ret; m < &m_ret[npages]; m++) {
 				if (m <= mpred &&
 				    (req & VM_ALLOC_WIRED) != 0)
 					m->ref_count = 0;
 				m->oflags = VPO_UNMANAGED;
 				m->busy_lock = VPB_UNBUSIED;
 				/* Don't change PG_ZERO. */
 				vm_page_free_toq(m);
 			}
 			if (req & VM_ALLOC_WAITFAIL) {
 				VM_OBJECT_WUNLOCK(object);
 				vm_radix_wait();
 				VM_OBJECT_WLOCK(object);
 			}
 			return (NULL);
 		}
 		mpred = m;
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
 		pindex++;
 	}
 	return (m_ret);
 }
 
 /*
  * Allocate a physical page that is not intended to be inserted into a VM
  * object.  If the "freelist" parameter is not equal to VM_NFREELIST, then only
  * pages from the specified vm_phys freelist will be returned.
  */
 static __always_inline vm_page_t
 _vm_page_alloc_noobj_domain(int domain, const int freelist, int req)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	int flags;
 
 #define	VPAN_FLAGS	(VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL |      \
 			 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |		\
 			 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED |		\
 			 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | VM_ALLOC_COUNT_MASK)
 	KASSERT((req & ~VPAN_FLAGS) == 0,
 	    ("invalid request %#x", req));
 
 	flags = (req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0;
 	vmd = VM_DOMAIN(domain);
 	/* "m" is tested below even when no allocation attempt assigns it. */
 	m = NULL;
 again:
 	if (freelist == VM_NFREELIST &&
 	    vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
 		    M_NOWAIT | M_NOVM);
 		if (m != NULL) {
 			flags |= PG_PCPU_CACHE;
 			goto found;
 		}
 	}
 
 	if (vm_domain_allocate(vmd, req, 1)) {
 		vm_domain_free_lock(vmd);
 		if (freelist == VM_NFREELIST)
 			m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
 		else
 			m = vm_phys_alloc_freelist_pages(domain, freelist,
 			    VM_FREEPOOL_DIRECT, 0);
 		vm_domain_free_unlock(vmd);
 		if (m == NULL) {
 			vm_domain_freecnt_inc(vmd, 1);
 #if VM_NRESERVLEVEL > 0
 			if (freelist == VM_NFREELIST &&
 			    vm_reserv_reclaim_inactive(domain))
 				goto again;
 #endif
 		}
 	}
 	if (m == NULL) {
 		if (vm_domain_alloc_fail(vmd, NULL, req))
 			goto again;
 		return (NULL);
 	}
 
 found:
 	vm_page_dequeue(m);
 	vm_page_alloc_check(m);
 
 	/*
 	 * Consumers should not rely on a useful default pindex value.
 	 */
 	m->pindex = 0xdeadc0dedeadc0de;
 	m->flags = (m->flags & PG_ZERO) | flags;
 	m->a.flags = 0;
 	m->oflags = VPO_UNMANAGED;
 	m->busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0) {
 		vm_wire_add(1);
 		m->ref_count = 1;
 	}
 
 	if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 
 	return (m);
 }
 
 vm_page_t
 vm_page_alloc_freelist(int freelist, int req)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_freelist_domain(domain, freelist, req);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
 
 vm_page_t
 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 {
 	KASSERT(freelist >= 0 && freelist < VM_NFREELIST,
 	    ("%s: invalid freelist %d", __func__, freelist));
 
 	return (_vm_page_alloc_noobj_domain(domain, freelist, req));
 }
 
 vm_page_t
 vm_page_alloc_noobj(int req)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_noobj_domain(domain, req);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
 
 vm_page_t
 vm_page_alloc_noobj_domain(int domain, int req)
 {
 	return (_vm_page_alloc_noobj_domain(domain, VM_NFREELIST, req));
 }
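
 /*
  * Illustrative sketch (not part of this file): kernel consumers that do
  * not insert the page into an object, e.g. a page-sized internal buffer,
  * typically request a wired, zeroed page and may sleep for it.  The page
  * is returned unbusied and unmanaged.
  *
  *	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
  *	    VM_ALLOC_ZERO);
  */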
 
 vm_page_t
 vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_page_t m;
 	int domain;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
 		    high, alignment, boundary, memattr);
 		if (m != NULL)
 			break;
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (m);
 }
 
 vm_page_t
 vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 	vm_page_t m, m_ret;
 	u_int flags;
 
 #define	VPANC_FLAGS	(VPAN_FLAGS | VM_ALLOC_NORECLAIM)
 	KASSERT((req & ~VPANC_FLAGS) == 0,
 	    ("invalid request %#x", req));
 	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
 	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
 	    ("invalid request %#x", req));
 	KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("invalid request %#x", req));
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 
 	while ((m_ret = vm_page_find_contig_domain(domain, req, npages,
 	    low, high, alignment, boundary)) == NULL) {
 		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req))
 			return (NULL);
 	}
 
 	/*
 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
 	 */
 	flags = PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	if ((req & VM_ALLOC_WIRED) != 0)
 		vm_wire_add(npages);
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		vm_page_dequeue(m);
 		vm_page_alloc_check(m);
 
 		/*
 		 * Consumers should not rely on a useful default pindex value.
 		 */
 		m->pindex = 0xdeadc0dedeadc0de;
 		m->a.flags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
 		m->busy_lock = VPB_UNBUSIED;
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = VPO_UNMANAGED;
 
 		/*
 		 * Zero the page before updating any mappings since the page is
 		 * not yet shared with any devices which might require the
 		 * non-default memory attribute.  pmap_page_set_memattr()
 		 * flushes data caches before returning.
 		 */
 		if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
 	}
 	return (m_ret);
 }
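
 /*
  * Illustrative sketch (not part of this file): a hypothetical DMA buffer
  * of "npages" contiguous, wired, unmanaged pages constrained to the low
  * 4GB; all variables are assumptions made for the example only.
  *
  *	m = vm_page_alloc_noobj_contig(VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
  *	    npages, 0, (vm_paddr_t)1 << 32, PAGE_SIZE, 0,
  *	    VM_MEMATTR_DEFAULT);
  */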
 
 /*
  * Check a page that has been freshly dequeued from a freelist.
  */
 static void
 vm_page_alloc_check(vm_page_t m)
 {
 
 	KASSERT(m->object == NULL, ("page %p has object", m));
 	KASSERT(m->a.queue == PQ_NONE &&
 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("page %p has unexpected queue %d, flags %#x",
 	    m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
 	KASSERT(m->ref_count == 0, ("page %p has references", m));
 	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
 	KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
 	pmap_vm_page_alloc_check(m);
 }
 
 static int
 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
 	int i;
 
 	pgcache = arg;
 	vmd = VM_DOMAIN(pgcache->domain);
 
 	/*
 	 * The page daemon should avoid creating extra memory pressure since its
 	 * main purpose is to replenish the store of free pages.
 	 */
 	if (vmd->vmd_severeset || curproc == pageproc ||
 	    !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
 	vm_domain_free_lock(vmd);
 	i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
 	    (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
 		vm_domain_freecnt_inc(vmd, cnt - i);
 
 	return (i);
 }
 
 static void
 vm_page_zone_release(void *arg, void **store, int cnt)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
 	vm_page_t m;
 	int i;
 
 	pgcache = arg;
 	vmd = VM_DOMAIN(pgcache->domain);
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
 		m = (vm_page_t)store[i];
 		vm_phys_free_pages(m, 0);
 	}
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, cnt);
 }
 
 #define	VPSC_ANY	0	/* No restrictions. */
 #define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
 #define	VPSC_NOSUPER	2	/* Skip superpages. */
 
 /*
  *	vm_page_scan_contig:
  *
  *	Scan vm_page_array[] between the specified entries "m_start" and
  *	"m_end" for a run of contiguous physical pages that satisfy the
  *	specified conditions, and return the lowest page in the run.  The
  *	specified "alignment" determines the alignment of the lowest physical
  *	page in the run.  If the specified "boundary" is non-zero, then the
  *	run of physical pages cannot span a physical address that is a
  *	multiple of "boundary".
  *
  *	"m_end" is never dereferenced, so it need not point to a vm_page
  *	structure within vm_page_array[].
  *
  *	"npages" must be greater than zero.  "m_start" and "m_end" must not
  *	span a hole (or discontiguity) in the physical address space.  Both
  *	"alignment" and "boundary" must be a power of two.
  */
 vm_page_t
 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
     u_long alignment, vm_paddr_t boundary, int options)
 {
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_run;
 #if VM_NRESERVLEVEL > 0
 	int level;
 #endif
 	int m_inc, order, run_ext, run_len;
 
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 	m_run = NULL;
 	run_len = 0;
 	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
 		KASSERT((m->flags & PG_MARKER) == 0,
 		    ("page %p is PG_MARKER", m));
 		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
 		    ("fictitious page %p has invalid ref count", m));
 
 		/*
 		 * If the current page would be the start of a run, check its
 		 * physical address against the end, alignment, and boundary
 		 * conditions.  If it doesn't satisfy these conditions, either
 		 * terminate the scan or advance to the next page that
 		 * satisfies the failed condition.
 		 */
 		if (run_len == 0) {
 			KASSERT(m_run == NULL, ("m_run != NULL"));
 			if (m + npages > m_end)
 				break;
 			pa = VM_PAGE_TO_PHYS(m);
 			if (!vm_addr_align_ok(pa, alignment)) {
 				m_inc = atop(roundup2(pa, alignment) - pa);
 				continue;
 			}
 			if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
 				m_inc = atop(roundup2(pa, boundary) - pa);
 				continue;
 			}
 		} else
 			KASSERT(m_run != NULL, ("m_run == NULL"));
 
 retry:
 		m_inc = 1;
 		if (vm_page_wired(m))
 			run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 		else if ((level = vm_reserv_level(m)) >= 0 &&
 		    (options & VPSC_NORESERV) != 0) {
 			run_ext = 0;
 			/* Advance to the end of the reservation. */
 			pa = VM_PAGE_TO_PHYS(m);
 			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
 			    pa);
 		}
 #endif
 		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
 			/*
 			 * The page is considered eligible for relocation if
 			 * and only if it could be laundered or reclaimed by
 			 * the page daemon.
 			 */
 			VM_OBJECT_RLOCK(object);
 			if (object != m->object) {
 				VM_OBJECT_RUNLOCK(object);
 				goto retry;
 			}
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if ((object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_VNODE) {
 				run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 			} else if ((options & VPSC_NOSUPER) != 0 &&
 			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
 				run_ext = 0;
 				/* Advance to the end of the superpage. */
 				pa = VM_PAGE_TO_PHYS(m);
 				m_inc = atop(roundup2(pa + 1,
 				    vm_reserv_size(level)) - pa);
 #endif
 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
 			    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
 				/*
 				 * The page is allocated but eligible for
 				 * relocation.  Extend the current run by one
 				 * page.
 				 */
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
 				KASSERT((m->oflags & (VPO_SWAPINPROG |
 				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 				    ("page %p has unexpected oflags", m));
 				/* Don't care: PGA_NOSYNC. */
 				run_ext = 1;
 			} else
 				run_ext = 0;
 			VM_OBJECT_RUNLOCK(object);
 #if VM_NRESERVLEVEL > 0
 		} else if (level >= 0) {
 			/*
 			 * The page is reserved but not yet allocated.  In
 			 * other words, it is still free.  Extend the current
 			 * run by one page.
 			 */
 			run_ext = 1;
 #endif
 		} else if ((order = m->order) < VM_NFREEORDER) {
 			/*
 			 * The page is enqueued in the physical memory
 			 * allocator's free page queues.  Moreover, it is the
 			 * first page in a power-of-two-sized run of
 			 * contiguous free pages.  Add these pages to the end
 			 * of the current run, and jump ahead.
 			 */
 			run_ext = 1 << order;
 			m_inc = 1 << order;
 		} else {
 			/*
 			 * Skip the page for one of the following reasons: (1)
 			 * It is enqueued in the physical memory allocator's
 			 * free page queues.  However, it is not the first
 			 * page in a run of contiguous free pages.  (This case
 			 * rarely occurs because the scan is performed in
 			 * ascending order.) (2) It is not reserved, and it is
 			 * transitioning from free to allocated.  (Conversely,
 			 * the transition from allocated to free for managed
 			 * pages is blocked by the page busy lock.) (3) It is
 			 * allocated but not contained by an object and not
 			 * wired, e.g., allocated by Xen's balloon driver.
 			 */
 			run_ext = 0;
 		}
 
 		/*
 		 * Extend or reset the current run of pages.
 		 */
 		if (run_ext > 0) {
 			if (run_len == 0)
 				m_run = m;
 			run_len += run_ext;
 		} else {
 			if (run_len > 0) {
 				m_run = NULL;
 				run_len = 0;
 			}
 		}
 	}
 	if (run_len >= npages)
 		return (m_run);
 	return (NULL);
 }
 
 /*
  *	vm_page_reclaim_run:
  *
  *	Try to relocate each of the allocated virtual pages within the
  *	specified run of physical pages to a new physical address.  Free the
  *	physical pages underlying the relocated virtual pages.  A virtual page
  *	is relocatable if and only if it could be laundered or reclaimed by
  *	the page daemon.  Whenever possible, a virtual page is relocated to a
  *	physical address above "high".
  *
  *	Returns 0 if every physical page within the run was already free or
  *	just freed by a successful relocation.  Otherwise, returns a non-zero
  *	value indicating why the last attempt to relocate a virtual page was
  *	unsuccessful.
  *
  *	"req_class" must be an allocation class.
  */
 static int
 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
 	struct vm_domain *vmd;
 	struct spglist free;
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_end, m_new;
 	int error, order, req;
 
 	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
 	    ("req_class is not an allocation class"));
 	SLIST_INIT(&free);
 	error = 0;
 	m = m_run;
 	m_end = m_run + npages;
 	for (; error == 0 && m < m_end; m++) {
 		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
 		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));
 
 		/*
 		 * Racily check for wirings.  Races are handled once the object
 		 * lock is held and the page is unmapped.
 		 */
 		if (vm_page_wired(m))
 			error = EBUSY;
 		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
 			/*
 			 * The page is relocated if and only if it could be
 			 * laundered or reclaimed by the page daemon.
 			 */
 			VM_OBJECT_WLOCK(object);
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if (m->object != object ||
 			    ((object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_VNODE))
 				error = EINVAL;
 			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
 			else if (vm_page_queue(m) != PQ_NONE &&
 			    vm_page_tryxbusy(m) != 0) {
 				if (vm_page_wired(m)) {
 					vm_page_xunbusy(m);
 					error = EBUSY;
 					goto unlock;
 				}
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
 				KASSERT(m->oflags == 0,
 				    ("page %p has unexpected oflags", m));
 				/* Don't care: PGA_NOSYNC. */
 				if (!vm_page_none_valid(m)) {
 					/*
 					 * First, try to allocate a new page
 					 * that is above "high".  Failing
 					 * that, try to allocate a new page
 					 * that is below "m_run".  Allocate
 					 * the new page between the end of
 					 * "m_run" and "high" only as a last
 					 * resort.
 					 */
 					req = req_class;
 					if ((m->flags & PG_NODUMP) != 0)
 						req |= VM_ALLOC_NODUMP;
 					if (trunc_page(high) !=
 					    ~(vm_paddr_t)PAGE_MASK) {
 						m_new =
 						    vm_page_alloc_noobj_contig(
 						    req, 1, round_page(high),
 						    ~(vm_paddr_t)0, PAGE_SIZE,
 						    0, VM_MEMATTR_DEFAULT);
 					} else
 						m_new = NULL;
 					if (m_new == NULL) {
 						pa = VM_PAGE_TO_PHYS(m_run);
 						m_new =
 						    vm_page_alloc_noobj_contig(
 						    req, 1, 0, pa - 1,
 						    PAGE_SIZE, 0,
 						    VM_MEMATTR_DEFAULT);
 					}
 					if (m_new == NULL) {
 						pa += ptoa(npages);
 						m_new =
 						    vm_page_alloc_noobj_contig(
 						    req, 1, pa, high, PAGE_SIZE,
 						    0, VM_MEMATTR_DEFAULT);
 					}
 					if (m_new == NULL) {
 						vm_page_xunbusy(m);
 						error = ENOMEM;
 						goto unlock;
 					}
 
 					/*
 					 * Unmap the page and check for new
 					 * wirings that may have been acquired
 					 * through a pmap lookup.
 					 */
 					if (object->ref_count != 0 &&
 					    !vm_page_try_remove_all(m)) {
 						vm_page_xunbusy(m);
 						vm_page_free(m_new);
 						error = EBUSY;
 						goto unlock;
 					}
 
 					/*
 					 * Replace "m" with the new page.  For
 					 * vm_page_replace(), "m" must be busy
 					 * and dequeued.  Finally, change "m"
 					 * as if vm_page_free() was called.
 					 */
 					m_new->a.flags = m->a.flags &
 					    ~PGA_QUEUE_STATE_MASK;
 					KASSERT(m_new->oflags == VPO_UNMANAGED,
 					    ("page %p is managed", m_new));
 					m_new->oflags = 0;
 					pmap_copy_page(m, m_new);
 					m_new->valid = m->valid;
 					m_new->dirty = m->dirty;
 					m->flags &= ~PG_ZERO;
 					vm_page_dequeue(m);
 					if (vm_page_replace_hold(m_new, object,
 					    m->pindex, m) &&
 					    vm_page_free_prep(m))
 						SLIST_INSERT_HEAD(&free, m,
 						    plinks.s.ss);
 
 					/*
 					 * The new page must be deactivated
 					 * before the object is unlocked.
 					 */
 					vm_page_deactivate(m_new);
 				} else {
 					m->flags &= ~PG_ZERO;
 					vm_page_dequeue(m);
 					if (vm_page_free_prep(m))
 						SLIST_INSERT_HEAD(&free, m,
 						    plinks.s.ss);
 					KASSERT(m->dirty == 0,
 					    ("page %p is dirty", m));
 				}
 			} else
 				error = EBUSY;
 unlock:
 			VM_OBJECT_WUNLOCK(object);
 		} else {
 			MPASS(vm_page_domain(m) == domain);
 			vmd = VM_DOMAIN(domain);
 			vm_domain_free_lock(vmd);
 			order = m->order;
 			if (order < VM_NFREEORDER) {
 				/*
 				 * The page is enqueued in the physical memory
 				 * allocator's free page queues.  Moreover, it
 				 * is the first page in a power-of-two-sized
 				 * run of contiguous free pages.  Jump ahead
 				 * to the last page within that run, and
 				 * continue from there.
 				 */
 				m += (1 << order) - 1;
 			}
 #if VM_NRESERVLEVEL > 0
 			else if (vm_reserv_is_page_free(m))
 				order = 0;
 #endif
 			vm_domain_free_unlock(vmd);
 			if (order == VM_NFREEORDER)
 				error = EINVAL;
 		}
 	}
 	if ((m = SLIST_FIRST(&free)) != NULL) {
 		int cnt;
 
 		vmd = VM_DOMAIN(domain);
 		cnt = 0;
 		vm_domain_free_lock(vmd);
 		do {
 			MPASS(vm_page_domain(m) == domain);
 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 			vm_phys_free_pages(m, 0);
 			cnt++;
 		} while ((m = SLIST_FIRST(&free)) != NULL);
 		vm_domain_free_unlock(vmd);
 		vm_domain_freecnt_inc(vmd, cnt);
 	}
 	return (error);
 }
 
 #define	NRUNS	16
 
 CTASSERT(powerof2(NRUNS));
 
 #define	RUN_INDEX(count)	((count) & (NRUNS - 1))
 
 #define	MIN_RECLAIM	8
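
 /*
  * NRUNS bounds the number of candidate runs remembered during a single
  * scan; m_runs[] below is used as a ring indexed by RUN_INDEX(), so only
  * the NRUNS highest runs found are kept.  MIN_RECLAIM is the minimum
  * number of pages that must be reclaimed before a scan pass is considered
  * successful.
  */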
 
 /*
  *	vm_page_reclaim_contig:
  *
  *	Reclaim allocated, contiguous physical memory satisfying the specified
  *	conditions by relocating the virtual pages using that physical memory.
  *	Returns true if reclamation is successful and false otherwise.  Since
  *	relocation requires the allocation of physical pages, reclamation may
  *	fail due to a shortage of free pages.  When reclamation fails, callers
  *	are expected to perform vm_wait() before retrying a failed allocation
  *	operation, e.g., vm_page_alloc_contig().
  *
  *	The caller must always specify an allocation class through "req".
  *
  *	allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs a page
  *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	The optional allocation flags are ignored.
  *
  *	"npages" must be greater than zero.  Both "alignment" and "boundary"
  *	must be a power of two.
  */
 bool
 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_domain *vmd;
 	vm_paddr_t curr_low;
 	vm_page_t m_run, m_runs[NRUNS];
 	u_long count, minalign, reclaimed;
 	int error, i, options, req_class;
 
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 
 	/*
 	 * The caller will attempt an allocation after some runs have been
 	 * reclaimed and added to the vm_phys buddy lists.  Due to limitations
 	 * of vm_phys_alloc_contig(), round up the requested length to the next
 	 * power of two or maximum chunk size, and ensure that each run is
 	 * suitably aligned.
 	 */
 	minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1);
 	npages = roundup2(npages, minalign);
 	if (alignment < ptoa(minalign))
 		alignment = ptoa(minalign);
 
 	/*
 	 * The page daemon is allowed to dig deeper into the free page list.
 	 */
 	req_class = req & VM_ALLOC_CLASS_MASK;
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
 	/*
 	 * Return if the number of free pages cannot satisfy the requested
 	 * allocation.
 	 */
 	vmd = VM_DOMAIN(domain);
 	count = vmd->vmd_free_count;
 	if (count < npages + vmd->vmd_free_reserved || (count < npages +
 	    vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
 	    (count < npages && req_class == VM_ALLOC_INTERRUPT))
 		return (false);
 
 	/*
 	 * Scan up to three times, relaxing the restrictions ("options") on
 	 * the reclamation of reservations and superpages each time.
 	 */
 	for (options = VPSC_NORESERV;;) {
 		/*
 		 * Find the highest runs that satisfy the given constraints
 		 * and restrictions, and record them in "m_runs".
 		 */
 		curr_low = low;
 		count = 0;
 		for (;;) {
 			m_run = vm_phys_scan_contig(domain, npages, curr_low,
 			    high, alignment, boundary, options);
 			if (m_run == NULL)
 				break;
 			curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
 			m_runs[RUN_INDEX(count)] = m_run;
 			count++;
 		}
 
 		/*
 		 * Reclaim the highest runs in LIFO (descending) order until
 		 * the number of reclaimed pages, "reclaimed", is at least
 		 * MIN_RECLAIM.  Reset "reclaimed" each time because each
 		 * reclamation is idempotent, and runs will (likely) recur
 		 * from one scan to the next as restrictions are relaxed.
 		 */
 		reclaimed = 0;
 		for (i = 0; count > 0 && i < NRUNS; i++) {
 			count--;
 			m_run = m_runs[RUN_INDEX(count)];
 			error = vm_page_reclaim_run(req_class, domain, npages,
 			    m_run, high);
 			if (error == 0) {
 				reclaimed += npages;
 				if (reclaimed >= MIN_RECLAIM)
 					return (true);
 			}
 		}
 
 		/*
 		 * Either relax the restrictions on the next scan or return if
 		 * the last scan had no restrictions.
 		 */
 		if (options == VPSC_NORESERV)
 			options = VPSC_NOSUPER;
 		else if (options == VPSC_NOSUPER)
 			options = VPSC_ANY;
 		else if (options == VPSC_ANY)
 			return (reclaimed != 0);
 	}
 }
 
 bool
 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_domainset_iter di;
 	int domain;
 	bool ret;
 
 	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 	do {
 		ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
 		    high, alignment, boundary);
 		if (ret)
 			break;
 	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
 	return (ret);
 }
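
 /*
  * Illustrative sketch (not part of this file): callers of the contiguous
  * allocators are expected to alternate reclamation and waiting, roughly
  * as below; "req", "npages", "low", "high", "align" and "bound" are
  * assumptions made for the example only.
  *
  *	while ((m = vm_page_alloc_noobj_contig(req, npages, low, high,
  *	    align, bound, VM_MEMATTR_DEFAULT)) == NULL) {
  *		if (!vm_page_reclaim_contig(req, npages, low, high, align,
  *		    bound))
  *			vm_wait(NULL);
  *	}
  */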
 
 /*
  * Set the domain in the appropriate page level domainset.
  */
 void
 vm_domain_set(struct vm_domain *vmd)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	if (!vmd->vmd_minset && vm_paging_min(vmd)) {
 		vmd->vmd_minset = 1;
 		DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
 	}
 	if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 1;
 		DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 /*
  * Clear the domain from the appropriate page level domainset.
  */
 void
 vm_domain_clear(struct vm_domain *vmd)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
 		vmd->vmd_minset = 0;
 		DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
 		if (vm_min_waiters != 0) {
 			vm_min_waiters = 0;
 			wakeup(&vm_min_domains);
 		}
 	}
 	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 0;
 		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
 		if (vm_severe_waiters != 0) {
 			vm_severe_waiters = 0;
 			wakeup(&vm_severe_domains);
 		}
 	}
 
 	/*
 	 * If the pageout daemon needs pages, then tell it that there are
 	 * some free.
 	 */
 	if (vmd->vmd_pageout_pages_needed &&
 	    vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
 		wakeup(&vmd->vmd_pageout_pages_needed);
 		vmd->vmd_pageout_pages_needed = 0;
 	}
 
 	/* See comments in vm_wait_doms(). */
 	if (vm_pageproc_waiters) {
 		vm_pageproc_waiters = 0;
 		wakeup(&vm_pageproc_waiters);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 /*
  * Wait for free pages to exceed the min threshold globally.
  */
 void
 vm_wait_min(void)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	while (vm_page_count_min()) {
 		vm_min_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 /*
  * Wait for free pages to exceed the severe threshold globally.
  */
 void
 vm_wait_severe(void)
 {
 
 	mtx_lock(&vm_domainset_lock);
 	while (vm_page_count_severe()) {
 		vm_severe_waiters++;
 		msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
 		    "vmwait", 0);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
 u_int
 vm_wait_count(void)
 {
 
 	return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
 }
 
 int
 vm_wait_doms(const domainset_t *wdoms, int mflags)
 {
 	int error;
 
 	error = 0;
 
 	/*
 	 * We use racy wakeup synchronization to avoid expensive global
 	 * locking for the pageproc when sleeping with a non-specific vm_wait.
 	 * To handle this, we only sleep for one tick in this instance.  It
 	 * is expected that most allocations for the pageproc will come from
 	 * kmem or vm_page_grab* which will use the more specific and
 	 * race-free vm_wait_domain().
 	 */
 	if (curproc == pageproc) {
 		mtx_lock(&vm_domainset_lock);
 		vm_pageproc_waiters++;
 		error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
 		    PVM | PDROP | mflags, "pageprocwait", 1);
 	} else {
 		/*
 		 * XXX Ideally we would wait only until the allocation could
 		 * be satisfied.  This condition can cause new allocators to
 		 * consume all freed pages while old allocators wait.
 		 */
 		mtx_lock(&vm_domainset_lock);
 		if (vm_page_count_min_set(wdoms)) {
 			if (pageproc == NULL)
 				panic("vm_wait in early boot");
 			vm_min_waiters++;
 			error = msleep(&vm_min_domains, &vm_domainset_lock,
 			    PVM | PDROP | mflags, "vmwait", 0);
 		} else
 			mtx_unlock(&vm_domainset_lock);
 	}
 	return (error);
 }
 
 /*
  *	vm_wait_domain:
  *
  *	Sleep until free pages are available for allocation.
  *	- Called in various places after failed memory allocations.
  */
 void
 vm_wait_domain(int domain)
 {
 	struct vm_domain *vmd;
 	domainset_t wdom;
 
 	vmd = VM_DOMAIN(domain);
 	vm_domain_free_assert_unlocked(vmd);
 
 	if (curproc == pageproc) {
 		mtx_lock(&vm_domainset_lock);
 		if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
 			vmd->vmd_pageout_pages_needed = 1;
 			msleep(&vmd->vmd_pageout_pages_needed,
 			    &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
 		} else
 			mtx_unlock(&vm_domainset_lock);
 	} else {
 		DOMAINSET_ZERO(&wdom);
 		DOMAINSET_SET(vmd->vmd_domain, &wdom);
 		vm_wait_doms(&wdom, 0);
 	}
 }
 
 static int
 vm_wait_flags(vm_object_t obj, int mflags)
 {
 	struct domainset *d;
 
 	d = NULL;
 
 	/*
 	 * Carefully fetch pointers only once: the struct domainset
 	 * itself is immutable but the pointer might change.
 	 */
 	if (obj != NULL)
 		d = obj->domain.dr_policy;
 	if (d == NULL)
 		d = curthread->td_domain.dr_policy;
 
 	return (vm_wait_doms(&d->ds_mask, mflags));
 }
 
 /*
  *	vm_wait:
  *
  *	Sleep until free pages are available for allocation in the
  *	affinity domains of the obj.  If obj is NULL, the domain set
  *	for the calling thread is used.
  *	Called in various places after failed memory allocations.
  */
 void
 vm_wait(vm_object_t obj)
 {
 	(void)vm_wait_flags(obj, 0);
 }
 
 int
 vm_wait_intr(vm_object_t obj)
 {
 	return (vm_wait_flags(obj, PCATCH));
 }
 
 /*
  *	vm_domain_alloc_fail:
  *
  *	Called when a page allocation function fails.  Informs the
  *	pagedaemon and performs the requested wait.  Requires the
  *	object lock on entry; the domain free lock must not be held.
  *	Returns with the object lock held.  Returns an error when a
  *	retry is necessary.
  */
 static int
 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
 {
 
 	vm_domain_free_assert_unlocked(vmd);
 
 	atomic_add_int(&vmd->vmd_pageout_deficit,
 	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
 	if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
 		if (object != NULL) 
 			VM_OBJECT_WUNLOCK(object);
 		vm_wait_domain(vmd->vmd_domain);
 		if (object != NULL) 
 			VM_OBJECT_WLOCK(object);
 		if (req & VM_ALLOC_WAITOK)
 			return (EAGAIN);
 	}
 
 	return (0);
 }
 
 /*
  *	vm_waitpfault:
  *
  *	Sleep until free pages are available for allocation.
  *	- Called only in vm_fault so that processes page faulting
  *	  can be easily tracked.
  *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
  *	  processes will be able to grab memory first.  Do not change
  *	  this balance without careful testing first.
  */
 void
 vm_waitpfault(struct domainset *dset, int timo)
 {
 
 	/*
 	 * XXX Ideally we would wait only until the allocation could
 	 * be satisfied.  This condition can cause new allocators to
 	 * consume all freed pages while old allocators wait.
 	 */
 	mtx_lock(&vm_domainset_lock);
 	if (vm_page_count_min_set(&dset->ds_mask)) {
 		vm_min_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
 		    "pfault", timo);
 	} else
 		mtx_unlock(&vm_domainset_lock);
 }
 
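 /*
  * Return the page queue with the given index in the page's domain.
  */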
 static struct vm_pagequeue *
 _vm_page_pagequeue(vm_page_t m, uint8_t queue)
 {
 
 	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
 }
 
 #ifdef INVARIANTS
 static struct vm_pagequeue *
 vm_page_pagequeue(vm_page_t m)
 {
 
 	return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
 }
 #endif
 
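 /*
  * Attempt to atomically update the page's queue state, retrying the
  * compare-and-set while it fails spuriously.  Returns false, with *old
  * updated, once another thread has changed the state.
  */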
 static __always_inline bool
 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
 {
 	vm_page_astate_t tmp;
 
 	tmp = *old;
 	do {
 		if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
 			return (true);
 		counter_u64_add(pqstate_commit_retries, 1);
 	} while (old->_bits == tmp._bits);
 
 	return (false);
 }
 
 /*
  * Do the work of committing a queue state update that moves the page out of
  * its current queue.
  */
 static bool
 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m,
     vm_page_astate_t *old, vm_page_astate_t new)
 {
 	vm_page_t next;
 
 	vm_pagequeue_assert_locked(pq);
 	KASSERT(vm_page_pagequeue(m) == pq,
 	    ("%s: queue %p does not match page %p", __func__, pq, m));
 	KASSERT(old->queue != PQ_NONE && new.queue != old->queue,
 	    ("%s: invalid queue indices %d %d",
 	    __func__, old->queue, new.queue));
 
 	/*
 	 * Once the queue index of the page changes there is nothing
 	 * synchronizing with further updates to the page's physical
 	 * queue state.  Therefore we must speculatively remove the page
 	 * from the queue now and be prepared to roll back if the queue
 	 * state update fails.  If the page is not physically enqueued then
 	 * we just update its queue index.
 	 */
 	if ((old->flags & PGA_ENQUEUED) != 0) {
 		new.flags &= ~PGA_ENQUEUED;
 		next = TAILQ_NEXT(m, plinks.q);
 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 		vm_pagequeue_cnt_dec(pq);
 		if (!vm_page_pqstate_fcmpset(m, old, new)) {
 			if (next == NULL)
 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 			else
 				TAILQ_INSERT_BEFORE(next, m, plinks.q);
 			vm_pagequeue_cnt_inc(pq);
 			return (false);
 		} else {
 			return (true);
 		}
 	} else {
 		return (vm_page_pqstate_fcmpset(m, old, new));
 	}
 }
 
 static bool
 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old,
     vm_page_astate_t new)
 {
 	struct vm_pagequeue *pq;
 	vm_page_astate_t as;
 	bool ret;
 
 	pq = _vm_page_pagequeue(m, old->queue);
 
 	/*
 	 * The queue field and PGA_ENQUEUED flag are stable only so long as the
 	 * corresponding page queue lock is held.
 	 */
 	vm_pagequeue_lock(pq);
 	as = vm_page_astate_load(m);
 	if (__predict_false(as._bits != old->_bits)) {
 		*old = as;
 		ret = false;
 	} else {
 		ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new);
 	}
 	vm_pagequeue_unlock(pq);
 	return (ret);
 }
 
 /*
  * Commit a queue state update that enqueues or requeues a page.
  */
 static bool
 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m,
     vm_page_astate_t *old, vm_page_astate_t new)
 {
 	struct vm_domain *vmd;
 
 	vm_pagequeue_assert_locked(pq);
 	KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
 	    ("%s: invalid queue indices %d %d",
 	    __func__, old->queue, new.queue));
 
 	new.flags |= PGA_ENQUEUED;
 	if (!vm_page_pqstate_fcmpset(m, old, new))
 		return (false);
 
 	if ((old->flags & PGA_ENQUEUED) != 0)
 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 	else
 		vm_pagequeue_cnt_inc(pq);
 
 	/*
 	 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.  In particular, if
 	 * both flags are set in close succession, only PGA_REQUEUE_HEAD will be
 	 * applied, even if it was set first.
 	 */
 	if ((old->flags & PGA_REQUEUE_HEAD) != 0) {
 		vmd = vm_pagequeue_domain(m);
 		KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE],
 		    ("%s: invalid page queue for page %p", __func__, m));
 		TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
 	} else {
 		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 	}
 	return (true);
 }
 
 /*
  * Commit a queue state update that encodes a request for a deferred queue
  * operation.
  */
 static bool
 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old,
     vm_page_astate_t new)
 {
 
 	KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
 	    ("%s: invalid state, queue %d flags %x",
 	    __func__, new.queue, new.flags));
 
 	if (old->_bits != new._bits &&
 	    !vm_page_pqstate_fcmpset(m, old, new))
 		return (false);
 	vm_page_pqbatch_submit(m, new.queue);
 	return (true);
 }
 
 /*
  * A generic queue state update function.  This handles more cases than the
  * specialized functions above.
  */
 bool
 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
 {
 
 	if (old->_bits == new._bits)
 		return (true);
 
 	if (old->queue != PQ_NONE && new.queue != old->queue) {
 		if (!vm_page_pqstate_commit_dequeue(m, old, new))
 			return (false);
 		if (new.queue != PQ_NONE)
 			vm_page_pqbatch_submit(m, new.queue);
 	} else {
 		if (!vm_page_pqstate_fcmpset(m, old, new))
 			return (false);
 		if (new.queue != PQ_NONE &&
 		    ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0)
 			vm_page_pqbatch_submit(m, new.queue);
 	}
 	return (true);
 }
 
 /*
  * Apply deferred queue state updates to a page.
  */
 static inline void
 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
 {
 	vm_page_astate_t new, old;
 
 	CRITICAL_ASSERT(curthread);
 	vm_pagequeue_assert_locked(pq);
 	KASSERT(queue < PQ_COUNT,
 	    ("%s: invalid queue index %d", __func__, queue));
 	KASSERT(pq == _vm_page_pagequeue(m, queue),
 	    ("%s: page %p does not belong to queue %p", __func__, m, pq));
 
 	for (old = vm_page_astate_load(m);;) {
 		if (__predict_false(old.queue != queue ||
 		    (old.flags & PGA_QUEUE_OP_MASK) == 0)) {
 			counter_u64_add(queue_nops, 1);
 			break;
 		}
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("%s: page %p is unmanaged", __func__, m));
 
 		new = old;
 		if ((old.flags & PGA_DEQUEUE) != 0) {
 			new.flags &= ~PGA_QUEUE_OP_MASK;
 			new.queue = PQ_NONE;
 			if (__predict_true(_vm_page_pqstate_commit_dequeue(pq,
 			    m, &old, new))) {
 				counter_u64_add(queue_ops, 1);
 				break;
 			}
 		} else {
 			new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
 			if (__predict_true(_vm_page_pqstate_commit_requeue(pq,
 			    m, &old, new))) {
 				counter_u64_add(queue_ops, 1);
 				break;
 			}
 		}
 	}
 }
 
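 /*
  * Apply deferred queue state updates to each page in the batch queue and
  * reinitialize the batch queue to an empty state.
  */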
 static void
 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
     uint8_t queue)
 {
 	int i;
 
 	for (i = 0; i < bq->bq_cnt; i++)
 		vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);
 	vm_batchqueue_init(bq);
 }
 
 /*
  *	vm_page_pqbatch_submit:		[ internal use only ]
  *
  *	Enqueue a page in the specified page queue's batched work queue.
  *	The caller must have encoded the requested operation in the page
  *	structure's a.flags field.
  */
 void
 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
-	int domain;
+	int domain, slots_remaining;
 
 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 
 	domain = vm_page_domain(m);
 	critical_enter();
 	bq = DPCPU_PTR(pqbatch[domain][queue]);
-	if (vm_batchqueue_insert(bq, m)) {
+	slots_remaining = vm_batchqueue_insert(bq, m);
+	if (slots_remaining > (VM_BATCHQUEUE_SIZE >> 1)) {
+		/* Keep building the bq. */
+		critical_exit();
+		return;
+	} else if (slots_remaining > 0) {
+		/* Try to process the bq if we can get the lock. */
+		pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
+		if (vm_pagequeue_trylock(pq)) {
+			vm_pqbatch_process(pq, bq, queue);
+			vm_pagequeue_unlock(pq);
+		}
 		critical_exit();
 		return;
 	}
 	critical_exit();
 
+	/* The bq is full, so wait for the page queue lock and process it. */
+
 	pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
 	vm_pagequeue_lock(pq);
 	critical_enter();
 	bq = DPCPU_PTR(pqbatch[domain][queue]);
 	vm_pqbatch_process(pq, bq, queue);
 	vm_pqbatch_process_page(pq, m, queue);
 	vm_pagequeue_unlock(pq);
 	critical_exit();
 }
 
 /*
  *	vm_page_pqbatch_drain:		[ internal use only ]
  *
  *	Force all per-CPU page queue batch queues to be drained.  This is
  *	intended for use in severe memory shortages, to ensure that pages
  *	do not remain stuck in the batch queues.
  */
 void
 vm_page_pqbatch_drain(void)
 {
 	struct thread *td;
 	struct vm_domain *vmd;
 	struct vm_pagequeue *pq;
 	int cpu, domain, queue;
 
 	td = curthread;
 	CPU_FOREACH(cpu) {
 		thread_lock(td);
 		sched_bind(td, cpu);
 		thread_unlock(td);
 
 		for (domain = 0; domain < vm_ndomains; domain++) {
 			vmd = VM_DOMAIN(domain);
 			for (queue = 0; queue < PQ_COUNT; queue++) {
 				pq = &vmd->vmd_pagequeues[queue];
 				vm_pagequeue_lock(pq);
 				critical_enter();
 				vm_pqbatch_process(pq,
 				    DPCPU_PTR(pqbatch[domain][queue]), queue);
 				critical_exit();
 				vm_pagequeue_unlock(pq);
 			}
 		}
 	}
 	thread_lock(td);
 	sched_unbind(td);
 	thread_unlock(td);
 }
 
 /*
  *	vm_page_dequeue_deferred:	[ internal use only ]
  *
  *	Request removal of the given page from its current page
  *	queue.  Physical removal from the queue may be deferred
  *	indefinitely.
  */
 void
 vm_page_dequeue_deferred(vm_page_t m)
 {
 	vm_page_astate_t new, old;
 
 	old = vm_page_astate_load(m);
 	do {
 		if (old.queue == PQ_NONE) {
 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
 			    ("%s: page %p has unexpected queue state",
 			    __func__, m));
 			break;
 		}
 		new = old;
 		new.flags |= PGA_DEQUEUE;
 	} while (!vm_page_pqstate_commit_request(m, &old, new));
 }
 
 /*
  *	vm_page_dequeue:
  *
  *	Remove the page from whichever page queue it's in, if any, before
  *	returning.
  */
 void
 vm_page_dequeue(vm_page_t m)
 {
 	vm_page_astate_t new, old;
 
 	old = vm_page_astate_load(m);
 	do {
 		if (old.queue == PQ_NONE) {
 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
 			    ("%s: page %p has unexpected queue state",
 			    __func__, m));
 			break;
 		}
 		new = old;
 		new.flags &= ~PGA_QUEUE_OP_MASK;
 		new.queue = PQ_NONE;
 	} while (!vm_page_pqstate_commit_dequeue(m, &old, new));
 
 }
 
 /*
  * Schedule the given page for insertion into the specified page queue.
  * Physical insertion of the page may be deferred indefinitely.
  */
 static void
 vm_page_enqueue(vm_page_t m, uint8_t queue)
 {
 
 	KASSERT(m->a.queue == PQ_NONE &&
 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("%s: page %p is already enqueued", __func__, m));
 	KASSERT(m->ref_count > 0,
 	    ("%s: page %p does not carry any references", __func__, m));
 
 	m->a.queue = queue;
 	if ((m->a.flags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
 	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
  *	vm_page_free_prep:
  *
  *	Prepares the given page to be put on the free list,
  *	disassociating it from any VM object. The caller may return
  *	the page to the free list only if this function returns true.
  *
  *	The object, if it exists, must be locked, and then the page must
  *	be xbusy.  Otherwise the page must be not busied.  A managed
  *	page must be unmapped.
  */
 static bool
 vm_page_free_prep(vm_page_t m)
 {
 
 	/*
 	 * Synchronize with threads that have dropped a reference to this
 	 * page.
 	 */
 	atomic_thread_fence_acq();
 
 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
 	if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
 		uint64_t *p;
 		int i;
 		p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 		for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
 			KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
 			    m, i, (uintmax_t)*p));
 	}
 #endif
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_prep: freeing mapped page %p", m));
 		KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
 		    ("vm_page_free_prep: mapping flags set in page %p", m));
 	} else {
 		KASSERT(m->a.queue == PQ_NONE,
 		    ("vm_page_free_prep: unmanaged page %p is queued", m));
 	}
 	VM_CNT_INC(v_tfree);
 
 	if (m->object != NULL) {
 		KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
 		    ((m->object->flags & OBJ_UNMANAGED) != 0),
 		    ("vm_page_free_prep: managed flag mismatch for page %p",
 		    m));
 		vm_page_assert_xbusied(m);
 
 		/*
 		 * The object reference can be released without an atomic
 		 * operation.
 		 */
 		KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
 		    m->ref_count == VPRC_OBJREF,
 		    ("vm_page_free_prep: page %p has unexpected ref_count %u",
 		    m, m->ref_count));
 		vm_page_object_remove(m);
 		m->ref_count -= VPRC_OBJREF;
 	} else
 		vm_page_assert_unbusied(m);
 
 	vm_page_busy_free(m);
 
 	/*
 	 * If the page is fictitious, we are done: such pages are never
 	 * placed on the free lists.
 	 */
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		KASSERT(m->ref_count == 1,
 		    ("fictitious page %p is referenced", m));
 		KASSERT(m->a.queue == PQ_NONE,
 		    ("fictitious page %p is queued", m));
 		return (false);
 	}
 
 	/*
 	 * Pages need not be dequeued before they are returned to the physical
 	 * memory allocator, but they must at least be marked for a deferred
 	 * dequeue.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_dequeue_deferred(m);
 
 	m->valid = 0;
 	vm_page_undirty(m);
 
 	if (m->ref_count != 0)
 		panic("vm_page_free_prep: page %p has references", m);
 
 	/*
 	 * Restore the default memory attribute to the page.
 	 */
 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
 
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Determine whether the page belongs to a reservation.  If the page was
 	 * allocated from a per-CPU cache, it cannot belong to a reservation, so
 	 * as an optimization, we avoid the check in that case.
 	 */
 	if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
 		return (false);
 #endif
 
 	return (true);
 }
 
 /*
  *	vm_page_free_toq:
  *
  *	Returns the given page to the free list, disassociating it
  *	from any VM object.
  *
  *	The object must be locked.  The page must be exclusively busied if it
  *	belongs to an object.
  */
 static void
 vm_page_free_toq(vm_page_t m)
 {
 	struct vm_domain *vmd;
 	uma_zone_t zone;
 
 	if (!vm_page_free_prep(m))
 		return;
 
 	vmd = vm_pagequeue_domain(m);
 	zone = vmd->vmd_pgcache[m->pool].zone;
 	if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
 		uma_zfree(zone, m);
 		return;
 	}
 	vm_domain_free_lock(vmd);
 	vm_phys_free_pages(m, 0);
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, 1);
 }
 
 /*
  *	vm_page_free_pages_toq:
  *
  *	Returns a list of pages to the free list, disassociating it
  *	from any VM object.  In other words, this is equivalent to
  *	calling vm_page_free_toq() for each page of the list.
  */
 void
 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
 {
 	vm_page_t m;
 	int count;
 
 	if (SLIST_EMPTY(free))
 		return;
 
 	count = 0;
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		count++;
 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
 		vm_page_free_toq(m);
 	}
 
 	if (update_wire_count)
 		vm_wire_sub(count);
 }
 
 /*
  * Mark this page as wired down.  For managed pages, this prevents reclamation
  * by the page daemon and freeing of the page when the containing object,
  * if any, is destroyed.
  */
 void
 vm_page_wire(vm_page_t m)
 {
 	u_int old;
 
 #ifdef INVARIANTS
 	if (m->object != NULL && !vm_page_busied(m) &&
 	    !vm_object_busied(m->object))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 #endif
 	KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
 	    VPRC_WIRE_COUNT(m->ref_count) >= 1,
 	    ("vm_page_wire: fictitious page %p has zero wirings", m));
 
 	old = atomic_fetchadd_int(&m->ref_count, 1);
 	KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
 	    ("vm_page_wire: counter overflow for page %p", m));
 	if (VPRC_WIRE_COUNT(old) == 0) {
 		if ((m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_DEQUEUE);
 		vm_wire_add(1);
 	}
 }
 
 /*
  * Attempt to wire a mapped page following a pmap lookup of that page.
  * This may fail if a thread is concurrently tearing down mappings of the page.
  * The transient failure is acceptable because it translates to the
  * failure of the caller pmap_extract_and_hold(), which should then be
  * followed by the vm_fault() fallback, see e.g. vm_fault_quick_hold_pages().
  */
 bool
 vm_page_wire_mapped(vm_page_t m)
 {
 	u_int old;
 
 	old = m->ref_count;
 	do {
 		KASSERT(old > 0,
 		    ("vm_page_wire_mapped: wiring unreferenced page %p", m));
 		if ((old & VPRC_BLOCKED) != 0)
 			return (false);
 	} while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
 
 	if (VPRC_WIRE_COUNT(old) == 0) {
 		if ((m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_DEQUEUE);
 		vm_wire_add(1);
 	}
 	return (true);
 }
 
 /*
  * Release a wiring reference to a managed page.  If the page still belongs to
  * an object, update its position in the page queues to reflect the reference.
  * If the wiring was the last reference to the page, free the page.
  */
 static void
 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
 {
 	u_int old;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("%s: page %p is unmanaged", __func__, m));
 
 	/*
 	 * Update LRU state before releasing the wiring reference.
 	 * Use a release store when updating the reference count to
 	 * synchronize with vm_page_free_prep().
 	 */
 	old = m->ref_count;
 	do {
 		KASSERT(VPRC_WIRE_COUNT(old) > 0,
 		    ("vm_page_unwire: wire count underflow for page %p", m));
 
 		if (old > VPRC_OBJREF + 1) {
 			/*
 			 * The page has at least one other wiring reference.  An
 			 * earlier iteration of this loop may have called
 			 * vm_page_release_toq() and cleared PGA_DEQUEUE, so
 			 * re-set it if necessary.
 			 */
 			if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
 				vm_page_aflag_set(m, PGA_DEQUEUE);
 		} else if (old == VPRC_OBJREF + 1) {
 			/*
 			 * This is the last wiring.  Clear PGA_DEQUEUE and
 			 * update the page's queue state to reflect the
 			 * reference.  If the page does not belong to an object
 			 * (i.e., the VPRC_OBJREF bit is clear), we only need to
 			 * clear leftover queue state.
 			 */
 			vm_page_release_toq(m, nqueue, noreuse);
 		} else if (old == 1) {
 			vm_page_aflag_clear(m, PGA_DEQUEUE);
 		}
 	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
 
 	if (VPRC_WIRE_COUNT(old) == 1) {
 		vm_wire_sub(1);
 		if (old == 1)
 			vm_page_free(m);
 	}
 }
 
 /*
  * Release one wiring of the specified page, potentially allowing it to be
  * paged out.
  *
  * Only managed pages belonging to an object can be paged out.  If the number
  * of wirings transitions to zero and the page is eligible for page out, then
  * the page is added to the specified paging queue.  If the released wiring
  * represented the last reference to the page, the page is freed.
  */
 void
 vm_page_unwire(vm_page_t m, uint8_t nqueue)
 {
 
 	KASSERT(nqueue < PQ_COUNT,
 	    ("vm_page_unwire: invalid queue %u request for page %p",
 	    nqueue, m));
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
 		if (vm_page_unwire_noq(m) && m->ref_count == 0)
 			vm_page_free(m);
 		return;
 	}
 	vm_page_unwire_managed(m, nqueue, false);
 }
 
 /*
  * Unwire a page without (re-)inserting it into a page queue.  It is up
  * to the caller to enqueue, requeue, or free the page as appropriate.
  * In most cases involving managed pages, vm_page_unwire() should be used
  * instead.
  */
 bool
 vm_page_unwire_noq(vm_page_t m)
 {
 	u_int old;
 
 	old = vm_page_drop(m, 1);
 	KASSERT(VPRC_WIRE_COUNT(old) != 0,
 	    ("%s: counter underflow for page %p", __func__,  m));
 	KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
 	    ("%s: missing ref on fictitious page %p", __func__, m));
 
 	if (VPRC_WIRE_COUNT(old) > 1)
 		return (false);
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_aflag_clear(m, PGA_DEQUEUE);
 	vm_wire_sub(1);
 	return (true);
 }
 
 /*
  * Ensure that the page ends up in the specified page queue.  If the page is
  * active or being moved to the active queue, ensure that its act_count is
  * at least ACT_INIT but do not otherwise mess with it.
  */
 static __always_inline void
 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
 {
 	vm_page_astate_t old, new;
 
 	KASSERT(m->ref_count > 0,
 	    ("%s: page %p does not carry any references", __func__, m));
 	KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
 	    ("%s: invalid flags %x", __func__, nflag));
 
 	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
 		return;
 
 	old = vm_page_astate_load(m);
 	do {
 		if ((old.flags & PGA_DEQUEUE) != 0)
 			break;
 		new = old;
 		new.flags &= ~PGA_QUEUE_OP_MASK;
 		if (nqueue == PQ_ACTIVE)
 			new.act_count = max(old.act_count, ACT_INIT);
 		if (old.queue == nqueue) {
 			/*
 			 * There is no need to requeue pages already in the
 			 * active queue.
 			 */
 			if (nqueue != PQ_ACTIVE ||
 			    (old.flags & PGA_ENQUEUED) == 0)
 				new.flags |= nflag;
 		} else {
 			new.flags |= nflag;
 			new.queue = nqueue;
 		}
 	} while (!vm_page_pqstate_commit(m, &old, new));
 }
 
 /*
  * Put the specified page on the active list (if appropriate).
  */
 void
 vm_page_activate(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
 }
 
 /*
  * Move the specified page to the tail of the inactive queue, or requeue
  * the page if it is already in the inactive queue.
  */
 void
 vm_page_deactivate(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
 }
 
 void
 vm_page_deactivate_noreuse(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
 }
 
 /*
  * Put a page in the laundry, or requeue it if it is already there.
  */
 void
 vm_page_launder(vm_page_t m)
 {
 
 	vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
 }
 
 /*
  * Put a page in the PQ_UNSWAPPABLE holding queue.
  */
 void
 vm_page_unswappable(vm_page_t m)
 {
 
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("page %p already unswappable", m));
 
 	vm_page_dequeue(m);
 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
 }
 
 /*
  * Release a page back to the page queues in preparation for unwiring.
  */
 static void
 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
 {
 	vm_page_astate_t old, new;
 	uint16_t nflag;
 
 	/*
 	 * Use a check of the valid bits to determine whether we should
 	 * accelerate reclamation of the page.  The object lock might not be
 	 * held here, in which case the check is racy.  At worst we will either
 	 * accelerate reclamation of a valid page and violate LRU, or
 	 * unnecessarily defer reclamation of an invalid page.
 	 *
 	 * If we were asked not to cache the page, place it near the head of
 	 * the inactive queue so that it is reclaimed sooner.
 	 */
 	if (noreuse || vm_page_none_valid(m)) {
 		nqueue = PQ_INACTIVE;
 		nflag = PGA_REQUEUE_HEAD;
 	} else {
 		nflag = PGA_REQUEUE;
 	}
 
 	old = vm_page_astate_load(m);
 	do {
 		new = old;
 
 		/*
 		 * If the page is already in the active queue and we are not
 		 * trying to accelerate reclamation, simply mark it as
 		 * referenced and avoid any queue operations.
 		 */
 		new.flags &= ~PGA_QUEUE_OP_MASK;
 		if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE &&
 		    (old.flags & PGA_ENQUEUED) != 0)
 			new.flags |= PGA_REFERENCED;
 		else {
 			new.flags |= nflag;
 			new.queue = nqueue;
 		}
 	} while (!vm_page_pqstate_commit(m, &old, new));
 }
 
 /*
  * Unwire a page and either attempt to free it or re-add it to the page queues.
  */
 void
 vm_page_release(vm_page_t m, int flags)
 {
 	vm_object_t object;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("vm_page_release: page %p is unmanaged", m));
 
 	if ((flags & VPR_TRYFREE) != 0) {
 		for (;;) {
 			object = atomic_load_ptr(&m->object);
 			if (object == NULL)
 				break;
 			/* Depends on type-stability. */
 			if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
 				break;
 			if (object == m->object) {
 				vm_page_release_locked(m, flags);
 				VM_OBJECT_WUNLOCK(object);
 				return;
 			}
 			VM_OBJECT_WUNLOCK(object);
 		}
 	}
 	vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
 }
 
 /* See vm_page_release(). */
 void
 vm_page_release_locked(vm_page_t m, int flags)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("vm_page_release_locked: page %p is unmanaged", m));
 
 	if (vm_page_unwire_noq(m)) {
 		if ((flags & VPR_TRYFREE) != 0 &&
 		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
 		    m->dirty == 0 && vm_page_tryxbusy(m)) {
 			/*
 			 * An unlocked lookup may have wired the page before the
 			 * busy lock was acquired, in which case the page must
 			 * not be freed.
 			 */
 			if (__predict_true(!vm_page_wired(m))) {
 				vm_page_free(m);
 				return;
 			}
 			vm_page_xunbusy(m);
 		} else {
 			vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
 		}
 	}
 }
 
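 /*
  * If the page has no wirings, temporarily set VPRC_BLOCKED to prevent
  * concurrent wiring via pmap lookups (see vm_page_wire_mapped()) while the
  * pmap operation "op" is applied to the page.  Returns false if the page
  * is wired.
  */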
 static bool
 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
 {
 	u_int old;
 
 	KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
 	    ("vm_page_try_blocked_op: page %p has no object", m));
 	KASSERT(vm_page_busied(m),
 	    ("vm_page_try_blocked_op: page %p is not busy", m));
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	old = m->ref_count;
 	do {
 		KASSERT(old != 0,
 		    ("vm_page_try_blocked_op: page %p has no references", m));
 		if (VPRC_WIRE_COUNT(old) != 0)
 			return (false);
 	} while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));
 
 	(op)(m);
 
 	/*
 	 * If the object is read-locked, new wirings may be created via an
 	 * object lookup.
 	 */
 	old = vm_page_drop(m, VPRC_BLOCKED);
 	KASSERT(!VM_OBJECT_WOWNED(m->object) ||
 	    old == (VPRC_BLOCKED | VPRC_OBJREF),
 	    ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
 	    old, m));
 	return (true);
 }
 
 /*
  * Atomically check for wirings and remove all mappings of the page.
  */
 bool
 vm_page_try_remove_all(vm_page_t m)
 {
 
 	return (vm_page_try_blocked_op(m, pmap_remove_all));
 }
 
 /*
  * Atomically check for wirings and remove all writeable mappings of the page.
  */
 bool
 vm_page_try_remove_write(vm_page_t m)
 {
 
 	return (vm_page_try_blocked_op(m, pmap_remove_write));
 }
 
 /*
  * vm_page_advise
  *
  * 	Apply the specified advice to the given page.
  */
 void
 vm_page_advise(vm_page_t m, int advice)
 {
 
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	vm_page_assert_xbusied(m);
 
 	if (advice == MADV_FREE)
 		/*
 		 * Mark the page clean.  This will allow the page to be freed
 		 * without first paging it out.  MADV_FREE pages are often
 		 * quickly reused by malloc(3), so we do not do anything that
 		 * would result in a page fault on a later access.
 		 */
 		vm_page_undirty(m);
 	else if (advice != MADV_DONTNEED) {
 		if (advice == MADV_WILLNEED)
 			vm_page_activate(m);
 		return;
 	}
 
 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
 		vm_page_dirty(m);
 
 	/*
 	 * Clear any references to the page.  Otherwise, the page daemon will
 	 * immediately reactivate the page.
 	 */
 	vm_page_aflag_clear(m, PGA_REFERENCED);
 
 	/*
 	 * Place clean pages near the head of the inactive queue rather than
 	 * the tail, thus defeating the queue's LRU operation and ensuring that
 	 * the page will be reused quickly.  Dirty pages not already in the
 	 * laundry are moved there.
 	 */
 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
 	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
 
 /*
  *	vm_page_grab_release
  *
  *	Helper routine for grab functions to release busy on return.
  */
 static inline void
 vm_page_grab_release(vm_page_t m, int allocflags)
 {
 
 	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
 		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
 			vm_page_sunbusy(m);
 		else
 			vm_page_xunbusy(m);
 	}
 }
 
 /*
  *	vm_page_grab_sleep
  *
  *	Sleep for busy according to VM_ALLOC_ parameters.  Returns true
  *	if the caller should retry and false otherwise.
  *
  *	If the object is locked on entry, it will be unlocked on false
  *	returns and still locked, but possibly having been dropped, on
  *	true returns.
  */
 static bool
 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
     const char *wmesg, int allocflags, bool locked)
 {
 
 	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 		return (false);
 
 	/*
 	 * Reference the page before unlocking and sleeping so that
 	 * the page daemon is less likely to reclaim it.
 	 */
 	if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
 		vm_page_reference(m);
 
 	if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
 	    locked)
 		VM_OBJECT_WLOCK(object);
 	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
 		return (false);
 
 	return (true);
 }
 
 /*
  * Assert that the grab flags are valid.
  */
 static inline void
 vm_page_grab_check(int allocflags)
 {
 
 	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_WIRED) != 0,
 	    ("vm_page_grab*: the pages must be busied or wired"));
 
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 }
 
 /*
  * Calculate the page allocation flags for grab.
  */
 static inline int
 vm_page_grab_pflags(int allocflags)
 {
 	int pflags;
 
 	pflags = allocflags &
 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
 	    VM_ALLOC_NOBUSY | VM_ALLOC_IGN_SBUSY);
 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 		pflags |= VM_ALLOC_WAITFAIL;
 	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
 		pflags |= VM_ALLOC_SBUSY;
 
 	return (pflags);
 }
 
 /*
  * Grab a page, waiting until we are woken up due to the page
  * changing state.  We keep waiting as long as the page continues
  * to be in the object.  If the page doesn't exist, first allocate it
  * and then conditionally zero it.
  *
  * This routine may sleep.
  *
  * The object must be locked on entry.  The lock will, however, be released
  * and reacquired if the routine sleeps.
  */
 vm_page_t
 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_grab_check(allocflags);
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		if (!vm_page_tryacquire(m, allocflags)) {
 			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true))
 				goto retrylookup;
 			return (NULL);
 		}
 		goto out;
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (NULL);
 	m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
 	if (m == NULL) {
 		if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
 			return (NULL);
 		goto retrylookup;
 	}
 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 
 out:
 	vm_page_grab_release(m, allocflags);
 
 	return (m);
 }
 
 /*
  * Locklessly attempt to acquire a page given a (object, pindex) tuple
  * and an optional previous page to avoid the radix lookup.  The resulting
  * page will be validated against the identity tuple and busied or wired
  * as requested.  A NULL *mp return guarantees that the page was not in
  * radix at the time of the call but callers must perform higher level
  * synchronization or retry the operation under a lock if they require
  * an atomic answer.  This is the only lock free validation routine,
  * other routines can depend on the resulting page state.
  *
  * The return value indicates whether the operation failed due to caller
  * flags.  The return is tri-state with mp:
  *
  * (true, *mp != NULL) - The operation was successful.
  * (true, *mp == NULL) - The page was not found in tree.
  * (false, *mp == NULL) - WAITFAIL or NOWAIT prevented acquisition.
  */
 static bool
 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex,
     vm_page_t prev, vm_page_t *mp, int allocflags)
 {
 	vm_page_t m;
 
 	vm_page_grab_check(allocflags);
 	MPASS(prev == NULL || vm_page_busied(prev) || vm_page_wired(prev));
 
 	*mp = NULL;
 	for (;;) {
 		/*
 		 * We may see a false NULL here because the previous page
 		 * has been removed or just inserted and the list is loaded
 		 * without barriers.  Switch to radix to verify.
 		 */
 		if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL ||
 		    QMD_IS_TRASHED(m) || m->pindex != pindex ||
 		    atomic_load_ptr(&m->object) != object) {
 			prev = NULL;
 			/*
 			 * This guarantees the result is instantaneously
 			 * correct.
 			 */
 			m = vm_radix_lookup_unlocked(&object->rtree, pindex);
 		}
 		if (m == NULL)
 			return (true);
 		if (vm_page_trybusy(m, allocflags)) {
 			if (m->object == object && m->pindex == pindex)
 				break;
 			/* relookup. */
 			vm_page_busy_release(m);
 			cpu_spinwait();
 			continue;
 		}
 		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
 		    allocflags, false))
 			return (false);
 	}
 	if ((allocflags & VM_ALLOC_WIRED) != 0)
 		vm_page_wire(m);
 	vm_page_grab_release(m, allocflags);
 	*mp = m;
 	return (true);
 }
 
 /*
  * Try to locklessly grab a page and fall back to the object lock if NOCREAT
  * is not set.
  */
 vm_page_t
 vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 
 	vm_page_grab_check(allocflags);
 
 	if (!vm_page_acquire_unlocked(object, pindex, NULL, &m, allocflags))
 		return (NULL);
 	if (m != NULL)
 		return (m);
 
 	/*
 	 * The radix lockless lookup should never return a false negative.
 	 * If the user specifies NOCREAT, they are guaranteed there
 	 * was no page present at the instant of the call.  A NOCREAT caller
 	 * must handle create races gracefully.
 	 */
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (NULL);
 
 	VM_OBJECT_WLOCK(object);
 	m = vm_page_grab(object, pindex, allocflags);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (m);
 }
 
 /*
  * Grab a page and make it valid, paging in if necessary.  Pages missing from
  * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
  * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
  * in simultaneously.  Additional pages will be left on a paging queue but
  * will neither be wired nor busy regardless of allocflags.
  */
 int
 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 	vm_page_t ma[VM_INITIAL_PAGEIN];
 	int after, i, pflags, rv;
 
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 	KASSERT((allocflags &
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
 	    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY);
 	pflags |= VM_ALLOC_WAITFAIL;
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		/*
 		 * If the page is fully valid it can only become invalid
 		 * with the object lock held.  If it is not valid it can
 		 * become valid with the busy lock held.  Therefore, we
 		 * may unnecessarily lock the exclusive busy here if we
 		 * race with I/O completion not using the object lock.
 		 * However, we will not end up with an invalid page and a
 		 * shared lock.
 		 */
 		if (!vm_page_trybusy(m,
 		    vm_page_all_valid(m) ? allocflags : 0)) {
 			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true);
 			goto retrylookup;
 		}
 		if (vm_page_all_valid(m))
 			goto out;
 		if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 			vm_page_busy_release(m);
 			*mp = NULL;
 			return (VM_PAGER_FAIL);
 		}
 	} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
 	} else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
 		if (!vm_pager_can_alloc_page(object, pindex))
 			return (VM_PAGER_AGAIN);
 		goto retrylookup;
 	}
 
 	vm_page_assert_xbusied(m);
 	if (vm_pager_has_page(object, pindex, NULL, &after)) {
 		after = MIN(after, VM_INITIAL_PAGEIN);
 		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
 		after = MAX(after, 1);
 		ma[0] = m;
 		for (i = 1; i < after; i++) {
 			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
 				if (vm_page_any_valid(ma[i]) ||
 				    !vm_page_tryxbusy(ma[i]))
 					break;
 			} else {
 				ma[i] = vm_page_alloc(object, m->pindex + i,
 				    VM_ALLOC_NORMAL);
 				if (ma[i] == NULL)
 					break;
 			}
 		}
 		after = i;
 		vm_object_pip_add(object, after);
 		VM_OBJECT_WUNLOCK(object);
 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
 		VM_OBJECT_WLOCK(object);
 		vm_object_pip_wakeupn(object, after);
 		/* Pager may have replaced a page. */
 		m = ma[0];
 		if (rv != VM_PAGER_OK) {
 			for (i = 0; i < after; i++) {
 				if (!vm_page_wired(ma[i]))
 					vm_page_free(ma[i]);
 				else
 					vm_page_xunbusy(ma[i]);
 			}
 			*mp = NULL;
 			return (rv);
 		}
 		for (i = 1; i < after; i++)
 			vm_page_readahead_finish(ma[i]);
 		MPASS(vm_page_all_valid(m));
 	} else {
 		vm_page_zero_invalid(m, TRUE);
 	}
 out:
 	if ((allocflags & VM_ALLOC_WIRED) != 0)
 		vm_page_wire(m);
 	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
 		vm_page_busy_downgrade(m);
 	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
 		vm_page_busy_release(m);
 	*mp = m;
 	return (VM_PAGER_OK);
 }
 
 /*
  * Locklessly grab a valid page.  If the page is not valid or not yet
  * allocated this will fall back to the object lock method.
  */
 int
 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
     vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 	int flags;
 	int error;
 
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
 	    "mismatch"));
 	KASSERT((allocflags &
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));
 
 	/*
 	 * Attempt a lockless lookup and busy.  We need at least an sbusy
 	 * before we can inspect the valid field and return a wired page.
 	 */
 	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
 	if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags))
 		return (VM_PAGER_FAIL);
 	if ((m = *mp) != NULL) {
 		if (vm_page_all_valid(m)) {
 			if ((allocflags & VM_ALLOC_WIRED) != 0)
 				vm_page_wire(m);
 			vm_page_grab_release(m, allocflags);
 			return (VM_PAGER_OK);
 		}
 		vm_page_busy_release(m);
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
 	}
 	VM_OBJECT_WLOCK(object);
 	error = vm_page_grab_valid(mp, object, pindex, allocflags);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (error);
 }
 
 /*
  * Return the specified range of pages from the given object.  For each
  * page offset within the range, if a page already exists within the object
  * at that offset and it is busy, then wait for it to change state.  If,
  * instead, the page doesn't exist, then allocate it.
  *
  * The caller must always specify an allocation class.
  *
  * allocation classes:
  *	VM_ALLOC_NORMAL		normal process request
  *	VM_ALLOC_SYSTEM		system *really* needs the pages
  *
  * The caller must always specify that the pages are to be busied and/or
  * wired.
  *
  * optional allocation flags:
  *	VM_ALLOC_IGN_SBUSY	do not sleep on soft busy pages
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NOWAIT		do not sleep
  *	VM_ALLOC_SBUSY		set page to sbusy state
  *	VM_ALLOC_WIRED		wire the pages
  *	VM_ALLOC_ZERO		zero and validate any invalid pages
  *
  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
  * may return a partial prefix of the requested range.
  */
 int
 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count)
 {
 	vm_page_t m, mpred;
 	int pflags;
 	int i;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
 	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
 	KASSERT(count > 0,
 	    ("vm_page_grab_pages: invalid page count %d", count));
 	vm_page_grab_check(allocflags);
 
 	pflags = vm_page_grab_pflags(allocflags);
 	i = 0;
 retrylookup:
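 	/*
 	 * Look up the page at the current index, or failing that its
 	 * predecessor, which serves as the insertion hint when new pages
 	 * must be allocated.
 	 */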
 	m = vm_radix_lookup_le(&object->rtree, pindex + i);
 	if (m == NULL || m->pindex != pindex + i) {
 		mpred = m;
 		m = NULL;
 	} else
 		mpred = TAILQ_PREV(m, pglist, listq);
 	for (; i < count; i++) {
 		if (m != NULL) {
 			if (!vm_page_tryacquire(m, allocflags)) {
 				if (vm_page_grab_sleep(object, m, pindex + i,
 				    "grbmaw", allocflags, true))
 					goto retrylookup;
 				break;
 			}
 		} else {
 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 				break;
 			m = vm_page_alloc_after(object, pindex + i,
 			    pflags | VM_ALLOC_COUNT(count - i), mpred);
 			if (m == NULL) {
 				if ((allocflags & (VM_ALLOC_NOWAIT |
 				    VM_ALLOC_WAITFAIL)) != 0)
 					break;
 				goto retrylookup;
 			}
 		}
 		if (vm_page_none_valid(m) &&
 		    (allocflags & VM_ALLOC_ZERO) != 0) {
 			if ((m->flags & PG_ZERO) == 0)
 				pmap_zero_page(m);
 			vm_page_valid(m);
 		}
 		vm_page_grab_release(m, allocflags);
 		ma[i] = mpred = m;
 		m = vm_page_next(m);
 	}
 	return (i);
 }
 
 /*
  * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
  * and will fall back to the locked variant to handle allocation.
  */
 int
 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
     int allocflags, vm_page_t *ma, int count)
 {
 	vm_page_t m, pred;
 	int flags;
 	int i;
 
 	KASSERT(count > 0,
 	    ("vm_page_grab_pages_unlocked: invalid page count %d", count));
 	vm_page_grab_check(allocflags);
 
 	/*
 	 * Modify flags for lockless acquire to hold the page until we
 	 * set it valid if necessary.
 	 */
 	flags = allocflags & ~VM_ALLOC_NOBUSY;
 	pred = NULL;
 	for (i = 0; i < count; i++, pindex++) {
 		if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags))
 			return (i);
 		if (m == NULL)
 			break;
 		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
 			if ((m->flags & PG_ZERO) == 0)
 				pmap_zero_page(m);
 			vm_page_valid(m);
 		}
 		/* m will still be wired or busy according to flags. */
 		vm_page_grab_release(m, allocflags);
 		pred = ma[i] = m;
 	}
 	if (i == count || (allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (i);
 	count -= i;
 	VM_OBJECT_WLOCK(object);
 	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
 	VM_OBJECT_WUNLOCK(object);
 
 	return (i);
 }
 
 /*
  * Mapping function for valid or dirty bits in a page.
  *
  * Inputs are required to range within a page.
  */
 vm_page_bits_t
 vm_page_bits(int base, int size)
 {
 	int first_bit;
 	int last_bit;
 
 	KASSERT(
 	    base + size <= PAGE_SIZE,
 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
 	);
 
 	if (size == 0)		/* handle degenerate case */
 		return (0);
 
 	first_bit = base >> DEV_BSHIFT;
 	last_bit = (base + size - 1) >> DEV_BSHIFT;
 
 	return (((vm_page_bits_t)2 << last_bit) -
 	    ((vm_page_bits_t)1 << first_bit));
 }
 
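 /*
  * Set the given bits in a page's valid or dirty bitmask, using an atomic
  * operation of the appropriate width for vm_page_bits_t when one exists.
  */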
 void
 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
 {
 
 #if PAGE_SIZE == 32768
 	atomic_set_64((uint64_t *)bits, set);
 #elif PAGE_SIZE == 16384
 	atomic_set_32((uint32_t *)bits, set);
 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
 	atomic_set_16((uint16_t *)bits, set);
 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
 	atomic_set_8((uint8_t *)bits, set);
 #else		/* PAGE_SIZE <= 8192 */
 	uintptr_t addr;
 	int shift;
 
 	addr = (uintptr_t)bits;
 	/*
 	 * Use a trick to perform a 32-bit atomic on the
 	 * containing aligned word, to not depend on the existence
 	 * of atomic_{set, clear}_{8, 16}.
 	 */
 	shift = addr & (sizeof(uint32_t) - 1);
 #if BYTE_ORDER == BIG_ENDIAN
 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
 #else
 	shift *= NBBY;
 #endif
 	addr &= ~(sizeof(uint32_t) - 1);
 	atomic_set_32((uint32_t *)addr, set << shift);
 #endif		/* PAGE_SIZE */
 }
 
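 /*
  * Clear the given bits in a page's valid or dirty bitmask; the counterpart
  * of vm_page_bits_set().
  */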
 static inline void
 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
 {
 
 #if PAGE_SIZE == 32768
 	atomic_clear_64((uint64_t *)bits, clear);
 #elif PAGE_SIZE == 16384
 	atomic_clear_32((uint32_t *)bits, clear);
 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
 	atomic_clear_16((uint16_t *)bits, clear);
 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
 	atomic_clear_8((uint8_t *)bits, clear);
 #else		/* PAGE_SIZE <= 8192 */
 	uintptr_t addr;
 	int shift;
 
 	addr = (uintptr_t)bits;
 	/*
 	 * Use a trick to perform a 32-bit atomic on the
 	 * containing aligned word, to not depend on the existence
 	 * of atomic_{set, clear}_{8, 16}.
 	 */
 	shift = addr & (sizeof(uint32_t) - 1);
 #if BYTE_ORDER == BIG_ENDIAN
 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
 #else
 	shift *= NBBY;
 #endif
 	addr &= ~(sizeof(uint32_t) - 1);
 	atomic_clear_32((uint32_t *)addr, clear << shift);
 #endif		/* PAGE_SIZE */
 }
 
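 /*
  * Atomically replace a page's valid or dirty bitmask with newbits,
  * returning the previous value.
  */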
 static inline vm_page_bits_t
 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
 {
 #if PAGE_SIZE == 32768
 	uint64_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
 	return (old);
 #elif PAGE_SIZE == 16384
 	uint32_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
 	return (old);
 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
 	uint16_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
 	return (old);
 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
 	uint8_t old;
 
 	old = *bits;
 	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
 	return (old);
 #else		/* PAGE_SIZE <= 4096*/
 	uintptr_t addr;
 	uint32_t old, new, mask;
 	int shift;
 
 	addr = (uintptr_t)bits;
 	/*
 	 * Use a trick to perform a 32-bit atomic on the
 	 * containing aligned word, to not depend on the existence
 	 * of atomic_{set, swap, clear}_{8, 16}.
 	 */
 	shift = addr & (sizeof(uint32_t) - 1);
 #if BYTE_ORDER == BIG_ENDIAN
 	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
 #else
 	shift *= NBBY;
 #endif
 	addr &= ~(sizeof(uint32_t) - 1);
 	mask = VM_PAGE_BITS_ALL << shift;
 
 	old = *bits;
 	do {
 		new = old & ~mask;
 		new |= newbits << shift;
 	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
 	return (old >> shift);
 #endif		/* PAGE_SIZE */
 }
 
 /*
  *	vm_page_set_valid_range:
  *
  *	Sets portions of a page valid.  The arguments are expected
  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
  *	of any partial chunks touched by the range.  The invalid portion of
  *	such chunks will be zeroed.
  *
  *	(base + size) must be less than or equal to PAGE_SIZE.
  */
 void
 vm_page_set_valid_range(vm_page_t m, int base, int size)
 {
 	int endoff, frag;
 	vm_page_bits_t pagebits;
 
 	vm_page_assert_busied(m);
 	if (size == 0)	/* handle degenerate case */
 		return;
 
 	/*
 	 * If the base is not DEV_BSIZE aligned and the valid
 	 * bit is clear, we have to zero out a portion of the
 	 * first block.
 	 */
 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, frag, base - frag);
 
 	/*
 	 * If the ending offset is not DEV_BSIZE aligned and the
 	 * valid bit is clear, we have to zero out a portion of
 	 * the last block.
 	 */
 	endoff = base + size;
 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, endoff,
 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 
 	/*
 	 * Assert that no previously invalid block that is now being validated
 	 * is already dirty.
 	 */
 	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
 	    ("vm_page_set_valid_range: page %p is dirty", m));
 
 	/*
 	 * Set valid bits inclusive of any overlap.
 	 */
 	pagebits = vm_page_bits(base, size);
 	if (vm_page_xbusied(m))
 		m->valid |= pagebits;
 	else
 		vm_page_bits_set(m, &m->valid, pagebits);
 }
 
 /*
  * Set the page dirty bits and free the invalid swap space if
  * present.  Returns the previous dirty bits.
  */
 vm_page_bits_t
 vm_page_set_dirty(vm_page_t m)
 {
 	vm_page_bits_t old;
 
 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
 
 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
 		old = m->dirty;
 		m->dirty = VM_PAGE_BITS_ALL;
 	} else
 		old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
 	if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
 		vm_pager_page_unswapped(m);
 
 	return (old);
 }
 
 /*
  * Clear the given bits from the specified page's dirty field.
  */
 static __inline void
 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
 {
 
 	vm_page_assert_busied(m);
 
 	/*
 	 * If the page is xbusied and not write mapped, we are the
 	 * only thread that can modify dirty bits.  Otherwise, the pmap
 	 * layer can call vm_page_dirty() without holding a distinguished
 	 * lock.  The combination of page busy and atomic operations
 	 * suffices to guarantee consistency of the page dirty field.
 	 */
 	if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
 		m->dirty &= ~pagebits;
 	else
 		vm_page_bits_clear(m, &m->dirty, pagebits);
 }
 
 /*
  *	vm_page_set_validclean:
  *
  *	Sets portions of a page valid and clean.  The arguments are expected
  *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
  *	of any partial chunks touched by the range.  The invalid portion of
  *	such chunks will be zeroed.
  *
  *	(base + size) must be less than or equal to PAGE_SIZE.
  */
 void
 vm_page_set_validclean(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t oldvalid, pagebits;
 	int endoff, frag;
 
 	vm_page_assert_busied(m);
 	if (size == 0)	/* handle degenerate case */
 		return;
 
 	/*
 	 * If the base is not DEV_BSIZE aligned and the valid
 	 * bit is clear, we have to zero out a portion of the
 	 * first block.
 	 */
 	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, frag, base - frag);
 
 	/*
 	 * If the ending offset is not DEV_BSIZE aligned and the
 	 * valid bit is clear, we have to zero out a portion of
 	 * the last block.
 	 */
 	endoff = base + size;
 	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
 		pmap_zero_page_area(m, endoff,
 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 
 	/*
 	 * Set valid, clear dirty bits.  If validating the entire
 	 * page we can safely clear the pmap modify bit.  We also
 	 * use this opportunity to clear the PGA_NOSYNC flag.  If a process
 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
 	 * be set again.
 	 *
 	 * We set valid bits inclusive of any overlap, but we can only
 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
 	 * the range.
 	 */
 	oldvalid = m->valid;
 	pagebits = vm_page_bits(base, size);
 	if (vm_page_xbusied(m))
 		m->valid |= pagebits;
 	else
 		vm_page_bits_set(m, &m->valid, pagebits);
 #if 0	/* NOT YET */
 	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
 		frag = DEV_BSIZE - frag;
 		base += frag;
 		size -= frag;
 		if (size < 0)
 			size = 0;
 	}
 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 #endif
 	if (base == 0 && size == PAGE_SIZE) {
 		/*
 		 * The page can only be modified within the pmap if it is
 		 * mapped, and it can only be mapped if it was previously
 		 * fully valid.
 		 */
 		if (oldvalid == VM_PAGE_BITS_ALL)
 			/*
 			 * Perform the pmap_clear_modify() first.  Otherwise,
 			 * a concurrent pmap operation, such as
 			 * pmap_protect(), could clear a modification in the
 			 * pmap and set the dirty field on the page before
 			 * pmap_clear_modify() had begun and after the dirty
 			 * field was cleared here.
 			 */
 			pmap_clear_modify(m);
 		m->dirty = 0;
 		vm_page_aflag_clear(m, PGA_NOSYNC);
 	} else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
 		m->dirty &= ~pagebits;
 	else
 		vm_page_clear_dirty_mask(m, pagebits);
 }
 
 void
 vm_page_clear_dirty(vm_page_t m, int base, int size)
 {
 
 	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
 }
 
 /*
  *	vm_page_set_invalid:
  *
  *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
  *	valid and dirty bits for the affected areas are cleared.
  */
 void
 vm_page_set_invalid(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t bits;
 	vm_object_t object;
 
 	/*
 	 * The object lock is required so that pages can't be mapped
 	 * read-only while we're in the process of invalidating them.
 	 */
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_assert_busied(m);
 
 	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
 	    size >= object->un_pager.vnp.vnp_size)
 		bits = VM_PAGE_BITS_ALL;
 	else
 		bits = vm_page_bits(base, size);
 	if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
 		pmap_remove_all(m);
 	KASSERT((bits == 0 && vm_page_all_valid(m)) ||
 	    !pmap_page_is_mapped(m),
 	    ("vm_page_set_invalid: page %p is mapped", m));
 	if (vm_page_xbusied(m)) {
 		m->valid &= ~bits;
 		m->dirty &= ~bits;
 	} else {
 		vm_page_bits_clear(m, &m->valid, bits);
 		vm_page_bits_clear(m, &m->dirty, bits);
 	}
 }
 
 /*
  *	vm_page_invalid:
  *
  *	Invalidates the entire page.  The page must be busy, unmapped, and
  *	the enclosing object must be locked.  The object lock protects
  *	against a concurrent read-only pmap enter, which is done without
  *	busying the page.
  */
 void
 vm_page_invalid(vm_page_t m)
 {
 
 	vm_page_assert_busied(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	MPASS(!pmap_page_is_mapped(m));
 
 	if (vm_page_xbusied(m))
 		m->valid = 0;
 	else
 		vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
 }
 
 /*
  * vm_page_zero_invalid()
  *
  *	The kernel assumes that the invalid portions of a page contain
  *	garbage, but such pages can be mapped into memory by user code.
  *	When this occurs, we must zero out the non-valid portions of the
  *	page so user code sees what it expects.
  *
  *	Pages are most often semi-valid when the end of a file is mapped
  *	into memory and the file's size is not page aligned.
  */
 void
 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 {
 	int b;
 	int i;
 
 	/*
 	 * Scan the valid bits looking for invalid sections that
 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
 	 * valid bit may be set ) have already been zeroed by
 	 * vm_page_set_validclean().
 	 */
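 	/*
 	 * Illustrative example (assuming 512-byte DEV_BSIZE and a 4 KB
 	 * page, i.e., 8 chunks): if m->valid == 0x0f, chunks 0-3 are
 	 * valid, and the loop below makes a single call
 	 * pmap_zero_page_area(m, 2048, 2048) to zero chunks 4-7.  The
 	 * extra iteration at i == PAGE_SIZE / DEV_BSIZE exists to flush
 	 * such a trailing run of invalid chunks.
 	 */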
 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
 		    (m->valid & ((vm_page_bits_t)1 << i))) {
 			if (i > b) {
 				pmap_zero_page_area(m,
 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 			}
 			b = i + 1;
 		}
 	}
 
 	/*
 	 * setvalid is TRUE when we can safely set the zero'd areas
 	 * as being valid.  We can do this if there are no cache consistency
 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
 	 */
 	if (setvalid)
 		vm_page_valid(m);
 }
 
 /*
  *	vm_page_is_valid:
  *
  *	Is (partial) page valid?  Note that the case where size == 0
  *	will return FALSE in the degenerate case where the page is
  *	entirely invalid, and TRUE otherwise.
  *
  *	Some callers invoke this routine without the busy lock held and
  *	handle races via higher level locks.  Typical callers should
  *	hold a busy lock to prevent invalidation.
  */
 int
 vm_page_is_valid(vm_page_t m, int base, int size)
 {
 	vm_page_bits_t bits;
 
 	bits = vm_page_bits(base, size);
 	return (vm_page_any_valid(m) && (m->valid & bits) == bits);
 }
 
 /*
  * Returns true if all of the specified predicates are true for the entire
  * (super)page and false otherwise.
  */
 bool
 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
 {
 	vm_object_t object;
 	int i, npages;
 
 	object = m->object;
 	if (skip_m != NULL && skip_m->object != object)
 		return (false);
 	VM_OBJECT_ASSERT_LOCKED(object);
 	npages = atop(pagesizes[m->psind]);
 
 	/*
 	 * The physically contiguous pages that make up a superpage, i.e., a
 	 * page with a page size index ("psind") greater than zero, will
 	 * occupy adjacent entries in vm_page_array[].
 	 */
 	for (i = 0; i < npages; i++) {
 		/* Always test object consistency, including "skip_m". */
 		if (m[i].object != object)
 			return (false);
 		if (&m[i] == skip_m)
 			continue;
 		if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
 			return (false);
 		if ((flags & PS_ALL_DIRTY) != 0) {
 			/*
 			 * Calling vm_page_test_dirty() or pmap_is_modified()
 			 * might stop this case from spuriously returning
 			 * "false".  However, that would require a write lock
 			 * on the object containing "m[i]".
 			 */
 			if (m[i].dirty != VM_PAGE_BITS_ALL)
 				return (false);
 		}
 		if ((flags & PS_ALL_VALID) != 0 &&
 		    m[i].valid != VM_PAGE_BITS_ALL)
 			return (false);
 	}
 	return (true);
 }
 
 /*
  * Set the page's dirty bits if the page is modified.
  */
 void
 vm_page_test_dirty(vm_page_t m)
 {
 
 	vm_page_assert_busied(m);
 	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
 		vm_page_dirty(m);
 }
 
 void
 vm_page_valid(vm_page_t m)
 {
 
 	vm_page_assert_busied(m);
 	if (vm_page_xbusied(m))
 		m->valid = VM_PAGE_BITS_ALL;
 	else
 		vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
 }
 
 void
 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
 {
 
 	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
 }
 
 void
 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
 {
 
 	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
 }
 
 int
 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
 {
 
 	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
 }
 
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void
 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
 {
 
 	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
 }
 
 void
 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
 {
 
 	mtx_assert_(vm_page_lockptr(m), a, file, line);
 }
 #endif
 
 #ifdef INVARIANTS
 void
 vm_page_object_busy_assert(vm_page_t m)
 {
 
 	/*
 	 * Certain of the page's fields may only be modified by the
 	 * holder of a page or object busy.
 	 */
 	if (m->object != NULL && !vm_page_busied(m))
 		VM_OBJECT_ASSERT_BUSY(m->object);
 }
 
 void
 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
 {
 
 	if ((bits & PGA_WRITEABLE) == 0)
 		return;
 
 	/*
 	 * The PGA_WRITEABLE flag can only be set if the page is
 	 * managed, is exclusively busied or the object is locked.
 	 * Currently, this flag is only set by pmap_enter().
 	 */
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("PGA_WRITEABLE on unmanaged page"));
 	if (!vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_BUSY(m->object);
 }
 #endif
 
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
 
 #include <ddb/ddb.h>
 
 DB_SHOW_COMMAND_FLAGS(page, vm_page_print_page_info, DB_CMD_MEMSAFE)
 {
 
 	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
 	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
 	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
 	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
 	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
 	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
 	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
 	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
 	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
 }
 
 DB_SHOW_COMMAND_FLAGS(pageq, vm_page_print_pageq_info, DB_CMD_MEMSAFE)
 {
 	int dom;
 
 	db_printf("pq_free %d\n", vm_free_count());
 	for (dom = 0; dom < vm_ndomains; dom++) {
 		db_printf(
     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
 		    dom,
 		    vm_dom[dom].vmd_page_count,
 		    vm_dom[dom].vmd_free_count,
 		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
 		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
 		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
 		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
 	}
 }
 
 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
 {
 	vm_page_t m;
 	boolean_t phys, virt;
 
 	if (!have_addr) {
 		db_printf("show pginfo addr\n");
 		return;
 	}
 
 	phys = strchr(modif, 'p') != NULL;
 	virt = strchr(modif, 'v') != NULL;
 	if (virt)
 		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
 	else if (phys)
 		m = PHYS_TO_VM_PAGE(addr);
 	else
 		m = (vm_page_t)addr;
 	db_printf(
     "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n"
     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
 	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
 	    m->a.queue, m->ref_count, m->a.flags, m->oflags,
 	    m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
 }
 #endif /* DDB */
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index bb12a7e335d5..2945b53835c6 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1,2419 +1,2419 @@
 /*-
  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
  * Copyright (c) 2005 Yahoo! Technologies Norway AS
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by the University of
  *	California, Berkeley and its contributors.
  * 4. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  *	The proverbial page-out daemon.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/blockcount.h>
 #include <sys/eventhandler.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/kthread.h>
 #include <sys/ktr.h>
 #include <sys/mount.h>
 #include <sys/racct.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/sdt.h>
 #include <sys/signalvar.h>
 #include <sys/smp.h>
 #include <sys/time.h>
 #include <sys/vnode.h>
 #include <sys/vmmeter.h>
 #include <sys/rwlock.h>
 #include <sys/sx.h>
 #include <sys/sysctl.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_pagequeue.h>
 #include <vm/swap_pager.h>
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
 /*
  * System initialization
  */
 
 /* the kernel process "vm_pageout"*/
 static void vm_pageout(void);
 static void vm_pageout_init(void);
 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
 static int vm_pageout_cluster(vm_page_t m);
 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
     int starting_page_shortage);
 
 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
     NULL);
 
 struct proc *pageproc;
 
 static struct kproc_desc page_kp = {
 	"pagedaemon",
 	vm_pageout,
 	&pageproc
 };
 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
 SDT_PROVIDER_DEFINE(vm);
 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
 
 /* Pagedaemon activity rates, in subdivisions of one second. */
 #define	VM_LAUNDER_RATE		10
 #define	VM_INACT_SCAN_RATE	10
 
 static int swapdev_enabled;
 int vm_pageout_page_count = 32;
 
 static int vm_panic_on_oom = 0;
 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
     CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
     "Panic on the given number of out-of-memory errors instead of "
     "killing the largest process");
 
 static int vm_pageout_update_period;
 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
     CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
     "Maximum active LRU update period");
 
 static int pageout_cpus_per_thread = 16;
 SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
     &pageout_cpus_per_thread, 0,
     "Number of CPUs per pagedaemon worker thread");
   
 static int lowmem_period = 10;
 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
     "Low memory callback period");
 
 static int disable_swap_pageouts;
 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
     CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
     "Disallow swapout of dirty pages");
 
 static int pageout_lock_miss;
 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
     CTLFLAG_RD, &pageout_lock_miss, 0,
     "vget() lock misses during pageout");
 
 static int vm_pageout_oom_seq = 12;
 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
     "back-to-back calls to oom detector to start OOM");
 
 static int act_scan_laundry_weight = 3;
 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
     &act_scan_laundry_weight, 0,
     "weight given to clean vs. dirty pages in active queue scans");
 
 static u_int vm_background_launder_rate = 4096;
 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
     &vm_background_launder_rate, 0,
     "background laundering rate, in kilobytes per second");
 
 static u_int vm_background_launder_max = 20 * 1024;
 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
     &vm_background_launder_max, 0,
     "background laundering cap, in kilobytes");
 
 u_long vm_page_max_user_wired;
 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
     &vm_page_max_user_wired, 0,
     "system-wide limit to user-wired page count");
 
 static u_int isqrt(u_int num);
 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
     bool in_shortfall);
 static void vm_pageout_laundry_worker(void *arg);
 
 struct scan_state {
 	struct vm_batchqueue bq;
 	struct vm_pagequeue *pq;
 	vm_page_t	marker;
 	int		maxscan;
 	int		scanned;
 };
 
 static void
 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
     vm_page_t marker, vm_page_t after, int maxscan)
 {
 
 	vm_pagequeue_assert_locked(pq);
 	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
 	    ("marker %p already enqueued", marker));
 
 	if (after == NULL)
 		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
 	else
 		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
 	vm_page_aflag_set(marker, PGA_ENQUEUED);
 
 	vm_batchqueue_init(&ss->bq);
 	ss->pq = pq;
 	ss->marker = marker;
 	ss->maxscan = maxscan;
 	ss->scanned = 0;
 	vm_pagequeue_unlock(pq);
 }
 
 static void
 vm_pageout_end_scan(struct scan_state *ss)
 {
 	struct vm_pagequeue *pq;
 
 	pq = ss->pq;
 	vm_pagequeue_assert_locked(pq);
 	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
 	    ("marker %p not enqueued", ss->marker));
 
 	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
 	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
 	pq->pq_pdpages += ss->scanned;
 }
 
 /*
  * Add a small number of queued pages to a batch queue for later processing
  * without the corresponding queue lock held.  The caller must have enqueued a
  * marker page at the desired start point for the scan.  Pages will be
  * physically dequeued if the caller so requests.  Otherwise, the returned
  * batch may contain marker pages, and it is up to the caller to handle them.
  *
  * When processing the batch queue, vm_pageout_defer() must be used to
  * determine whether the page has been logically dequeued since the batch was
  * collected.
  */
 static __always_inline void
 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
 {
 	struct vm_pagequeue *pq;
 	vm_page_t m, marker, n;
 
 	marker = ss->marker;
 	pq = ss->pq;
 
 	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
 	    ("marker %p not enqueued", ss->marker));
 
 	vm_pagequeue_lock(pq);
 	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
 	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
 	    m = n, ss->scanned++) {
 		n = TAILQ_NEXT(m, plinks.q);
 		if ((m->flags & PG_MARKER) == 0) {
 			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
 			    ("page %p not enqueued", m));
 			KASSERT((m->flags & PG_FICTITIOUS) == 0,
 			    ("Fictitious page %p cannot be in page queue", m));
 			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 			    ("Unmanaged page %p cannot be in page queue", m));
 		} else if (dequeue)
 			continue;
 
 		(void)vm_batchqueue_insert(&ss->bq, m);
 		if (dequeue) {
 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 			vm_page_aflag_clear(m, PGA_ENQUEUED);
 		}
 	}
 	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
 	if (__predict_true(m != NULL))
 		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
 	else
 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
 	if (dequeue)
 		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
 	vm_pagequeue_unlock(pq);
 }
 
 /*
  * Return the next page to be scanned, or NULL if the scan is complete.
  */
 static __always_inline vm_page_t
 vm_pageout_next(struct scan_state *ss, const bool dequeue)
 {
 
 	if (ss->bq.bq_cnt == 0)
 		vm_pageout_collect_batch(ss, dequeue);
 	return (vm_batchqueue_pop(&ss->bq));
 }
 
 /*
  * Determine whether processing of a page should be deferred and ensure that any
  * outstanding queue operations are processed.
  */
 static __always_inline bool
 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
 {
 	vm_page_astate_t as;
 
 	as = vm_page_astate_load(m);
 	if (__predict_false(as.queue != queue ||
 	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
 		return (true);
 	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
 		vm_page_pqbatch_submit(m, queue);
 		return (true);
 	}
 	return (false);
 }
 
 /*
  * Scan for pages at adjacent offsets within the given page's object that are
  * eligible for laundering, form a cluster of these pages and the given page,
  * and launder that cluster.
  */
 static int
 vm_pageout_cluster(vm_page_t m)
 {
 	vm_object_t object;
 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
 	vm_pindex_t pindex;
 	int ib, is, page_base, pageout_count;
 
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	pindex = m->pindex;
 
 	vm_page_assert_xbusied(m);
 
 	mc[vm_pageout_page_count] = pb = ps = m;
 	pageout_count = 1;
 	page_base = vm_pageout_page_count;
 	ib = 1;
 	is = 1;
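 	/*
 	 * For illustration, with vm_pageout_page_count == 32: mc[] has 64
 	 * slots and m starts in the middle at mc[32].  Pages found by the
 	 * reverse scan fill mc[31], mc[30], ... (page_base tracks the
 	 * lowest used slot), pages found by the forward scan fill mc[33],
 	 * mc[34], ..., and the final flush submits the contiguous run of
 	 * pageout_count pages starting at mc[page_base].
 	 */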
 
 	/*
 	 * We can cluster only if the page is dirty, not busy or wired, and
 	 * is in the laundry queue.
 	 *
 	 * During heavy mmap/modification loads the pageout
 	 * daemon can really fragment the underlying file
 	 * due to flushing pages out of order and not trying to
 	 * align the clusters (which leaves sporadic out-of-order
 	 * holes).  To solve this problem, we do the reverse scan
 	 * first and attempt to align our cluster, then do a
 	 * forward scan if room remains.
 	 */
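 	/*
 	 * Example (hypothetical pindex): with vm_pageout_page_count == 32
 	 * and m at pindex 70, the reverse scan collects pindexes 69 down
 	 * to 64 and then stops because (70 - 6) % 32 == 0, so the cluster
 	 * starts on a 32-page boundary (128 KB with 4 KB pages) and the
 	 * forward scan fills the remaining slots from pindex 71 onward.
 	 */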
 more:
 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
 		if (ib > pindex) {
 			ib = 0;
 			break;
 		}
 		if ((p = vm_page_prev(pb)) == NULL ||
 		    vm_page_tryxbusy(p) == 0) {
 			ib = 0;
 			break;
 		}
 		if (vm_page_wired(p)) {
 			ib = 0;
 			vm_page_xunbusy(p);
 			break;
 		}
 		vm_page_test_dirty(p);
 		if (p->dirty == 0) {
 			ib = 0;
 			vm_page_xunbusy(p);
 			break;
 		}
 		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
 			vm_page_xunbusy(p);
 			ib = 0;
 			break;
 		}
 		mc[--page_base] = pb = p;
 		++pageout_count;
 		++ib;
 
 		/*
 		 * We are at an alignment boundary.  Stop here, and switch
 		 * directions.  Do not clear ib.
 		 */
 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
 			break;
 	}
 	while (pageout_count < vm_pageout_page_count && 
 	    pindex + is < object->size) {
 		if ((p = vm_page_next(ps)) == NULL ||
 		    vm_page_tryxbusy(p) == 0)
 			break;
 		if (vm_page_wired(p)) {
 			vm_page_xunbusy(p);
 			break;
 		}
 		vm_page_test_dirty(p);
 		if (p->dirty == 0) {
 			vm_page_xunbusy(p);
 			break;
 		}
 		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
 			vm_page_xunbusy(p);
 			break;
 		}
 		mc[page_base + pageout_count] = ps = p;
 		++pageout_count;
 		++is;
 	}
 
 	/*
 	 * If we exhausted our forward scan, continue with the reverse scan
 	 * when possible, even past an alignment boundary.  This catches
 	 * boundary conditions.
 	 */
 	if (ib != 0 && pageout_count < vm_pageout_page_count)
 		goto more;
 
 	return (vm_pageout_flush(&mc[page_base], pageout_count,
 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
 }
 
 /*
  * vm_pageout_flush() - launder the given pages
  *
  *	The given pages are laundered.  Note that we set up for the start of
  *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
  *	reference count all in here rather than in the parent.  If we want
  *	the parent to do more sophisticated things, we may have to change
  *	the ordering.
  *
  *	The returned runlen is the count of pages between mreq and the
  *	first page after mreq with status VM_PAGER_AGAIN.
  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
  *	VM_PAGER_FAIL for any page in that run.
  */
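 /*
  *	For example (hypothetical status vector): with count == 8 and
  *	mreq == 3, runlen starts at 5; if pageout_status[6] is
  *	VM_PAGER_AGAIN and pages 3 through 5 were paged out, runlen
  *	becomes 6 - 3 = 3, covering mc[3] through mc[5].
  */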
 int
 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
     boolean_t *eio)
 {
 	vm_object_t object = mc[0]->object;
 	int pageout_status[count];
 	int numpagedout = 0;
 	int i, runlen;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	/*
 	 * Initiate I/O.  Mark the pages shared busy and verify that they're
 	 * valid and read-only.
 	 *
 	 * We do not have to fixup the clean/dirty bits here... we can
 	 * allow the pager to do it after the I/O completes.
 	 *
 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
 	 * edge case with file fragments.
 	 */
 	for (i = 0; i < count; i++) {
 		KASSERT(vm_page_all_valid(mc[i]),
 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
 			mc[i], i, count));
 		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
 		    ("vm_pageout_flush: writeable page %p", mc[i]));
 		vm_page_busy_downgrade(mc[i]);
 	}
 	vm_object_pip_add(object, count);
 
 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
 
 	runlen = count - mreq;
 	if (eio != NULL)
 		*eio = FALSE;
 	for (i = 0; i < count; i++) {
 		vm_page_t mt = mc[i];
 
 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
 		    !pmap_page_is_write_mapped(mt),
 		    ("vm_pageout_flush: page %p is not write protected", mt));
 		switch (pageout_status[i]) {
 		case VM_PAGER_OK:
 			/*
 			 * The page may have moved since laundering started, in
 			 * which case it should be left alone.
 			 */
 			if (vm_page_in_laundry(mt))
 				vm_page_deactivate_noreuse(mt);
 			/* FALLTHROUGH */
 		case VM_PAGER_PEND:
 			numpagedout++;
 			break;
 		case VM_PAGER_BAD:
 			/*
 			 * The page is outside the object's range.  We pretend
 			 * that the page out worked and clean the page, so the
 			 * changes will be lost if the page is reclaimed by
 			 * the page daemon.
 			 */
 			vm_page_undirty(mt);
 			if (vm_page_in_laundry(mt))
 				vm_page_deactivate_noreuse(mt);
 			break;
 		case VM_PAGER_ERROR:
 		case VM_PAGER_FAIL:
 			/*
 			 * If the page couldn't be paged out to swap because the
 			 * pager wasn't able to find space, place the page in
 			 * the PQ_UNSWAPPABLE holding queue.  This is an
 			 * optimization that prevents the page daemon from
 			 * wasting CPU cycles on pages that cannot be reclaimed
 			 * because no swap device is configured.
 			 *
 			 * Otherwise, reactivate the page so that it doesn't
 			 * clog the laundry and inactive queues.  (We will try
 			 * paging it out again later.)
 			 */
 			if ((object->flags & OBJ_SWAP) != 0 &&
 			    pageout_status[i] == VM_PAGER_FAIL) {
 				vm_page_unswappable(mt);
 				numpagedout++;
 			} else
 				vm_page_activate(mt);
 			if (eio != NULL && i >= mreq && i - mreq < runlen)
 				*eio = TRUE;
 			break;
 		case VM_PAGER_AGAIN:
 			if (i >= mreq && i - mreq < runlen)
 				runlen = i - mreq;
 			break;
 		}
 
 		/*
 		 * If the operation is still going, leave the page busy to
 		 * block all other accesses. Also, leave the paging in
 		 * progress indicator set so that we don't attempt an object
 		 * collapse.
 		 */
 		if (pageout_status[i] != VM_PAGER_PEND) {
 			vm_object_pip_wakeup(object);
 			vm_page_sunbusy(mt);
 		}
 	}
 	if (prunlen != NULL)
 		*prunlen = runlen;
 	return (numpagedout);
 }
 
 static void
 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
 {
 
 	atomic_store_rel_int(&swapdev_enabled, 1);
 }
 
 static void
 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
 {
 
 	if (swap_pager_nswapdev() == 1)
 		atomic_store_rel_int(&swapdev_enabled, 0);
 }
 
 /*
  * Attempt to acquire all of the necessary locks to launder a page and
  * then call through the clustering layer to PUTPAGES.  Wait a short
  * time for a vnode lock.
  *
  * Requires the page and object lock on entry, releases both before return.
  * Returns 0 on success and an errno otherwise.
  */
 static int
 vm_pageout_clean(vm_page_t m, int *numpagedout)
 {
 	struct vnode *vp;
 	struct mount *mp;
 	vm_object_t object;
 	vm_pindex_t pindex;
 	int error;
 
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	error = 0;
 	vp = NULL;
 	mp = NULL;
 
 	/*
 	 * The object is already known NOT to be dead.   It
 	 * is possible for the vget() to block the whole
 	 * pageout daemon, but the new low-memory handling
 	 * code should prevent it.
 	 *
 	 * We can't wait forever for the vnode lock; we might
 	 * deadlock due to a vn_read() getting stuck in
 	 * vm_wait while holding this vnode.  We skip the 
 	 * vnode if we can't get it in a reasonable amount
 	 * of time.
 	 */
 	if (object->type == OBJT_VNODE) {
 		vm_page_xunbusy(m);
 		vp = object->handle;
 		if (vp->v_type == VREG &&
 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
 			mp = NULL;
 			error = EDEADLK;
 			goto unlock_all;
 		}
 		KASSERT(mp != NULL,
 		    ("vp %p with NULL v_mount", vp));
 		vm_object_reference_locked(object);
 		pindex = m->pindex;
 		VM_OBJECT_WUNLOCK(object);
 		if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
 			vp = NULL;
 			error = EDEADLK;
 			goto unlock_mp;
 		}
 		VM_OBJECT_WLOCK(object);
 
 		/*
 		 * Ensure that the object and vnode were not disassociated
 		 * while locks were dropped.
 		 */
 		if (vp->v_object != object) {
 			error = ENOENT;
 			goto unlock_all;
 		}
 
 		/*
 		 * While the object was unlocked, the page may have been:
 		 * (1) moved to a different queue,
 		 * (2) reallocated to a different object,
 		 * (3) reallocated to a different offset, or
 		 * (4) cleaned.
 		 */
 		if (!vm_page_in_laundry(m) || m->object != object ||
 		    m->pindex != pindex || m->dirty == 0) {
 			error = ENXIO;
 			goto unlock_all;
 		}
 
 		/*
 		 * The page may have been busied while the object lock was
 		 * released.
 		 */
 		if (vm_page_tryxbusy(m) == 0) {
 			error = EBUSY;
 			goto unlock_all;
 		}
 	}
 
 	/*
 	 * Remove all writeable mappings, failing if the page is wired.
 	 */
 	if (!vm_page_try_remove_write(m)) {
 		vm_page_xunbusy(m);
 		error = EBUSY;
 		goto unlock_all;
 	}
 
 	/*
 	 * If a page is dirty, then it is either being washed
 	 * (but not yet cleaned) or it is still in the
 	 * laundry.  If it is still in the laundry, then we
 	 * start the cleaning operation. 
 	 */
 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
 		error = EIO;
 
 unlock_all:
 	VM_OBJECT_WUNLOCK(object);
 
 unlock_mp:
 	if (mp != NULL) {
 		if (vp != NULL)
 			vput(vp);
 		vm_object_deallocate(object);
 		vn_finished_write(mp);
 	}
 
 	return (error);
 }
 
 /*
  * Attempt to launder the specified number of pages.
  *
  * Returns the number of pages successfully laundered.
  */
 static int
 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
 {
 	struct scan_state ss;
 	struct vm_pagequeue *pq;
 	vm_object_t object;
 	vm_page_t m, marker;
 	vm_page_astate_t new, old;
 	int act_delta, error, numpagedout, queue, refs, starting_target;
 	int vnodes_skipped;
 	bool pageout_ok;
 
 	object = NULL;
 	starting_target = launder;
 	vnodes_skipped = 0;
 
 	/*
 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
 	 * once the target number of dirty pages have been laundered, or once
 	 * we've reached the end of the queue.  A single iteration of this loop
 	 * may cause more than one page to be laundered because of clustering.
 	 *
 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
 	 * swap devices are configured.
 	 */
 	if (atomic_load_acq_int(&swapdev_enabled))
 		queue = PQ_UNSWAPPABLE;
 	else
 		queue = PQ_LAUNDRY;
 
 scan:
 	marker = &vmd->vmd_markers[queue];
 	pq = &vmd->vmd_pagequeues[queue];
 	vm_pagequeue_lock(pq);
 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
 		if (__predict_false((m->flags & PG_MARKER) != 0))
 			continue;
 
 		/*
 		 * Don't touch a page that was removed from the queue after the
 		 * page queue lock was released.  Otherwise, ensure that any
 		 * pending queue operations, such as dequeues for wired pages,
 		 * are handled.
 		 */
 		if (vm_pageout_defer(m, queue, true))
 			continue;
 
 		/*
 		 * Lock the page's object.
 		 */
 		if (object == NULL || object != m->object) {
 			if (object != NULL)
 				VM_OBJECT_WUNLOCK(object);
 			object = atomic_load_ptr(&m->object);
 			if (__predict_false(object == NULL))
 				/* The page is being freed by another thread. */
 				continue;
 
 			/* Depends on type-stability. */
 			VM_OBJECT_WLOCK(object);
 			if (__predict_false(m->object != object)) {
 				VM_OBJECT_WUNLOCK(object);
 				object = NULL;
 				continue;
 			}
 		}
 
 		if (vm_page_tryxbusy(m) == 0)
 			continue;
 
 		/*
 		 * Check for wirings now that we hold the object lock and have
 		 * exclusively busied the page.  If the page is mapped, it may
 		 * still be wired by pmap lookups.  The call to
 		 * vm_page_try_remove_all() below atomically checks for such
 		 * wirings and removes mappings.  If the page is unmapped, the
 		 * wire count is guaranteed not to increase after this check.
 		 */
 		if (__predict_false(vm_page_wired(m)))
 			goto skip_page;
 
 		/*
 		 * Invalid pages can be easily freed.  They cannot be
 		 * mapped; vm_page_free() asserts this.
 		 */
 		if (vm_page_none_valid(m))
 			goto free_page;
 
 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
 			 * Check to see if the page has been removed from the
 			 * queue since the first such check.  Leave it alone if
 			 * so, discarding any references collected by
 			 * pmap_ts_referenced().
 			 */
 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
 				goto skip_page;
 
 			new = old;
 			act_delta = refs;
 			if ((old.flags & PGA_REFERENCED) != 0) {
 				new.flags &= ~PGA_REFERENCED;
 				act_delta++;
 			}
 			if (act_delta == 0) {
 				;
 			} else if (object->ref_count != 0) {
 				/*
 				 * Increase the activation count if the page was
 				 * referenced while in the laundry queue.  This
 				 * makes it less likely that the page will be
 				 * returned prematurely to the laundry queue.
 				 */
 				new.act_count += ACT_ADVANCE +
 				    act_delta;
 				if (new.act_count > ACT_MAX)
 					new.act_count = ACT_MAX;
 
 				new.flags &= ~PGA_QUEUE_OP_MASK;
 				new.flags |= PGA_REQUEUE;
 				new.queue = PQ_ACTIVE;
 				if (!vm_page_pqstate_commit(m, &old, new))
 					continue;
 
 				/*
 				 * If this was a background laundering, count
 				 * activated pages towards our target.  The
 				 * purpose of background laundering is to ensure
 				 * that pages are eventually cycled through the
 				 * laundry queue, and an activation is a valid
 				 * way out.
 				 */
 				if (!in_shortfall)
 					launder--;
 				VM_CNT_INC(v_reactivated);
 				goto skip_page;
 			} else if ((object->flags & OBJ_DEAD) == 0) {
 				new.flags |= PGA_REQUEUE;
 				if (!vm_page_pqstate_commit(m, &old, new))
 					continue;
 				goto skip_page;
 			}
 			break;
 		}
 
 		/*
 		 * If the page appears to be clean at the machine-independent
 		 * layer, then remove all of its mappings from the pmap in
 		 * anticipation of freeing it.  If, however, any of the page's
 		 * mappings allow write access, then the page may still be
 		 * modified until the last of those mappings are removed.
 		 */
 		if (object->ref_count != 0) {
 			vm_page_test_dirty(m);
 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
 				goto skip_page;
 		}
 
 		/*
 		 * Clean pages are freed, and dirty pages are paged out unless
 		 * they belong to a dead object.  Requeueing dirty pages from
 		 * dead objects is pointless, as they are being paged out and
 		 * freed by the thread that destroyed the object.
 		 */
 		if (m->dirty == 0) {
 free_page:
 			/*
 			 * Now we are guaranteed that no other threads are
 			 * manipulating the page, check for a last-second
 			 * reference.
 			 */
 			if (vm_pageout_defer(m, queue, true))
 				goto skip_page;
 			vm_page_free(m);
 			VM_CNT_INC(v_dfree);
 		} else if ((object->flags & OBJ_DEAD) == 0) {
 			if ((object->flags & OBJ_SWAP) != 0)
 				pageout_ok = disable_swap_pageouts == 0;
 			else
 				pageout_ok = true;
 			if (!pageout_ok) {
 				vm_page_launder(m);
 				goto skip_page;
 			}
 
 			/*
 			 * Form a cluster with adjacent, dirty pages from the
 			 * same object, and page out that entire cluster.
 			 *
 			 * The adjacent, dirty pages must also be in the
 			 * laundry.  However, their mappings are not checked
 			 * for new references.  Consequently, a recently
 			 * referenced page may be paged out.  However, that
 			 * page will not be prematurely reclaimed.  After page
 			 * out, the page will be placed in the inactive queue,
 			 * where any new references will be detected and the
 			 * page reactivated.
 			 */
 			error = vm_pageout_clean(m, &numpagedout);
 			if (error == 0) {
 				launder -= numpagedout;
 				ss.scanned += numpagedout;
 			} else if (error == EDEADLK) {
 				pageout_lock_miss++;
 				vnodes_skipped++;
 			}
 			object = NULL;
 		} else {
 skip_page:
 			vm_page_xunbusy(m);
 		}
 	}
 	if (object != NULL) {
 		VM_OBJECT_WUNLOCK(object);
 		object = NULL;
 	}
 	vm_pagequeue_lock(pq);
 	vm_pageout_end_scan(&ss);
 	vm_pagequeue_unlock(pq);
 
 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
 		queue = PQ_LAUNDRY;
 		goto scan;
 	}
 
 	/*
 	 * Wakeup the sync daemon if we skipped a vnode in a writeable object
 	 * and we didn't launder enough pages.
 	 */
 	if (vnodes_skipped > 0 && launder > 0)
 		(void)speedup_syncer();
 
 	return (starting_target - launder);
 }
 
 /*
  * Compute the integer square root.
  */
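 /*
  * Worked example (for illustration): isqrt(27) starts with bit = 16
  * (fls(27) == 5).  The loop then computes:
  *   bit 16: tmp = 16, 27 >= 16 -> num = 11, root = 16
  *   bit  4: tmp = 20, 11 <  20 -> num = 11, root = 8
  *   bit  1: tmp =  9, 11 >=  9 -> num =  2, root = 5
  * and returns 5, the floor of sqrt(27).
  */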
 static u_int
 isqrt(u_int num)
 {
 	u_int bit, root, tmp;
 
 	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
 	root = 0;
 	while (bit != 0) {
 		tmp = root + bit;
 		root >>= 1;
 		if (num >= tmp) {
 			num -= tmp;
 			root += bit;
 		}
 		bit >>= 2;
 	}
 	return (root);
 }
 
 /*
  * Perform the work of the laundry thread: periodically wake up and determine
  * whether any pages need to be laundered.  If so, determine the number of pages
  * that need to be laundered, and launder them.
  */
 static void
 vm_pageout_laundry_worker(void *arg)
 {
 	struct vm_domain *vmd;
 	struct vm_pagequeue *pq;
 	uint64_t nclean, ndirty, nfreed;
 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
 	bool in_shortfall;
 
 	domain = (uintptr_t)arg;
 	vmd = VM_DOMAIN(domain);
 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
 
 	shortfall = 0;
 	in_shortfall = false;
 	shortfall_cycle = 0;
 	last_target = target = 0;
 	nfreed = 0;
 
 	/*
 	 * Calls to these handlers are serialized by the swap syscall lock.
 	 */
 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
 	    EVENTHANDLER_PRI_ANY);
 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
 	    EVENTHANDLER_PRI_ANY);
 
 	/*
 	 * The pageout laundry worker is never done, so loop forever.
 	 */
 	for (;;) {
 		KASSERT(target >= 0, ("negative target %d", target));
 		KASSERT(shortfall_cycle >= 0,
 		    ("negative cycle %d", shortfall_cycle));
 		launder = 0;
 
 		/*
 		 * First determine whether we need to launder pages to meet a
 		 * shortage of free pages.
 		 */
 		if (shortfall > 0) {
 			in_shortfall = true;
 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
 			target = shortfall;
 		} else if (!in_shortfall)
 			goto trybackground;
 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
 			/*
 			 * We recently entered shortfall and began laundering
 			 * pages.  If we have completed that laundering run
 			 * (and we are no longer in shortfall) or we have met
 			 * our laundry target through other activity, then we
 			 * can stop laundering pages.
 			 */
 			in_shortfall = false;
 			target = 0;
 			goto trybackground;
 		}
 		launder = target / shortfall_cycle--;
 		goto dolaundry;
 
 		/*
 		 * There's no immediate need to launder any pages; see if we
 		 * meet the conditions to perform background laundering:
 		 *
 		 * 1. The ratio of dirty to clean inactive pages exceeds the
 		 *    background laundering threshold, or
 		 * 2. we haven't yet reached the target of the current
 		 *    background laundering run.
 		 *
 		 * The background laundering threshold is not a constant.
 		 * Instead, it is a slowly growing function of the number of
 		 * clean pages freed by the page daemon since the last
 		 * background laundering.  Thus, as the ratio of dirty to
 		 * clean inactive pages grows, the amount of memory pressure
 		 * required to trigger laundering decreases.  We ensure
 		 * that the threshold is non-zero after an inactive queue
 		 * scan, even if that scan failed to free a single clean page.
 		 */
 trybackground:
 		nclean = vmd->vmd_free_count +
 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
 			target = vmd->vmd_background_launder_target;
 		}
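 		/*
 		 * For illustration (hypothetical numbers): with
 		 * vmd_free_target - vmd_free_min == 8000, the multiplier
 		 * is isqrt(howmany(1, 8000)) == 1 right after a background
 		 * laundering (nfreed == 0), so laundering starts only once
 		 * the laundry queue is as large as the clean pool.  After
 		 * 72000 clean pages have been freed, the multiplier grows
 		 * to isqrt(10) == 3, and laundering starts once the laundry
 		 * queue reaches a third of the clean pool.
 		 */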
 
 		/*
 		 * We have a non-zero background laundering target.  If we've
 		 * laundered up to our maximum without observing a page daemon
 		 * request, just stop.  This is a safety belt that ensures we
 		 * don't launder an excessive amount if memory pressure is low
 		 * and the ratio of dirty to clean pages is large.  Otherwise,
 		 * proceed at the background laundering rate.
 		 */
 		if (target > 0) {
 			if (nfreed > 0) {
 				nfreed = 0;
 				last_target = target;
 			} else if (last_target - target >=
 			    vm_background_launder_max * 1024 / PAGE_SIZE) {
 				target = 0;
 			}
 			launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
 			launder /= VM_LAUNDER_RATE;
 			if (launder > target)
 				launder = target;
 		}
 
 dolaundry:
 		if (launder > 0) {
 			/*
 			 * Because of I/O clustering, the number of laundered
 			 * pages could exceed "target" by the maximum size of
 			 * a cluster minus one. 
 			 */
 			target -= min(vm_pageout_launder(vmd, launder,
 			    in_shortfall), target);
 			pause("laundp", hz / VM_LAUNDER_RATE);
 		}
 
 		/*
 		 * If we're not currently laundering pages and the page daemon
 		 * hasn't posted a new request, sleep until the page daemon
 		 * kicks us.
 		 */
 		vm_pagequeue_lock(pq);
 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
 			(void)mtx_sleep(&vmd->vmd_laundry_request,
 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
 
 		/*
 		 * If the pagedaemon has indicated that it's in shortfall, start
 		 * a shortfall laundering unless we're already in the middle of
 		 * one.  This may preempt a background laundering.
 		 */
 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
 		    (!in_shortfall || shortfall_cycle == 0)) {
 			shortfall = vm_laundry_target(vmd) +
 			    vmd->vmd_pageout_deficit;
 			target = 0;
 		} else
 			shortfall = 0;
 
 		if (target == 0)
 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
 		nfreed += vmd->vmd_clean_pages_freed;
 		vmd->vmd_clean_pages_freed = 0;
 		vm_pagequeue_unlock(pq);
 	}
 }
 
 /*
  * Compute the number of pages we want to try to move from the
  * active queue to either the inactive or laundry queue.
  *
  * When scanning active pages during a shortage, we make clean pages
  * count more heavily towards the page shortage than dirty pages.
  * This is because dirty pages must be laundered before they can be
  * reused and thus have less utility when attempting to quickly
  * alleviate a free page shortage.  However, this weighting also
  * causes the scan to deactivate dirty pages more aggressively,
  * improving the effectiveness of clustering.
  */
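 /*
  * For illustration (hypothetical counts): with act_scan_laundry_weight
  * of 3, an inactive target plus paging target of 1000 pages, 400 pages
  * in PQ_INACTIVE and 300 in PQ_LAUNDRY, the target is
  * (1000 - (400 + 300 / 3)) * 3 = 1500.  In vm_pageout_scan_active(),
  * each clean page that is deactivated counts 3 toward this target and
  * each dirty page moved to the laundry counts 1.
  */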
 static int
 vm_pageout_active_target(struct vm_domain *vmd)
 {
 	int shortage;
 
 	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
 	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
 	shortage *= act_scan_laundry_weight;
 	return (shortage);
 }
 
 /*
  * Scan the active queue.  If there is no shortage of inactive pages, scan a
  * small portion of the queue in order to maintain quasi-LRU.
  */
 static void
 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
 {
 	struct scan_state ss;
 	vm_object_t object;
 	vm_page_t m, marker;
 	struct vm_pagequeue *pq;
 	vm_page_astate_t old, new;
 	long min_scan;
 	int act_delta, max_scan, ps_delta, refs, scan_tick;
 	uint8_t nqueue;
 
 	marker = &vmd->vmd_markers[PQ_ACTIVE];
 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
 	vm_pagequeue_lock(pq);
 
 	/*
 	 * If we're just idle polling, attempt to visit every
 	 * active page within 'update_period' seconds.
 	 */
 	scan_tick = ticks;
 	if (vm_pageout_update_period != 0) {
 		min_scan = pq->pq_cnt;
 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
 		min_scan /= hz * vm_pageout_update_period;
 	} else
 		min_scan = 0;
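 	/*
 	 * For example (hypothetical numbers): with 1,000,000 active pages,
 	 * hz == 1000, an update period of 600 seconds, and 1000 ticks
 	 * since the last scan, min_scan is
 	 * 1000000 * 1000 / (1000 * 600) == 1666 pages, i.e., roughly
 	 * 1/600th of the queue per second.
 	 */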
 	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
 		vmd->vmd_last_active_scan = scan_tick;
 
 	/*
 	 * Scan the active queue for pages that can be deactivated.  Update
 	 * the per-page activity counter and use it to identify deactivation
 	 * candidates.  Held pages may be deactivated.
 	 *
 	 * To avoid requeuing each page that remains in the active queue, we
 	 * implement the CLOCK algorithm.  To keep the implementation of the
 	 * enqueue operation consistent for all page queues, we use two hands,
 	 * represented by marker pages. Scans begin at the first hand, which
 	 * precedes the second hand in the queue.  When the two hands meet,
 	 * they are moved back to the head and tail of the queue, respectively,
 	 * and scanning resumes.
 	 */
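 	/*
 	 * Concretely, vmd_clock[0] is the hand at which each scan starts
 	 * and vmd_clock[1] is the hand marking where the previous pass
 	 * ended; the check against &vmd->vmd_clock[1] below detects the
 	 * hands meeting and moves them back to the head and tail of the
 	 * queue before resuming the scan.
 	 */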
 	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
 act_scan:
 	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
 	while ((m = vm_pageout_next(&ss, false)) != NULL) {
 		if (__predict_false(m == &vmd->vmd_clock[1])) {
 			vm_pagequeue_lock(pq);
 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
 			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
 			    plinks.q);
 			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
 			    plinks.q);
 			max_scan -= ss.scanned;
 			vm_pageout_end_scan(&ss);
 			goto act_scan;
 		}
 		if (__predict_false((m->flags & PG_MARKER) != 0))
 			continue;
 
 		/*
 		 * Don't touch a page that was removed from the queue after the
 		 * page queue lock was released.  Otherwise, ensure that any
 		 * pending queue operations, such as dequeues for wired pages,
 		 * are handled.
 		 */
 		if (vm_pageout_defer(m, PQ_ACTIVE, true))
 			continue;
 
 		/*
 		 * A page's object pointer may be set to NULL before
 		 * the object lock is acquired.
 		 */
 		object = atomic_load_ptr(&m->object);
 		if (__predict_false(object == NULL))
 			/*
 			 * The page has been removed from its object.
 			 */
 			continue;
 
 		/* Deferred free of swap space. */
 		if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
 		    VM_OBJECT_TRYWLOCK(object)) {
 			if (m->object == object)
 				vm_pager_page_unswapped(m);
 			VM_OBJECT_WUNLOCK(object);
 		}
 
 		/*
 		 * Check to see "how much" the page has been used.
 		 *
 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
 		 * that a reference from a concurrently destroyed mapping is
 		 * observed here and now.
 		 *
 		 * Perform an unsynchronized object ref count check.  While
 		 * the page lock ensures that the page is not reallocated to
 		 * another object, in particular, one with unmanaged mappings
 		 * that cannot support pmap_ts_referenced(), two races are,
 		 * nonetheless, possible:
 		 * 1) The count was transitioning to zero, but we saw a non-
 		 *    zero value.  pmap_ts_referenced() will return zero
 		 *    because the page is not mapped.
 		 * 2) The count was transitioning to one, but we saw zero.
 		 *    This race delays the detection of a new reference.  At
 		 *    worst, we will deactivate and reactivate the page.
 		 */
 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		old = vm_page_astate_load(m);
 		do {
 			/*
 			 * Check to see if the page has been removed from the
 			 * queue since the first such check.  Leave it alone if
 			 * so, discarding any references collected by
 			 * pmap_ts_referenced().
 			 */
 			if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
 				ps_delta = 0;
 				break;
 			}
 
 			/*
 			 * Advance or decay the act_count based on recent usage.
 			 */
 			new = old;
 			act_delta = refs;
 			if ((old.flags & PGA_REFERENCED) != 0) {
 				new.flags &= ~PGA_REFERENCED;
 				act_delta++;
 			}
 			if (act_delta != 0) {
 				new.act_count += ACT_ADVANCE + act_delta;
 				if (new.act_count > ACT_MAX)
 					new.act_count = ACT_MAX;
 			} else {
 				new.act_count -= min(new.act_count,
 				    ACT_DECLINE);
 			}
 
 			if (new.act_count > 0) {
 				/*
 				 * Adjust the activation count and keep the page
 				 * in the active queue.  The count might be left
 				 * unchanged if it is saturated.  The page may
 				 * have been moved to a different queue since we
 				 * started the scan, in which case we move it
 				 * back.
 				 */
 				ps_delta = 0;
 				if (old.queue != PQ_ACTIVE) {
 					new.flags &= ~PGA_QUEUE_OP_MASK;
 					new.flags |= PGA_REQUEUE;
 					new.queue = PQ_ACTIVE;
 				}
 			} else {
 				/*
 				 * When not short for inactive pages, let dirty
 				 * pages go through the inactive queue before
 				 * moving to the laundry queue.  This gives them
 				 * some extra time to be reactivated,
 				 * potentially avoiding an expensive pageout.
 				 * However, during a page shortage, the inactive
 				 * queue is necessarily small, and so dirty
 				 * pages would only spend a trivial amount of
 				 * time in the inactive queue.  Therefore, we
 				 * might as well place them directly in the
 				 * laundry queue to reduce queuing overhead.
 				 *
 				 * Calling vm_page_test_dirty() here would
 				 * require acquisition of the object's write
 				 * lock.  However, during a page shortage,
 				 * directing dirty pages into the laundry queue
 				 * is only an optimization and not a
 				 * requirement.  Therefore, we simply rely on
 				 * the opportunistic updates to the page's dirty
 				 * field by the pmap.
 				 */
 				if (page_shortage <= 0) {
 					nqueue = PQ_INACTIVE;
 					ps_delta = 0;
 				} else if (m->dirty == 0) {
 					nqueue = PQ_INACTIVE;
 					ps_delta = act_scan_laundry_weight;
 				} else {
 					nqueue = PQ_LAUNDRY;
 					ps_delta = 1;
 				}
 
 				new.flags &= ~PGA_QUEUE_OP_MASK;
 				new.flags |= PGA_REQUEUE;
 				new.queue = nqueue;
 			}
 		} while (!vm_page_pqstate_commit(m, &old, new));
 
 		page_shortage -= ps_delta;
 	}
 	vm_pagequeue_lock(pq);
 	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
 	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
 	vm_pageout_end_scan(&ss);
 	vm_pagequeue_unlock(pq);
 }
 
 static int
 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
     vm_page_t m)
 {
 	vm_page_astate_t as;
 
 	vm_pagequeue_assert_locked(pq);
 
 	as = vm_page_astate_load(m);
 	if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
 		return (0);
 	vm_page_aflag_set(m, PGA_ENQUEUED);
 	TAILQ_INSERT_BEFORE(marker, m, plinks.q);
 	return (1);
 }
 
 /*
  * Re-add stuck pages to the inactive queue.  We will examine them again
  * during the next scan.  If the queue state of a page has changed since
  * it was physically removed from the page queue in
  * vm_pageout_collect_batch(), don't do anything with that page.
  */
 static void
 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
     vm_page_t m)
 {
 	struct vm_pagequeue *pq;
 	vm_page_t marker;
 	int delta;
 
 	delta = 0;
 	marker = ss->marker;
 	pq = ss->pq;
 
 	if (m != NULL) {
-		if (vm_batchqueue_insert(bq, m))
+		if (vm_batchqueue_insert(bq, m) != 0)
 			return;
 		vm_pagequeue_lock(pq);
 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
 	} else
 		vm_pagequeue_lock(pq);
 	while ((m = vm_batchqueue_pop(bq)) != NULL)
 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
 	vm_pagequeue_cnt_add(pq, delta);
 	vm_pagequeue_unlock(pq);
 	vm_batchqueue_init(bq);
 }
 
 static void
 vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 {
 	struct timeval start, end;
 	struct scan_state ss;
 	struct vm_batchqueue rq;
 	struct vm_page marker_page;
 	vm_page_t m, marker;
 	struct vm_pagequeue *pq;
 	vm_object_t object;
 	vm_page_astate_t old, new;
 	int act_delta, addl_page_shortage, starting_page_shortage, refs;
 
 	object = NULL;
 	vm_batchqueue_init(&rq);
 	getmicrouptime(&start);
 
 	/*
 	 * The addl_page_shortage is an estimate of the number of temporarily
 	 * stuck pages in the inactive queue.  In other words, the
 	 * number of pages from the inactive count that should be
 	 * discounted in setting the target for the active queue scan.
 	 */
 	addl_page_shortage = 0;
 
 	/*
 	 * Start scanning the inactive queue for pages that we can free.  The
 	 * scan will stop when we reach the target or we have scanned the
 	 * entire queue.  (Note that m->a.act_count is not used to make
 	 * decisions for the inactive queue, only for the active queue.)
 	 */
 	starting_page_shortage = page_shortage;
 	marker = &marker_page;
 	vm_page_init_marker(marker, PQ_INACTIVE, 0);
 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
 	vm_pagequeue_lock(pq);
 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 	while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
 		KASSERT((m->flags & PG_MARKER) == 0,
 		    ("marker page %p was dequeued", m));
 
 		/*
 		 * Don't touch a page that was removed from the queue after the
 		 * page queue lock was released.  Otherwise, ensure that any
 		 * pending queue operations, such as dequeues for wired pages,
 		 * are handled.
 		 */
 		if (vm_pageout_defer(m, PQ_INACTIVE, false))
 			continue;
 
 		/*
 		 * Lock the page's object.
 		 */
 		if (object == NULL || object != m->object) {
 			if (object != NULL)
 				VM_OBJECT_WUNLOCK(object);
 			object = atomic_load_ptr(&m->object);
 			if (__predict_false(object == NULL))
 				/* The page is being freed by another thread. */
 				continue;
 
 			/* Depends on type-stability. */
 			VM_OBJECT_WLOCK(object);
 			if (__predict_false(m->object != object)) {
 				VM_OBJECT_WUNLOCK(object);
 				object = NULL;
 				goto reinsert;
 			}
 		}
 
 		if (vm_page_tryxbusy(m) == 0) {
 			/*
 			 * Don't mess with busy pages.  Leave them at
 			 * the front of the queue.  Most likely, they
 			 * are being paged out and will leave the
 			 * queue shortly after the scan finishes.  So,
 			 * they ought to be discounted from the
 			 * inactive count.
 			 */
 			addl_page_shortage++;
 			goto reinsert;
 		}
 
 		/* Deferred free of swap space. */
 		if ((m->a.flags & PGA_SWAP_FREE) != 0)
 			vm_pager_page_unswapped(m);
 
 		/*
 		 * Check for wirings now that we hold the object lock and have
 		 * exclusively busied the page.  If the page is mapped, it may
 		 * still be wired by pmap lookups.  The call to
 		 * vm_page_try_remove_all() below atomically checks for such
 		 * wirings and removes mappings.  If the page is unmapped, the
 		 * wire count is guaranteed not to increase after this check.
 		 */
 		if (__predict_false(vm_page_wired(m)))
 			goto skip_page;
 
 		/*
 		 * Invalid pages can be easily freed. They cannot be
 		 * mapped; vm_page_free() asserts this.
 		 */
 		if (vm_page_none_valid(m))
 			goto free_page;
 
 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
 			 * Check to see if the page has been removed from the
 			 * queue since the first such check.  Leave it alone if
 			 * so, discarding any references collected by
 			 * pmap_ts_referenced().
 			 */
 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
 				goto skip_page;
 
 			new = old;
 			act_delta = refs;
 			if ((old.flags & PGA_REFERENCED) != 0) {
 				new.flags &= ~PGA_REFERENCED;
 				act_delta++;
 			}
 			if (act_delta == 0) {
 				;
 			} else if (object->ref_count != 0) {
 				/*
 				 * Increase the activation count if the
 				 * page was referenced while in the
 				 * inactive queue.  This makes it less
 				 * likely that the page will be returned
 				 * prematurely to the inactive queue.
 				 */
 				new.act_count += ACT_ADVANCE +
 				    act_delta;
 				if (new.act_count > ACT_MAX)
 					new.act_count = ACT_MAX;
 
 				new.flags &= ~PGA_QUEUE_OP_MASK;
 				new.flags |= PGA_REQUEUE;
 				new.queue = PQ_ACTIVE;
 				if (!vm_page_pqstate_commit(m, &old, new))
 					continue;
 
 				VM_CNT_INC(v_reactivated);
 				goto skip_page;
 			} else if ((object->flags & OBJ_DEAD) == 0) {
 				new.queue = PQ_INACTIVE;
 				new.flags |= PGA_REQUEUE;
 				if (!vm_page_pqstate_commit(m, &old, new))
 					continue;
 				goto skip_page;
 			}
 			break;
 		}
 
 		/*
 		 * If the page appears to be clean at the machine-independent
 		 * layer, then remove all of its mappings from the pmap in
 		 * anticipation of freeing it.  If, however, any of the page's
 		 * mappings allow write access, then the page may still be
 		 * modified until the last of those mappings are removed.
 		 */
 		if (object->ref_count != 0) {
 			vm_page_test_dirty(m);
 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
 				goto skip_page;
 		}
 
 		/*
 		 * Clean pages can be freed, but dirty pages must be sent back
 		 * to the laundry, unless they belong to a dead object.
 		 * Requeueing dirty pages from dead objects is pointless, as
 		 * they are being paged out and freed by the thread that
 		 * destroyed the object.
 		 */
 		if (m->dirty == 0) {
 free_page:
 			/*
 			 * Now we are guaranteed that no other threads are
 			 * manipulating the page, check for a last-second
 			 * reference that would save it from doom.
 			 */
 			if (vm_pageout_defer(m, PQ_INACTIVE, false))
 				goto skip_page;
 
 			/*
 			 * Because we dequeued the page and have already checked
 			 * for pending dequeue and enqueue requests, we can
 			 * safely disassociate the page from the inactive queue
 			 * without holding the queue lock.
 			 */
 			m->a.queue = PQ_NONE;
 			vm_page_free(m);
 			page_shortage--;
 			continue;
 		}
 		if ((object->flags & OBJ_DEAD) == 0)
 			vm_page_launder(m);
 skip_page:
 		vm_page_xunbusy(m);
 		continue;
 reinsert:
 		vm_pageout_reinsert_inactive(&ss, &rq, m);
 	}
 	if (object != NULL)
 		VM_OBJECT_WUNLOCK(object);
 	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
 	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
 	vm_pagequeue_lock(pq);
 	vm_pageout_end_scan(&ss);
 	vm_pagequeue_unlock(pq);
 
 	/*
 	 * Record the remaining shortage, the progress made, and the rate at
 	 * which it was made.
 	 */
 	atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
 	getmicrouptime(&end);
 	timevalsub(&end, &start);
 	atomic_add_int(&vmd->vmd_inactive_us,
 	    end.tv_sec * 1000000 + end.tv_usec);
 	atomic_add_int(&vmd->vmd_inactive_freed,
 	    starting_page_shortage - page_shortage);
 }
 
 /*
  * Dispatch a number of inactive threads according to load and collect the
  * results to present a coherent view of paging activity on this domain.
  */
 static int
 vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
 {
 	u_int freed, pps, slop, threads, us;
 
 	vmd->vmd_inactive_shortage = shortage;
 	slop = 0;
 
 	/*
 	 * If we have more work than we can do in a quarter of our interval, we
 	 * fire off multiple threads to process it.
 	 */
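 	/*
 	 * For example, with vmd_inactive_pps at 40000 pages/s and an assumed
 	 * VM_INACT_SCAN_RATE of 10 scans per second, a single thread covers
 	 * roughly 4000 pages per scan, so helper threads are woken once the
 	 * shortage exceeds about 1000 pages (a quarter of that).
 	 */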
 	threads = vmd->vmd_inactive_threads;
 	if (threads > 1 && vmd->vmd_inactive_pps != 0 &&
 	    shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
 		vmd->vmd_inactive_shortage /= threads;
 		slop = shortage % threads;
 		vm_domain_pageout_lock(vmd);
 		blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
 		blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
 		wakeup(&vmd->vmd_inactive_shortage);
 		vm_domain_pageout_unlock(vmd);
 	}
 
 	/* Run the local thread scan. */
 	vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);
 
 	/*
 	 * Block until helper threads report results and then accumulate
 	 * totals.
 	 */
 	blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
 	freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
 	VM_CNT_ADD(v_dfree, freed);
 
 	/*
 	 * Calculate the per-thread paging rate with an exponential decay of
 	 * prior results.  Take care to avoid integer rounding errors with
 	 * large us values.
 	 */
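 	/*
 	 * For example (illustrative numbers only): freeing 1500 pages in
 	 * 250000us gives an instantaneous rate of (1000000 / 250000) * 1500
 	 * = 6000 pages/s; with a prior vmd_inactive_pps of 4000, the decayed
 	 * estimate becomes 4000 / 2 + 6000 / 2 = 5000 pages/s.
 	 */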
 	us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
 	if (us > 1000000)
 		/* Keep rounding to tenths */
 		pps = (freed * 10) / ((us * 10) / 1000000);
 	else
 		pps = (1000000 / us) * freed;
 	vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
 
 	return (shortage - freed);
 }
 
 /*
  * Attempt to reclaim the requested number of pages from the inactive queue.
  * Returns true if the shortage was addressed.
  */
 static int
 vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
 {
 	struct vm_pagequeue *pq;
 	u_int addl_page_shortage, deficit, page_shortage;
 	u_int starting_page_shortage;
 
 	/*
 	 * vmd_pageout_deficit counts the number of pages requested in
 	 * allocations that failed because of a free page shortage.  We assume
 	 * that the allocations will be reattempted and thus include the deficit
 	 * in our scan target.
 	 */
 	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
 	starting_page_shortage = shortage + deficit;
 
 	/*
 	 * Run the inactive scan on as many threads as is necessary.
 	 */
 	page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
 	addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
 
 	/*
 	 * Wake up the laundry thread so that it can perform any needed
 	 * laundering.  If we didn't meet our target, we're in shortfall and
 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
 	 * swap devices are configured, the laundry thread has no work to do, so
 	 * don't bother waking it up.
 	 *
 	 * The laundry thread uses the number of inactive queue scans elapsed
 	 * since the last laundering to determine whether to launder again, so
 	 * keep count.
 	 */
 	if (starting_page_shortage > 0) {
 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
 		vm_pagequeue_lock(pq);
 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
 			if (page_shortage > 0) {
 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
 				VM_CNT_INC(v_pdshortfalls);
 			} else if (vmd->vmd_laundry_request !=
 			    VM_LAUNDRY_SHORTFALL)
 				vmd->vmd_laundry_request =
 				    VM_LAUNDRY_BACKGROUND;
 			wakeup(&vmd->vmd_laundry_request);
 		}
 		vmd->vmd_clean_pages_freed +=
 		    starting_page_shortage - page_shortage;
 		vm_pagequeue_unlock(pq);
 	}
 
 	/*
 	 * Wake up the swapout daemon if we didn't free the targeted number of
 	 * pages.
 	 */
 	if (page_shortage > 0)
 		vm_swapout_run();
 
 	/*
 	 * If the inactive queue scan fails repeatedly to meet its
 	 * target, kill the largest process.
 	 */
 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
 
 	/*
 	 * Reclaim pages by swapping out idle processes, if configured to do so.
 	 */
 	vm_swapout_run_idle();
 
 	/*
 	 * See the description of addl_page_shortage above.
 	 */
 	*addl_shortage = addl_page_shortage + deficit;
 
 	return (page_shortage <= 0);
 }
 
 static int vm_pageout_oom_vote;
 
 /*
  * The pagedaemon threads randomly select one to perform the
  * OOM.  Trying to kill processes before all pagedaemons have
  * failed to reach the free target is premature.
  */
 static void
 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
     int starting_page_shortage)
 {
 	int old_vote;
 
 	if (starting_page_shortage <= 0 || starting_page_shortage !=
 	    page_shortage)
 		vmd->vmd_oom_seq = 0;
 	else
 		vmd->vmd_oom_seq++;
 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
 		if (vmd->vmd_oom) {
 			vmd->vmd_oom = FALSE;
 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
 		}
 		return;
 	}
 
 	/*
 	 * Reset the failure sequence so that another full series of
 	 * failed scans is required before this domain votes for an
 	 * OOM kill again.
 	 */
 	vmd->vmd_oom_seq = 0;
 
 	if (vmd->vmd_oom)
 		return;
 
 	vmd->vmd_oom = TRUE;
 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
 	if (old_vote != vm_ndomains - 1)
 		return;
 
 	/*
 	 * The current pagedaemon thread is the last in the quorum to
 	 * start OOM.  Initiate the selection and signaling of the
 	 * victim.
 	 */
 	vm_pageout_oom(VM_OOM_MEM);
 
 	/*
 	 * After one round of OOM terror, recall our vote.  On the
 	 * next pass, the current pagedaemon will vote again if the
 	 * low memory condition is still present, since vmd_oom is
 	 * false once more.
 	 */
 	vmd->vmd_oom = FALSE;
 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
 }
 
 /*
  * The OOM killer is the page daemon's action of last resort when
  * memory allocation requests have been stalled for a prolonged period
  * of time because it cannot reclaim memory.  This function computes
  * the approximate number of physical pages that could be reclaimed if
  * the specified address space is destroyed.
  *
  * Private, anonymous memory owned by the address space is the
  * principal resource that we expect to recover after an OOM kill.
  * Since the physical pages mapped by the address space's COW entries
  * are typically shared pages, they are unlikely to be released and so
  * they are not counted.
  *
  * To get to the point where the page daemon runs the OOM killer, its
  * efforts to write-back vnode-backed pages may have stalled.  This
  * could be caused by a memory allocation deadlock in the write path
  * that might be resolved by an OOM kill.  Therefore, physical pages
  * belonging to vnode-backed objects are counted, because they might
  * be freed without being written out first if the address space holds
  * the last reference to an unlinked vnode.
  *
  * Similarly, physical pages belonging to OBJT_PHYS objects are
  * counted because the address space might hold the last reference to
  * the object.
  */
 static long
 vm_pageout_oom_pagecount(struct vmspace *vmspace)
 {
 	vm_map_t map;
 	vm_map_entry_t entry;
 	vm_object_t obj;
 	long res;
 
 	map = &vmspace->vm_map;
 	KASSERT(!map->system_map, ("system map"));
 	sx_assert(&map->lock, SA_LOCKED);
 	res = 0;
 	VM_MAP_ENTRY_FOREACH(entry, map) {
 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
 			continue;
 		obj = entry->object.vm_object;
 		if (obj == NULL)
 			continue;
 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
 		    obj->ref_count != 1)
 			continue;
 		if (obj->type == OBJT_PHYS || obj->type == OBJT_VNODE ||
 		    (obj->flags & OBJ_SWAP) != 0)
 			res += obj->resident_page_count;
 	}
 	return (res);
 }
 
 static int vm_oom_ratelim_last;
 static int vm_oom_pf_secs = 10;
 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
     "");
 static struct mtx vm_oom_ratelim_mtx;
 
 void
 vm_pageout_oom(int shortage)
 {
 	const char *reason;
 	struct proc *p, *bigproc;
 	vm_offset_t size, bigsize;
 	struct thread *td;
 	struct vmspace *vm;
 	int now;
 	bool breakout;
 
 	/*
 	 * For OOM requests originating from vm_fault(), there is a high
 	 * chance that a single large process faults simultaneously in
 	 * several threads.  Also, on an active system running many
 	 * processes of middle-size, like buildworld, all of them
 	 * could fault almost simultaneously as well.
 	 *
 	 * To avoid killing too many processes, rate-limit OOMs
 	 * initiated by vm_fault() time-outs on the waits for free
 	 * pages.
 	 */
 	mtx_lock(&vm_oom_ratelim_mtx);
 	now = ticks;
 	if (shortage == VM_OOM_MEM_PF &&
 	    (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
 		mtx_unlock(&vm_oom_ratelim_mtx);
 		return;
 	}
 	vm_oom_ratelim_last = now;
 	mtx_unlock(&vm_oom_ratelim_mtx);
 
 	/*
 	 * We keep the process bigproc locked once we find it to keep anyone
 	 * from messing with it; however, there is a possibility of
 	 * deadlock if process B is bigproc and one of its child processes
 	 * attempts to propagate a signal to B while we are waiting for
 	 * another process's lock while walking this list.  To avoid
 	 * this, we don't block on the process lock but just skip a
 	 * process if it is already locked.
 	 */
 	bigproc = NULL;
 	bigsize = 0;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
 		PROC_LOCK(p);
 
 		/*
 		 * If this is a system, protected or killed process, skip it.
 		 */
 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
 		    p->p_pid == 1 || P_KILLED(p) ||
 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
 			PROC_UNLOCK(p);
 			continue;
 		}
 		/*
 		 * If the process is in a non-running type state,
 		 * don't touch it.  Check all the threads individually.
 		 */
 		breakout = false;
 		FOREACH_THREAD_IN_PROC(p, td) {
 			thread_lock(td);
 			if (!TD_ON_RUNQ(td) &&
 			    !TD_IS_RUNNING(td) &&
 			    !TD_IS_SLEEPING(td) &&
 			    !TD_IS_SUSPENDED(td) &&
 			    !TD_IS_SWAPPED(td)) {
 				thread_unlock(td);
 				breakout = true;
 				break;
 			}
 			thread_unlock(td);
 		}
 		if (breakout) {
 			PROC_UNLOCK(p);
 			continue;
 		}
 		/*
 		 * get the process size
 		 */
 		vm = vmspace_acquire_ref(p);
 		if (vm == NULL) {
 			PROC_UNLOCK(p);
 			continue;
 		}
 		_PHOLD_LITE(p);
 		PROC_UNLOCK(p);
 		sx_sunlock(&allproc_lock);
 		if (!vm_map_trylock_read(&vm->vm_map)) {
 			vmspace_free(vm);
 			sx_slock(&allproc_lock);
 			PRELE(p);
 			continue;
 		}
 		size = vmspace_swap_count(vm);
 		if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
 			size += vm_pageout_oom_pagecount(vm);
 		vm_map_unlock_read(&vm->vm_map);
 		vmspace_free(vm);
 		sx_slock(&allproc_lock);
 
 		/*
 		 * If this process is bigger than the biggest one,
 		 * remember it.
 		 */
 		if (size > bigsize) {
 			if (bigproc != NULL)
 				PRELE(bigproc);
 			bigproc = p;
 			bigsize = size;
 		} else {
 			PRELE(p);
 		}
 	}
 	sx_sunlock(&allproc_lock);
 
 	if (bigproc != NULL) {
 		switch (shortage) {
 		case VM_OOM_MEM:
 			reason = "failed to reclaim memory";
 			break;
 		case VM_OOM_MEM_PF:
 			reason = "a thread waited too long to allocate a page";
 			break;
 		case VM_OOM_SWAPZ:
 			reason = "out of swap space";
 			break;
 		default:
 			panic("unknown OOM reason %d", shortage);
 		}
 		if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
 			panic("%s", reason);
 		PROC_LOCK(bigproc);
 		killproc(bigproc, reason);
 		sched_nice(bigproc, PRIO_MIN);
 		_PRELE(bigproc);
 		PROC_UNLOCK(bigproc);
 	}
 }
 
 /*
  * Signal a free page shortage to subsystems that have registered an event
  * handler.  Reclaim memory from UMA in the event of a severe shortage.
  * Return true if the free page count should be re-evaluated.
  */
 static bool
 vm_pageout_lowmem(void)
 {
 	static int lowmem_ticks = 0;
 	int last;
 	bool ret;
 
 	ret = false;
 
 	last = atomic_load_int(&lowmem_ticks);
 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
 			continue;
 
 		/*
 		 * Decrease registered cache sizes.
 		 */
 		SDT_PROBE0(vm, , , vm__lowmem_scan);
 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
 
 		/*
 		 * We do this explicitly after the caches have been
 		 * drained above.
 		 */
 		uma_reclaim(UMA_RECLAIM_TRIM);
 		ret = true;
 		break;
 	}
 
 	/*
 	 * Kick off an asynchronous reclaim of cached memory if one of the
 	 * page daemons is failing to keep up with demand.  Use the "severe"
 	 * threshold instead of "min" to ensure that we do not blow away the
 	 * caches if a subset of the NUMA domains is depleted by kernel memory
 	 * allocations; the domainset iterators automatically skip domains
 	 * below the "min" threshold on the first pass.
 	 *
 	 * The UMA reclaim worker has its own rate-limiting mechanism, so
 	 * don't worry about kicking it too often.
 	 */
 	if (vm_page_count_severe())
 		uma_reclaim_wakeup();
 
 	return (ret);
 }
 
 static void
 vm_pageout_worker(void *arg)
 {
 	struct vm_domain *vmd;
 	u_int ofree;
 	int addl_shortage, domain, shortage;
 	bool target_met;
 
 	domain = (uintptr_t)arg;
 	vmd = VM_DOMAIN(domain);
 	shortage = 0;
 	target_met = true;
 
 	/*
 	 * XXXKIB It could be useful to bind pageout daemon threads to
 	 * the cores belonging to the domain from which the vm_page_array
 	 * is allocated.
 	 */
 
 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
 	vmd->vmd_last_active_scan = ticks;
 
 	/*
 	 * The pageout daemon worker is never done, so loop forever.
 	 */
 	while (TRUE) {
 		vm_domain_pageout_lock(vmd);
 
 		/*
 		 * We need to clear wanted before we check the limits.  This
 		 * prevents races with wakers who will check wanted after they
 		 * reach the limit.
 		 */
 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
 
 		/*
 		 * Might the page daemon need to run again?
 		 */
 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
 			/*
 			 * Yes.  If the scan failed to produce enough free
 			 * pages, sleep uninterruptibly for some time in the
 			 * hope that the laundry thread will clean some pages.
 			 */
 			vm_domain_pageout_unlock(vmd);
 			if (!target_met)
 				pause("pwait", hz / VM_INACT_SCAN_RATE);
 		} else {
 			/*
 			 * No, sleep until the next wakeup or until pages
 			 * need to have their reference stats updated.
 			 */
 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
 				VM_CNT_INC(v_pdwakeups);
 		}
 
 		/* Prevent spurious wakeups by ensuring that wanted is set. */
 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
 
 		/*
 		 * Use the controller to calculate how many pages to free in
 		 * this interval, and scan the inactive queue.  If the lowmem
 		 * handlers appear to have freed up some pages, subtract the
 		 * difference from the inactive queue scan target.
 		 */
 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
 		if (shortage > 0) {
 			ofree = vmd->vmd_free_count;
 			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
 				shortage -= min(vmd->vmd_free_count - ofree,
 				    (u_int)shortage);
 			target_met = vm_pageout_inactive(vmd, shortage,
 			    &addl_shortage);
 		} else
 			addl_shortage = 0;
 
 		/*
 		 * Scan the active queue.  A positive value for shortage
 		 * indicates that we must aggressively deactivate pages to avoid
 		 * a shortfall.
 		 */
 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
 		vm_pageout_scan_active(vmd, shortage);
 	}
 }
 
 /*
  * vm_pageout_helper runs additional pageout daemons in times of high paging
  * activity.
  */
 static void
 vm_pageout_helper(void *arg)
 {
 	struct vm_domain *vmd;
 	int domain;
 
 	domain = (uintptr_t)arg;
 	vmd = VM_DOMAIN(domain);
 
 	vm_domain_pageout_lock(vmd);
 	for (;;) {
 		msleep(&vmd->vmd_inactive_shortage,
 		    vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
 		blockcount_release(&vmd->vmd_inactive_starting, 1);
 
 		vm_domain_pageout_unlock(vmd);
 		vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
 		vm_domain_pageout_lock(vmd);
 
 		/*
 		 * Release the running count while the pageout lock is held to
 		 * prevent wakeup races.
 		 */
 		blockcount_release(&vmd->vmd_inactive_running, 1);
 	}
 }
 
 static int
 get_pageout_threads_per_domain(const struct vm_domain *vmd)
 {
 	unsigned total_pageout_threads, eligible_cpus, domain_cpus;
 
 	if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
 		return (0);
 
 	/*
 	 * Semi-arbitrarily constrain pagedaemon threads to less than half the
 	 * total number of CPUs in the system as an upper limit.
 	 */
 	if (pageout_cpus_per_thread < 2)
 		pageout_cpus_per_thread = 2;
 	else if (pageout_cpus_per_thread > mp_ncpus)
 		pageout_cpus_per_thread = mp_ncpus;
 
 	total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
 	domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
 
 	/* Pagedaemons are not run in empty domains. */
 	eligible_cpus = mp_ncpus;
 	for (unsigned i = 0; i < vm_ndomains; i++)
 		if (VM_DOMAIN_EMPTY(i))
 			eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
 
 	/*
 	 * Assign a portion of the total pageout threads to this domain
 	 * corresponding to the fraction of pagedaemon-eligible CPUs in the
 	 * domain.  In asymmetric NUMA systems, domains with more CPUs may be
 	 * allocated more threads than domains with fewer CPUs.
 	 */
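 	/*
 	 * For example (illustrative numbers only): with 16 eligible CPUs and
 	 * pageout_cpus_per_thread = 4, there are howmany(16, 4) = 4 pageout
 	 * threads in total, and a domain containing 8 of those CPUs is
 	 * assigned howmany(4 * 8, 16) = 2 of them.
 	 */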
 	return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
 }
 
 /*
  * Initialize basic pageout daemon settings.  See the comment above the
  * definition of vm_domain for some explanation of how these thresholds are
  * used.
  */
 static void
 vm_pageout_init_domain(int domain)
 {
 	struct vm_domain *vmd;
 	struct sysctl_oid *oid;
 
 	vmd = VM_DOMAIN(domain);
 	vmd->vmd_interrupt_free_min = 2;
 
 	/*
 	 * v_free_reserved needs to include enough for the largest
 	 * swap pager structures plus enough for any pv_entry structs
 	 * when paging.
 	 */
 	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
 	    vmd->vmd_interrupt_free_min;
 	vmd->vmd_free_reserved = vm_pageout_page_count +
 	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
 	vmd->vmd_free_min = vmd->vmd_page_count / 200;
 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
 	vmd->vmd_free_min += vmd->vmd_free_reserved;
 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
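 
 	/*
 	 * As a rough illustration (assuming 4KB pages, a MAXBSIZE of 64KB
 	 * and a vm_pageout_page_count of 32), a domain with 4GB of memory
 	 * has about 1048576 pages, yielding vmd_pageout_free_min = 34,
 	 * vmd_free_reserved ~= 1431, vmd_free_min ~= 6673, vmd_free_severe
 	 * ~= 4052 and vmd_free_target ~= 22399 pages, i.e. roughly 2% of
 	 * the domain is kept free.
 	 */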
 
 	/*
 	 * Set the default wakeup threshold to be 10% below the paging
 	 * target.  This keeps the steady state out of shortfall.
 	 */
 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
 
 	/*
 	 * Target amount of memory to move out of the laundry queue during a
 	 * background laundering.  This is proportional to the amount of system
 	 * memory.
 	 */
 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
 	    vmd->vmd_free_min) / 10;
 
 	/* Initialize the pageout daemon pid controller. */
 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
 	    vmd->vmd_free_target, PIDCTRL_BOUND,
 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
 	    "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
 
 	vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
 }
 
 static void
 vm_pageout_init(void)
 {
 	u_long freecount;
 	int i;
 
 	/*
 	 * Initialize some paging parameters.
 	 */
 	if (vm_cnt.v_page_count < 2000)
 		vm_pageout_page_count = 8;
 
 	freecount = 0;
 	for (i = 0; i < vm_ndomains; i++) {
 		struct vm_domain *vmd;
 
 		vm_pageout_init_domain(i);
 		vmd = VM_DOMAIN(i);
 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
 		vm_cnt.v_free_target += vmd->vmd_free_target;
 		vm_cnt.v_free_min += vmd->vmd_free_min;
 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
 		freecount += vmd->vmd_free_count;
 	}
 
 	/*
 	 * Set the interval in seconds for the active scan.  We want to visit
 	 * each page at least once every ten minutes.  This is to prevent
 	 * worst-case paging behavior with a stale active LRU.
 	 */
 	if (vm_pageout_update_period == 0)
 		vm_pageout_update_period = 600;
 
 	/*
 	 * Set the maximum number of user-wired virtual pages.  Historically the
 	 * main source of such pages was mlock(2) and mlockall(2).  Hypervisors
 	 * may also request user-wired memory.
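 	 * By default the limit is 4/5 (80%) of the pages that are free when
 	 * the pageout daemon is initialized.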
 	 */
 	if (vm_page_max_user_wired == 0)
 		vm_page_max_user_wired = 4 * freecount / 5;
 }
 
 /*
  *     vm_pageout is the high level pageout daemon.
  */
 static void
 vm_pageout(void)
 {
 	struct proc *p;
 	struct thread *td;
 	int error, first, i, j, pageout_threads;
 
 	p = curproc;
 	td = curthread;
 
 	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
 	swap_pager_swap_init();
 	for (first = -1, i = 0; i < vm_ndomains; i++) {
 		if (VM_DOMAIN_EMPTY(i)) {
 			if (bootverbose)
 				printf("domain %d empty; skipping pageout\n",
 				    i);
 			continue;
 		}
 		if (first == -1)
 			first = i;
 		else {
 			error = kthread_add(vm_pageout_worker,
 			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
 			if (error != 0)
 				panic("starting pageout for domain %d: %d\n",
 				    i, error);
 		}
 		pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
 		for (j = 0; j < pageout_threads - 1; j++) {
 			error = kthread_add(vm_pageout_helper,
 			    (void *)(uintptr_t)i, p, NULL, 0, 0,
 			    "dom%d helper%d", i, j);
 			if (error != 0)
 				panic("starting pageout helper %d for domain "
 				    "%d: %d\n", j, i, error);
 		}
 		error = kthread_add(vm_pageout_laundry_worker,
 		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
 		if (error != 0)
 			panic("starting laundry for domain %d: %d", i, error);
 	}
 	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
 	if (error != 0)
 		panic("starting uma_reclaim helper, error %d\n", error);
 
 	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
 	vm_pageout_worker((void *)(uintptr_t)first);
 }
 
 /*
  * Perform an advisory wakeup of the page daemon.
  */
 void
 pagedaemon_wakeup(int domain)
 {
 	struct vm_domain *vmd;
 
 	vmd = VM_DOMAIN(domain);
 	vm_domain_pageout_assert_unlocked(vmd);
 	if (curproc == pageproc)
 		return;
 
 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
 		vm_domain_pageout_lock(vmd);
 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
 		wakeup(&vmd->vmd_pageout_wanted);
 		vm_domain_pageout_unlock(vmd);
 	}
 }
diff --git a/sys/vm/vm_pagequeue.h b/sys/vm/vm_pagequeue.h
index a9d4c920e5be..268d53a391db 100644
--- a/sys/vm/vm_pagequeue.h
+++ b/sys/vm/vm_pagequeue.h
@@ -1,468 +1,470 @@
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  * All rights reserved.
  *
  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  *
  * Permission to use, copy, modify and distribute this software and
  * its documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
  *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  *
  * Carnegie Mellon requests users of this software to return to
  *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
  * $FreeBSD$
  */
 
 #ifndef	_VM_PAGEQUEUE_
 #define	_VM_PAGEQUEUE_
 
 #ifdef _KERNEL
 struct vm_pagequeue {
 	struct mtx	pq_mutex;
 	struct pglist	pq_pl;
 	int		pq_cnt;
 	const char	* const pq_name;
 	uint64_t	pq_pdpages;
 } __aligned(CACHE_LINE_SIZE);
 
 #ifndef VM_BATCHQUEUE_SIZE
-#define	VM_BATCHQUEUE_SIZE	7
+#define	VM_BATCHQUEUE_SIZE	15
 #endif
 
 struct vm_batchqueue {
 	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
 	int		bq_cnt;
 } __aligned(CACHE_LINE_SIZE);
 
 #include <vm/uma.h>
 #include <sys/_blockcount.h>
 #include <sys/pidctrl.h>
 struct sysctl_oid;
 
 /*
  * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
  * and accounting.
  *
  * Lock Key:
  * f	vmd_free_mtx
  * p	vmd_pageout_mtx
  * d	vm_domainset_lock
  * a	atomic
  * c	const after boot
  * q	page queue lock
  *
  * A unique page daemon thread manages each vm_domain structure and is
  * responsible for ensuring that some free memory is available by freeing
  * inactive pages and aging active pages.  To decide how many pages to process,
  * it uses thresholds derived from the number of pages in the domain:
  *
  *  vmd_page_count
  *       ---
  *        |
  *        |-> vmd_inactive_target (~3%)
  *        |   - The active queue scan target is given by
  *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
  *        |
  *        |
  *        |-> vmd_free_target (~2%)
  *        |   - Target for page reclamation.
  *        |
  *        |-> vmd_pageout_wakeup_thresh (~1.8%)
  *        |   - Threshold for waking up the page daemon.
  *        |
  *        |
  *        |-> vmd_free_min (~0.5%)
  *        |   - First low memory threshold.
  *        |   - Causes per-CPU caching to be lazily disabled in UMA.
  *        |   - vm_wait() sleeps below this threshold.
  *        |
  *        |-> vmd_free_severe (~0.25%)
  *        |   - Second low memory threshold.
  *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
  *        |     writes.
  *        |
  *        |-> vmd_free_reserved (~0.13%)
  *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
  *        |-> vmd_pageout_free_min (32 + 2 pages)
  *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
  *        |-> vmd_interrupt_free_min (2 pages)
  *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
  *       ---
  *
  *--
  * Free page count regulation:
  *
  * The page daemon attempts to ensure that the free page count is above the free
  * target.  It wakes up periodically (every 100ms) to input the current free
  * page shortage (free_target - free_count) to a PID controller, which in
  * response outputs the number of pages to attempt to reclaim.  The shortage's
  * current magnitude, rate of change, and cumulative value are together used to
  * determine the controller's output.  The page daemon target thus adapts
  * dynamically to the system's demand for free pages, resulting in less
  * burstiness than a simple hysteresis loop.
  *
  * When the free page count drops below the wakeup threshold,
  * vm_domain_allocate() proactively wakes up the page daemon.  This helps ensure
  * that the system responds promptly to a large instantaneous free page
  * shortage.
  *
  * The page daemon also attempts to ensure that some fraction of the system's
  * memory is present in the inactive (I) and laundry (L) page queues, so that it
  * can respond promptly to a sudden free page shortage.  In particular, the page
  * daemon thread aggressively scans active pages so long as the following
  * condition holds:
  *
  *         len(I) + len(L) + free_target - free_count < inactive_target
  *
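  * For example (illustrative numbers), with inactive_target = 30000,
  * free_target = 22000 and free_count = 20000, active pages continue to be
  * deactivated while len(I) + len(L) < 28000.
  *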
  * Otherwise, when the inactive target is met, the page daemon periodically
  * scans a small portion of the active queue in order to maintain up-to-date
  * per-page access history.  Unreferenced pages in the active queue thus
  * eventually migrate to the inactive queue.
  *
  * The per-domain laundry thread periodically launders dirty pages based on the
  * number of clean pages freed by the page daemon since the last laundering.  If
  * the page daemon fails to meet its scan target (i.e., the PID controller
  * output) because of a shortage of clean inactive pages, the laundry thread
  * attempts to launder enough pages to meet the free page target.
  *
  *--
  * Page allocation priorities:
  *
  * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
  * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation can
  * claim any free page.  This priority is used in the pmap layer when attempting
  * to allocate a page for the kernel page tables; in such cases an allocation
  * failure will usually result in a kernel panic.  The system priority is used
  * for most other kernel memory allocations, for instance by UMA's slab
  * allocator or the buffer cache.  Such allocations will fail if the free count
  * is below interrupt_free_min.  All other allocations occur at the normal
  * priority, which is typically used for allocation of user pages, for instance
  * in the page fault handler or when allocating page table pages or pv_entry
  * structures for user pmaps.  Such allocations fail if the free count is below
  * the free_reserved threshold.
  *
  *--
  * Free memory shortages:
  *
  * The system uses the free_min and free_severe thresholds to apply
  * back-pressure and give the page daemon a chance to recover.  When a page
  * allocation fails due to a shortage and the allocating thread cannot handle
  * failure, it may call vm_wait() to sleep until free pages are available.
  * vm_domain_freecnt_inc() wakes sleeping threads once the free page count rises
  * above the free_min threshold; the page daemon and laundry threads are given
  * priority and will wake up once free_count reaches the (much smaller)
  * pageout_free_min threshold.
  *
  * On NUMA systems, the domainset iterators always prefer NUMA domains where the
  * free page count is above the free_min threshold.  This means that given the
  * choice between two NUMA domains, one above the free_min threshold and one
  * below, the former will be used to satisfy the allocation request regardless
  * of the domain selection policy.
  *
  * In addition to reclaiming memory from the page queues, the vm_lowmem event
  * fires every ten seconds so long as the system is under memory pressure (i.e.,
  * vmd_free_count < vmd_free_target).  This allows kernel subsystems to register
  * for notifications of free page shortages, upon which they may shrink their
  * caches.  Following a vm_lowmem event, UMA's caches are pruned to ensure that
  * they do not contain an excess of unused memory.  When a domain is below the
  * free_min threshold, UMA limits the population of per-CPU caches.  When a
  * domain falls below the free_severe threshold, UMA's caches are completely
  * drained.
  *
  * If the system encounters a global memory shortage, it may resort to the
  * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
  * last-ditch attempt to free up some pages.  Either of the two following
  * conditions will activate the OOM killer:
  *
  *  1. The page daemons collectively fail to reclaim any pages during their
  *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
  *     the page daemon thread votes for an OOM kill, and an OOM kill is
  *     triggered when all page daemons have voted.  This heuristic is strict and
  *     may fail to trigger even when the system is effectively deadlocked.
  *
  *  2. Threads in the user fault handler are repeatedly unable to make progress
  *     while allocating a page to satisfy the fault.  After
  *     vm_pfault_oom_attempts page allocation failures with intervening
  *     vm_wait() calls, the faulting thread will trigger an OOM kill.
  */
 struct vm_domain {
 	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
 	struct mtx_padalign vmd_free_mtx;
 	struct mtx_padalign vmd_pageout_mtx;
 	struct vm_pgcache {
 		int domain;
 		int pool;
 		uma_zone_t zone;
 	} vmd_pgcache[VM_NFREEPOOL];
 	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
 	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
 	u_int vmd_domain;		/* (c) Domain number. */
 	u_int vmd_page_count;		/* (c) Total page count. */
 	long vmd_segs;			/* (c) bitmask of the segments */
 	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
 	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
 	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
 
 	/* Paging control variables, used within single threaded page daemon. */
 	struct pidctrl vmd_pid;		/* Pageout controller. */
 	boolean_t vmd_oom;
 	u_int vmd_inactive_threads;
 	u_int vmd_inactive_shortage;		/* Per-thread shortage. */
 	blockcount_t vmd_inactive_running;	/* Number of inactive threads. */
 	blockcount_t vmd_inactive_starting;	/* Number of threads started. */
 	volatile u_int vmd_addl_shortage;	/* Shortage accumulator. */
 	volatile u_int vmd_inactive_freed;	/* Successful inactive frees. */
 	volatile u_int vmd_inactive_us;		/* Microseconds for above. */
 	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
 	int vmd_oom_seq;
 	int vmd_last_active_scan;
 	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
 	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
 	struct vm_page vmd_clock[2]; /* markers for active queue scan */
 
 	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
 	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
 	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
 	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
 	enum {
 		VM_LAUNDRY_IDLE = 0,
 		VM_LAUNDRY_BACKGROUND,
 		VM_LAUNDRY_SHORTFALL
 	} vmd_laundry_request;
 
 	/* Paging thresholds and targets. */
 	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
 	u_int vmd_background_launder_target; /* (c) */
 	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
 	u_int vmd_free_target;		/* (c) pages desired free */
 	u_int vmd_free_min;		/* (c) pages desired free */
 	u_int vmd_inactive_target;	/* (c) pages desired inactive */
 	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
 	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
 	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
 	u_int vmd_free_severe;		/* (c) severe page depletion point */
 
 	/* Name for sysctl etc. */
 	struct sysctl_oid *vmd_oid;
 	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
 } __aligned(CACHE_LINE_SIZE);
 
 extern struct vm_domain vm_dom[MAXMEMDOM];
 
 #define	VM_DOMAIN(n)		(&vm_dom[(n)])
 #define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)
 
 #define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
 #define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
 #define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
 #define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
 #define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
 
 #define	vm_domain_free_assert_locked(n)					\
 	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
 #define	vm_domain_free_assert_unlocked(n)				\
 	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
 #define	vm_domain_free_lock(d)						\
 	    mtx_lock(vm_domain_free_lockptr((d)))
 #define	vm_domain_free_lockptr(d)					\
 	    (&(d)->vmd_free_mtx)
 #define	vm_domain_free_trylock(d)					\
 	    mtx_trylock(vm_domain_free_lockptr((d)))
 #define	vm_domain_free_unlock(d)					\
 	    mtx_unlock(vm_domain_free_lockptr((d)))
 
 #define	vm_domain_pageout_lockptr(d)					\
 	    (&(d)->vmd_pageout_mtx)
 #define	vm_domain_pageout_assert_locked(n)				\
 	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
 #define	vm_domain_pageout_assert_unlocked(n)				\
 	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
 #define	vm_domain_pageout_lock(d)					\
 	    mtx_lock(vm_domain_pageout_lockptr((d)))
 #define	vm_domain_pageout_unlock(d)					\
 	    mtx_unlock(vm_domain_pageout_lockptr((d)))
 
 static __inline void
 vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
 {
 
 	vm_pagequeue_assert_locked(pq);
 	pq->pq_cnt += addend;
 }
 #define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
 #define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
 
 static inline void
 vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
 {
 
 	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 	vm_pagequeue_cnt_dec(pq);
 }
 
 static inline void
 vm_batchqueue_init(struct vm_batchqueue *bq)
 {
 
 	bq->bq_cnt = 0;
 }
 
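+/*
+ * Attempt to insert a page into the batch queue.  Returns 0 if the queue
+ * was already full (the page was not inserted); otherwise returns the
+ * number of free slots at the time of insertion, letting callers flush
+ * the batch pre-emptively as it approaches capacity.
+ */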
-static inline bool
+static inline int
 vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
 {
+	int slots_free;
 
-	if (bq->bq_cnt < nitems(bq->bq_pa)) {
+	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
+	if (slots_free > 0) {
 		bq->bq_pa[bq->bq_cnt++] = m;
-		return (true);
+		return (slots_free);
 	}
-	return (false);
+	return (slots_free);
 }
 
 static inline vm_page_t
 vm_batchqueue_pop(struct vm_batchqueue *bq)
 {
 
 	if (bq->bq_cnt == 0)
 		return (NULL);
 	return (bq->bq_pa[--bq->bq_cnt]);
 }
 
 void vm_domain_set(struct vm_domain *vmd);
 void vm_domain_clear(struct vm_domain *vmd);
 int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
 
 /*
  *      vm_pagequeue_domain:
  *
  *      Return the memory domain the page belongs to.
  */
 static inline struct vm_domain *
 vm_pagequeue_domain(vm_page_t m)
 {
 
 	return (VM_DOMAIN(vm_page_domain(m)));
 }
 
 /*
  * Return the number of pages we need to free up or cache.
  * A positive number indicates that we do not have enough free pages.
  */
 static inline int
 vm_paging_target(struct vm_domain *vmd)
 {
 
 	return (vmd->vmd_free_target - vmd->vmd_free_count);
 }
 
 /*
  * Returns TRUE if the pagedaemon needs to be woken up.
  */
 static inline int
 vm_paging_needed(struct vm_domain *vmd, u_int free_count)
 {
 
 	return (free_count < vmd->vmd_pageout_wakeup_thresh);
 }
 
 /*
  * Returns TRUE if the domain is below the min paging target.
  */
 static inline int
 vm_paging_min(struct vm_domain *vmd)
 {
 
         return (vmd->vmd_free_min > vmd->vmd_free_count);
 }
 
 /*
  * Returns TRUE if the domain is below the severe paging target.
  */
 static inline int
 vm_paging_severe(struct vm_domain *vmd)
 {
 
         return (vmd->vmd_free_severe > vmd->vmd_free_count);
 }
 
 /*
  * Return the number of pages we need to launder.
  * A positive number indicates that we have a shortfall of clean pages.
  */
 static inline int
 vm_laundry_target(struct vm_domain *vmd)
 {
 
 	return (vm_paging_target(vmd));
 }
 
 void pagedaemon_wakeup(int domain);
 
 static inline void
 vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
 {
 	u_int old, new;
 
 	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
 	new = old + adj;
 	/*
 	 * Only update bitsets on transitions.  Notice we short-circuit the
 	 * rest of the checks if we're above min already.
 	 */
 	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
 	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
 	    (old < vmd->vmd_pageout_free_min &&
 	    new >= vmd->vmd_pageout_free_min)))
 		vm_domain_clear(vmd);
 }
 
 #endif	/* _KERNEL */
 #endif				/* !_VM_PAGEQUEUE_ */