Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2675,7 +2675,7 @@
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
 			PMAP_ASSERT_NOT_IN_DI();
-			VM_WAIT;
+			vm_wait();
 			PMAP_LOCK(pmap);
 		}
 
Index: sys/arm/arm/pmap-v4.c
===================================================================
--- sys/arm/arm/pmap-v4.c
+++ sys/arm/arm/pmap-v4.c
@@ -3248,7 +3248,7 @@
 			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 				PMAP_UNLOCK(pmap);
 				rw_wunlock(&pvh_global_lock);
-				VM_WAIT;
+				vm_wait();
 				rw_wlock(&pvh_global_lock);
 				PMAP_LOCK(pmap);
 				goto do_l2b_alloc;
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -2478,7 +2478,7 @@
 			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 				PMAP_UNLOCK(pmap);
 				rw_wunlock(&pvh_global_lock);
-				VM_WAIT;
+				vm_wait();
 				rw_wlock(&pvh_global_lock);
 				PMAP_LOCK(pmap);
 			}
Index: sys/arm/nvidia/drm2/tegra_bo.c
===================================================================
--- sys/arm/nvidia/drm2/tegra_bo.c
+++ sys/arm/nvidia/drm2/tegra_bo.c
@@ -114,7 +114,7 @@
 		if (tries < 3) {
 			if (!vm_page_reclaim_contig(pflags, npages, low, high,
 			    alignment, boundary))
-				VM_WAIT;
+				vm_wait();
 			tries++;
 			goto retry;
 		}
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1409,7 +1409,7 @@
 	 */
 	while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-		VM_WAIT;
+		vm_wait();
 
 	l0phys = VM_PAGE_TO_PHYS(l0pt);
 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
@@ -1449,7 +1449,7 @@
 		if (lockp != NULL) {
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
-			VM_WAIT;
+			vm_wait();
 			PMAP_LOCK(pmap);
 		}
 
Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -101,7 +101,7 @@
 				if (flags & M_WAITOK) {
 					if (!vm_page_reclaim_contig(req,
 					    npages, 0, pmax, PAGE_SIZE, 0)) {
-						VM_WAIT;
+						vm_wait();
 					}
 					flags &= ~M_WAITOK;
 					goto retry;
Index: sys/dev/drm2/i915/i915_gem.c
===================================================================
--- sys/dev/drm2/i915/i915_gem.c
+++ sys/dev/drm2/i915/i915_gem.c
@@ -1561,7 +1561,7 @@
 		i915_gem_object_unpin(obj);
 		DRM_UNLOCK(dev);
 		VM_OBJECT_WUNLOCK(vm_obj);
-		VM_WAIT;
+		vm_wait_for_obj(vm_obj);
 		goto retry;
 	}
 	page->valid = VM_PAGE_BITS_ALL;
Index: sys/dev/drm2/i915/i915_gem_gtt.c
===================================================================
--- sys/dev/drm2/i915/i915_gem_gtt.c
+++ sys/dev/drm2/i915/i915_gem_gtt.c
@@ -589,7 +589,7 @@
 		if (tries < 1) {
 			if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
 			    PAGE_SIZE, 0))
-				VM_WAIT;
+				vm_wait();
 			tries++;
 			goto retry;
 		}
Index: sys/dev/drm2/ttm/ttm_bo_vm.c
===================================================================
--- sys/dev/drm2/ttm/ttm_bo_vm.c
+++ sys/dev/drm2/ttm/ttm_bo_vm.c
@@ -246,7 +246,7 @@
 	if (m1 == NULL) {
 		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
 			VM_OBJECT_WUNLOCK(vm_obj);
-			VM_WAIT;
+			vm_wait_for_obj(vm_obj);
 			VM_OBJECT_WLOCK(vm_obj);
 			ttm_mem_io_unlock(man);
 			ttm_bo_unreserve(bo);
Index: sys/dev/drm2/ttm/ttm_page_alloc.c
===================================================================
--- sys/dev/drm2/ttm/ttm_page_alloc.c
+++ sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -168,7 +168,7 @@
 			return (p);
 		if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
 		    PAGE_SIZE, 0))
-			VM_WAIT;
+			vm_wait();
 	}
 }
 
@@ -181,7 +181,7 @@
 		p = vm_page_alloc(NULL, 0, req);
 		if (p != NULL)
 			break;
-		VM_WAIT;
+		vm_wait();
 	}
 	pmap_page_set_memattr(p, memattr);
 	return (p);
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -1893,10 +1893,9 @@
 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (m == NULL)
-			VM_WAIT;
-		else {
+			vm_wait();
+		else
 			ptdpg[i++] = m;
-		}
 	}
 
 	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
@@ -1945,7 +1944,7 @@
 		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
-			VM_WAIT;
+			vm_wait();
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -1050,11 +1050,11 @@
 {
 
 #ifdef __mips_n64
-	VM_WAIT;
+	vm_wait();
 #else
 	if (!vm_page_reclaim_contig(req, 1, 0, MIPS_KSEG0_LARGEST_PHYS,
 	    PAGE_SIZE, 0))
-		VM_WAIT;
+		vm_wait();
 #endif
 }
 
Index: sys/mips/mips/uma_machdep.c
===================================================================
--- sys/mips/mips/uma_machdep.c
+++ sys/mips/mips/uma_machdep.c
@@ -67,13 +67,11 @@
 		    0, MIPS_KSEG0_LARGEST_PHYS, PAGE_SIZE, 0))
 			continue;
 #endif
-		if (m == NULL) {
-			if (wait & M_NOWAIT)
-				return (NULL);
-			else
-				VM_WAIT;
-		} else
+		if (m != NULL)
 			break;
+		if ((wait & M_NOWAIT) != 0)
+			return (NULL);
+		vm_wait();
 	}
 
 	pa = VM_PAGE_TO_PHYS(m);
Index: sys/powerpc/aim/mmu_oea.c
===================================================================
--- sys/powerpc/aim/mmu_oea.c
+++ sys/powerpc/aim/mmu_oea.c
@@ -1124,7 +1124,7 @@
 		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
 			return (KERN_RESOURCE_SHORTAGE);
 		VM_OBJECT_ASSERT_UNLOCKED(m->object);
-		VM_WAIT;
+		vm_wait();
 	}
 }
 
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1383,7 +1383,7 @@
 		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
 			return (KERN_RESOURCE_SHORTAGE);
 		VM_OBJECT_ASSERT_UNLOCKED(m->object);
-		VM_WAIT;
+		vm_wait();
 	}
 
 	/*
Index: sys/powerpc/booke/pmap.c
===================================================================
--- sys/powerpc/booke/pmap.c
+++ sys/powerpc/booke/pmap.c
@@ -789,7 +789,7 @@
 				vm_wire_sub(i);
 				return (NULL);
 			}
-			VM_WAIT;
+			vm_wait();
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
@@ -1033,7 +1033,7 @@
 				vm_wire_sub(i);
 				return (NULL);
 			}
-			VM_WAIT;
+			vm_wait();
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
@@ -1346,7 +1346,7 @@
 		req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 		while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
 			PMAP_UNLOCK(pmap);
-			VM_WAIT;
+			vm_wait();
 			PMAP_LOCK(pmap);
 		}
 		mtbl[i] = m;
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -1203,7 +1203,7 @@
 	 */
-	while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL |
+	while ((l1pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-		VM_WAIT;
+		vm_wait();
 
 	l1phys = VM_PAGE_TO_PHYS(l1pt);
 	pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
@@ -1252,7 +1252,7 @@
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
 			rw_runlock(&pvh_global_lock);
-			VM_WAIT;
+			vm_wait();
 			rw_rlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -787,7 +787,7 @@
 			}
 			if (fs.m == NULL) {
 				unlock_and_deallocate(&fs);
-				VM_WAITPFAULT;
+				vm_waitpfault();
 				goto RetryFault;
 			}
 		}
@@ -1685,7 +1685,7 @@
 			if (dst_m == NULL) {
 				VM_OBJECT_WUNLOCK(dst_object);
 				VM_OBJECT_RUNLOCK(object);
-				VM_WAIT;
+				vm_wait_for_obj(dst_object);
 				VM_OBJECT_WLOCK(dst_object);
 				goto again;
 			}
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2567,7 +2567,7 @@
  *	Returns true if reclamation is successful and false otherwise.  Since
  *	relocation requires the allocation of physical pages, reclamation may
  *	fail due to a shortage of free pages.  When reclamation fails, callers
- *	are expected to perform VM_WAIT before retrying a failed allocation
+ *	are expected to perform vm_wait() before retrying a failed allocation
  *	operation, e.g., vm_page_alloc_contig().
  *
  *	The caller must always specify an allocation class through "req".
@@ -2804,13 +2804,13 @@
 }
 
 /*
- *	vm_wait:	(also see VM_WAIT macro)
+ *	vm_wait_doms:
  *
- *	Sleep until free pages are available for allocation.
- *	- Called in various places after failed memory allocations.
+ *	Sleep until free pages are available for allocation in the
+ *	given set of domains.
  */
-void
-vm_wait(void)
+static void
+vm_wait_doms(const domainset_t *wdoms)
 {
 
 	/*
@@ -2834,7 +2834,7 @@
 		 * consume all freed pages while old allocators wait.
 		 */
 		mtx_lock(&vm_domainset_lock);
-		if (vm_page_count_min()) {
+		if (DOMAINSET_SUBSET(&vm_min_domains, wdoms)) {
 			vm_min_waiters++;
 			msleep(&vm_min_domains, &vm_domainset_lock, PVM,
 			    "vmwait", 0);
@@ -2843,6 +2843,30 @@
 	}
 }
 
+void
+vm_wait(void)
+{
+
+	vm_wait_for_obj(NULL);
+}
+
+/*
+ *	vm_wait_for_obj:
+ *
+ *	Sleep until free pages are available for allocation, using the
+ *	domain set of the given object, or the calling thread's domain
+ *	set when "obj" is NULL or has no policy assigned.
+ */
+void
+vm_wait_for_obj(vm_object_t obj)
+{
+	struct domainset_ref *ds;
+
+	ds = obj == NULL || obj->domain.dr_policy == NULL ?
+	    &curthread->td_domain : &obj->domain;
+	vm_wait_doms(&ds->dr_policy->ds_mask);
+}
+
 /*
  *	vm_domain_alloc_fail:
  *
@@ -2877,7 +2901,7 @@
 }
 
 /*
- *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
+ *	vm_waitpfault:
  *
  *	Sleep until free pages are available for allocation.
  *	- Called only in vm_fault so that processes page faulting
Index: sys/vm/vm_pageout.h
===================================================================
--- sys/vm/vm_pageout.h
+++ sys/vm/vm_pageout.h
@@ -95,9 +95,8 @@
 
 void pagedaemon_wait(int domain, int pri, const char *wmesg);
 void pagedaemon_wakeup(int domain);
-#define VM_WAIT vm_wait()
-#define VM_WAITPFAULT vm_waitpfault()
 void vm_wait(void);
+void vm_wait_for_obj(vm_object_t obj);
 void vm_waitpfault(void);
 void vm_wait_domain(int domain);
 void vm_wait_min(void);
Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -1802,7 +1802,7 @@
 		 * them.  However, in the following case, this wakeup serves
 		 * to bound the amount of time that a thread might wait.
 		 * Suppose a thread's call to vm_page_alloc() fails, but
-		 * before that thread calls VM_WAIT, enough pages are freed by
+		 * before that thread calls vm_wait(), enough pages are freed by
 		 * other threads to alleviate the free page shortage.  The
 		 * thread will, nonetheless, wait until another page is freed
 		 * or this wakeup is performed.
@@ -1840,7 +1840,7 @@
 			pass++;
 		} else {
 			/*
-			 * Yes.  If threads are still sleeping in VM_WAIT
+			 * Yes.  If threads are still sleeping in vm_wait()
 			 * then we immediately start a new scan.  Otherwise,
 			 * sleep until the next wakeup or until pages need to
 			 * have their reference stats updated.