Index: head/sys/amd64/amd64/efirt_machdep.c
===================================================================
--- head/sys/amd64/amd64/efirt_machdep.c
+++ head/sys/amd64/amd64/efirt_machdep.c
@@ -74,8 +74,7 @@
     VM_OBJECT_RLOCK(obj_1t1_pt);
     TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
         m->wire_count = 0;
-    atomic_subtract_int(&vm_cnt.v_wire_count,
-        obj_1t1_pt->resident_page_count);
+    vm_wire_sub(obj_1t1_pt->resident_page_count);
     VM_OBJECT_RUNLOCK(obj_1t1_pt);
     vm_object_deallocate(obj_1t1_pt);
 }
Index: head/sys/amd64/amd64/pmap.c
===================================================================
--- head/sys/amd64/amd64/pmap.c
+++ head/sys/amd64/amd64/pmap.c
@@ -1246,7 +1246,7 @@
         mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
         mpte->wire_count = 1;
     }
-    atomic_add_int(&vm_cnt.v_wire_count, nkpt);
+    vm_wire_add(nkpt);

     /*
      * If the kernel is running on a virtual machine, then it must assume
@@ -2381,7 +2381,7 @@
         /* Preserve the page's PG_ZERO setting. */
         vm_page_free_toq(m);
     }
-    atomic_subtract_int(&vm_cnt.v_wire_count, count);
+    vm_wire_sub(count);
 }

 /*
Index: head/sys/arm/arm/pmap-v6.c
===================================================================
--- head/sys/arm/arm/pmap-v6.c
+++ head/sys/arm/arm/pmap-v6.c
@@ -2634,11 +2634,12 @@
     pmap->pm_stats.resident_count--;

     /*
-     * This is a release store so that the ordinary store unmapping
+     * This barrier is so that the ordinary store unmapping
      * the L2 page table page is globally performed before TLB shoot-
      * down is begun.
      */
-    atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
+    wmb();
+    vm_wire_sub(1);
 }

 /*
@@ -2945,7 +2946,7 @@
         SLIST_REMOVE_HEAD(&free, plinks.s.ss);
         /* Recycle a freed page table page. */
         m_pc->wire_count = 1;
-        atomic_add_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_add(1);
     }
     pmap_free_zero_pages(&free);
     return (m_pc);
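The pmap-v6.c hunk above is the one place where the conversion is not purely mechanical: atomic_subtract_rel_int() carried release semantics, while counter(9) updates are plain per-CPU stores with no ordering guarantees, so an explicit wmb() now keeps the PTE-clearing store globally visible before the TLB shootdown begins. A minimal userland model of the two orderings follows; the names (unwire_legacy, unwire_pcpu, wire_pcpu_slot) are illustrative, not kernel code, and the C11 release fence stands in for wmb().

    #include <stdatomic.h>

    static _Atomic unsigned int wire_legacy;  /* old: one shared word */
    static unsigned long wire_pcpu_slot;      /* new: this CPU's counter slot */

    /* Old path: the release subtraction itself ordered the preceding store. */
    static void
    unwire_legacy(void)
    {
        atomic_fetch_sub_explicit(&wire_legacy, 1, memory_order_release);
    }

    /* New path: the per-CPU update is unordered, so fence first (the wmb()). */
    static void
    unwire_pcpu(void)
    {
        atomic_thread_fence(memory_order_release);
        wire_pcpu_slot--;
    }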
Index: head/sys/arm64/arm64/efirt_machdep.c
===================================================================
--- head/sys/arm64/arm64/efirt_machdep.c
+++ head/sys/arm64/arm64/efirt_machdep.c
@@ -75,8 +75,7 @@
     VM_OBJECT_RLOCK(obj_1t1_pt);
     TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
         m->wire_count = 0;
-    atomic_subtract_int(&vm_cnt.v_wire_count,
-        obj_1t1_pt->resident_page_count);
+    vm_wire_sub(obj_1t1_pt->resident_page_count);
     VM_OBJECT_RUNLOCK(obj_1t1_pt);
     vm_object_deallocate(obj_1t1_pt);
 }
Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -1362,7 +1362,7 @@
     }
     pmap_invalidate_page(pmap, va);

-    atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+    vm_wire_sub(1);

     /*
      * Put page on a list so that it is released after
@@ -1907,7 +1907,7 @@
         SLIST_REMOVE_HEAD(&free, plinks.s.ss);
         /* Recycle a freed page table page. */
         m_pc->wire_count = 1;
-        atomic_add_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_add(1);
     }
     pmap_free_zero_pages(&free);
     return (m_pc);
@@ -1958,7 +1958,7 @@
     /* entire chunk is free, return it */
     m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
     dump_drop_page(m->phys_addr);
-    vm_page_unwire(m, PQ_NONE);
+    vm_page_unwire_noq(m);
     vm_page_free(m);
 }

@@ -2264,9 +2264,9 @@
             pmap_resident_count_dec(pmap, 1);
             KASSERT(ml3->wire_count == NL3PG,
                 ("pmap_remove_pages: l3 page wire count error"));
-            ml3->wire_count = 0;
+            ml3->wire_count = 1;
+            vm_page_unwire_noq(ml3);
             pmap_add_delayed_free_list(ml3, free, FALSE);
-            atomic_subtract_int(&vm_cnt.v_wire_count, 1);
         }
     }
     return (pmap_unuse_pt(pmap, sva, l1e, free));
@@ -3711,11 +3711,10 @@
                 pmap_resident_count_dec(pmap, 1);
                 KASSERT(ml3->wire_count == NL3PG,
                     ("pmap_remove_pages: l3 page wire count error"));
-                ml3->wire_count = 0;
+                ml3->wire_count = 1;
+                vm_page_unwire_noq(ml3);
                 pmap_add_delayed_free_list(ml3, &free, FALSE);
-                atomic_subtract_int(
-                    &vm_cnt.v_wire_count, 1);
             }
             break;
         case 2:
Index: head/sys/compat/linprocfs/linprocfs.c
===================================================================
--- head/sys/compat/linprocfs/linprocfs.c
+++ head/sys/compat/linprocfs/linprocfs.c
@@ -163,7 +163,7 @@
      * is very little memory left, so we cheat and tell them that
      * all memory that isn't wired down is free.
      */
-    memused = vm_cnt.v_wire_count * PAGE_SIZE;
+    memused = vm_wire_count() * PAGE_SIZE;
     memfree = memtotal - memused;
     swap_pager_status(&i, &j);
     swaptotal = (unsigned long long)i * PAGE_SIZE;
Index: head/sys/compat/linux/linux_misc.c
===================================================================
--- head/sys/compat/linux/linux_misc.c
+++ head/sys/compat/linux/linux_misc.c
@@ -165,7 +165,7 @@
         LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

     sysinfo.totalram = physmem * PAGE_SIZE;
-    sysinfo.freeram = sysinfo.totalram - vm_cnt.v_wire_count * PAGE_SIZE;
+    sysinfo.freeram = sysinfo.totalram - vm_wire_count() * PAGE_SIZE;

     sysinfo.sharedram = 0;
     mtx_lock(&vm_object_list_mtx);
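With this change, linprocfs and the Linux sysinfo() emulation read the wired-page total through vm_wire_count(), which sums one slot per CPU instead of loading a single shared word. The result is a snapshot that may be slightly stale under concurrent updates, which is fine for memory reporting. A simplified userland model of the fetch side; MODEL_MAXCPU, wire_slot, and model_wire_fetch are illustrative stand-ins, not the counter(9) internals.

    #include <stdint.h>

    #define MODEL_MAXCPU 64                  /* illustrative; not mp_maxid */

    /* One slot per CPU; the real slots are padded onto separate cache lines. */
    static uint64_t wire_slot[MODEL_MAXCPU];

    /* Sum the slots the way a counter fetch does: racy reads, snapshot result. */
    static uint64_t
    model_wire_fetch(int ncpus)
    {
        uint64_t sum = 0;

        for (int i = 0; i < ncpus; i++)
            sum += wire_slot[i];
        return (sum);
    }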
Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -1718,7 +1718,7 @@
         /* Preserve the page's PG_ZERO setting. */
         vm_page_free_toq(m);
     }
-    atomic_subtract_int(&vm_cnt.v_wire_count, count);
+    vm_wire_sub(count);
 }

 /*
Index: head/sys/kern/kern_mib.c
===================================================================
--- head/sys/kern/kern_mib.c
+++ head/sys/kern/kern_mib.c
@@ -206,7 +206,7 @@
 {
     u_long val;

-    val = ctob(physmem - vm_cnt.v_wire_count);
+    val = ctob(physmem - vm_wire_count());
     return (sysctl_handle_long(oidp, &val, 0, req));
 }

Index: head/sys/kern/subr_pcpu.c
===================================================================
--- head/sys/kern/subr_pcpu.c
+++ head/sys/kern/subr_pcpu.c
@@ -151,7 +151,7 @@
     pcpu_zone_ptr = uma_zcreate("ptr pcpu", sizeof(void *),
         NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 }
-SYSINIT(pcpu_zones, SI_SUB_KMEM, SI_ORDER_ANY, pcpu_zones_startup, NULL);
+SYSINIT(pcpu_zones, SI_SUB_VM, SI_ORDER_ANY, pcpu_zones_startup, NULL);

 /*
  * First-fit extent based allocator for allocating space in the per-cpu
Index: head/sys/kern/vfs_bio.c
===================================================================
--- head/sys/kern/vfs_bio.c
+++ head/sys/kern/vfs_bio.c
@@ -4552,7 +4552,7 @@
         p->wire_count--;
         vm_page_free(p);
     }
-    atomic_subtract_int(&vm_cnt.v_wire_count, bp->b_npages - newnpages);
+    vm_wire_sub(bp->b_npages - newnpages);
     bp->b_npages = newnpages;
 }
Index: head/sys/mips/mips/pmap.c
===================================================================
--- head/sys/mips/mips/pmap.c
+++ head/sys/mips/mips/pmap.c
@@ -1009,7 +1009,7 @@
      * If the page is finally unwired, simply free it.
      */
     vm_page_free_zero(m);
-    atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+    vm_wire_sub(1);
 }

 /*
Index: head/sys/powerpc/booke/pmap.c
===================================================================
--- head/sys/powerpc/booke/pmap.c
+++ head/sys/powerpc/booke/pmap.c
@@ -681,7 +681,7 @@
         pa = pte_vatopa(mmu, kernel_pmap, va);
         m = PHYS_TO_VM_PAGE(pa);
         vm_page_free_zero(m);
-        atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_sub(1);
         pmap_kremove(va);
     }

@@ -786,7 +786,7 @@
                 ptbl_free_pmap_ptbl(pmap, ptbl);
                 for (j = 0; j < i; j++)
                     vm_page_free(mtbl[j]);
-                atomic_subtract_int(&vm_cnt.v_wire_count, i);
+                vm_wire_sub(i);
                 return (NULL);
             }
             VM_WAIT;
@@ -828,7 +828,7 @@
         pa = pte_vatopa(mmu, kernel_pmap, va);
         m = PHYS_TO_VM_PAGE(pa);
         vm_page_free_zero(m);
-        atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_sub(1);
         pmap_kremove(va);
     }

@@ -1030,7 +1030,7 @@
                 ptbl_free_pmap_ptbl(pmap, ptbl);
                 for (j = 0; j < i; j++)
                     vm_page_free(mtbl[j]);
-                atomic_subtract_int(&vm_cnt.v_wire_count, i);
+                vm_wire_sub(i);
                 return (NULL);
             }
             VM_WAIT;
@@ -1091,7 +1091,7 @@
         pa = pte_vatopa(mmu, kernel_pmap, va);
         m = PHYS_TO_VM_PAGE(pa);
         vm_page_free_zero(m);
-        atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_sub(1);
         mmu_booke_kremove(mmu, va);
     }
Index: head/sys/riscv/riscv/pmap.c
===================================================================
--- head/sys/riscv/riscv/pmap.c
+++ head/sys/riscv/riscv/pmap.c
@@ -1153,7 +1153,7 @@
     }
     pmap_invalidate_page(pmap, va);

-    atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+    vm_wire_sub(1);

     /*
      * Put page on a list so that it is released after
Index: head/sys/sparc64/sparc64/pmap.c
===================================================================
--- head/sys/sparc64/sparc64/pmap.c
+++ head/sys/sparc64/sparc64/pmap.c
@@ -1308,8 +1308,7 @@
     while (!TAILQ_EMPTY(&obj->memq)) {
         m = TAILQ_FIRST(&obj->memq);
         m->md.pmap = NULL;
-        m->wire_count--;
-        atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+        vm_page_unwire_noq(m);
         vm_page_free_zero(m);
     }
     VM_OBJECT_WUNLOCK(obj);
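The sparc64 hunk above, like the arm64 pmap_remove_pages() hunks earlier, replaces an open-coded pair of decrements with vm_page_unwire_noq(), so the per-page wire count and the global count are kept paired inside one helper and the page is not put back on a paging queue. Schematically, this is the transformation (a sketch drawn from the sparc64 hunk, not a new hunk):

    /* Before: the caller pairs the two decrements by hand. */
    m->wire_count--;
    atomic_subtract_int(&vm_cnt.v_wire_count, 1);

    /* After: one call drops the wiring and debits the global count. */
    vm_page_unwire_noq(m);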
Index: head/sys/sys/vmmeter.h
===================================================================
--- head/sys/sys/vmmeter.h
+++ head/sys/sys/vmmeter.h
@@ -125,6 +125,7 @@
     counter_u64_t v_vforkpages;    /* (p) pages affected by vfork() */
     counter_u64_t v_rforkpages;    /* (p) pages affected by rfork() */
     counter_u64_t v_kthreadpages;  /* (p) ... and by kernel fork() */
+    counter_u64_t v_wire_count;    /* (p) pages wired down */
 #define VM_METER_NCOUNTERS \
     (offsetof(struct vmmeter, v_page_size) / sizeof(counter_u64_t))
 /*
@@ -139,7 +140,6 @@
     u_int v_pageout_free_min;   /* (c) min pages reserved for kernel */
     u_int v_interrupt_free_min; /* (c) reserved pages for int code */
     u_int v_free_severe;        /* (c) severe page depletion point */
-    u_int v_wire_count VMMETER_ALIGNED; /* (a) pages wired down */
 };
 #endif /* _KERNEL || _WANT_VMMETER */

@@ -155,7 +155,27 @@
 #define VM_CNT_INC(var)    VM_CNT_ADD(var, 1)
 #define VM_CNT_FETCH(var)  counter_u64_fetch(vm_cnt.var)

+static inline void
+vm_wire_add(int cnt)
+{
+
+    VM_CNT_ADD(v_wire_count, cnt);
+}
+
+static inline void
+vm_wire_sub(int cnt)
+{
+
+    VM_CNT_ADD(v_wire_count, -cnt);
+}
+
 u_int vm_free_count(void);
+static inline u_int
+vm_wire_count(void)
+{
+
+    return (VM_CNT_FETCH(v_wire_count));
+}

 /*
  * Return TRUE if we are under our severe low-free-pages threshold
Index: head/sys/vm/swap_pager.c
===================================================================
--- head/sys/vm/swap_pager.c
+++ head/sys/vm/swap_pager.c
@@ -209,7 +209,8 @@
     mtx_lock(&sw_dev_mtx);
     r = swap_reserved + incr;
     if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
-        s = vm_cnt.v_page_count - vm_cnt.v_free_reserved - vm_cnt.v_wire_count;
+        s = vm_cnt.v_page_count - vm_cnt.v_free_reserved -
+            vm_wire_count();
         s *= PAGE_SIZE;
     } else
         s = 0;
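The three inline helpers added to vmmeter.h are thin wrappers: vm_wire_add() and vm_wire_sub() expand to a counter(9) add on the current CPU's slot (since the counter is a u64, subtraction is simply an addition of a negative delta), and vm_wire_count() sums every slot. A hypothetical caller, to show the intended cost model; wire_batch() and batch_succeeded() are illustrative names, not part of this change:

    /*
     * Updates touch only the local CPU's slot and are cheap; reads sum
     * every slot, so hot paths should update and only slow paths fetch.
     */
    static bool batch_succeeded(void);     /* illustrative failure probe */

    static int
    wire_batch(int npages)
    {

        vm_wire_add(npages);               /* per-CPU add, no shared cache line */
        if (!batch_succeeded()) {
            vm_wire_sub(npages);           /* implemented as adding -npages */
            return (EAGAIN);
        }
        return (0);
    }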
Index: head/sys/vm/vm_glue.c
===================================================================
--- head/sys/vm/vm_glue.c
+++ head/sys/vm/vm_glue.c
@@ -191,7 +191,7 @@
      * Also, the sysctl code, which is the only present user
      * of vslock(), does a hard loop on EAGAIN.
      */
-    if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
+    if (npages + vm_wire_count() > vm_page_max_wired)
         return (EAGAIN);
 #endif
     error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
Index: head/sys/vm/vm_meter.c
===================================================================
--- head/sys/vm/vm_meter.c
+++ head/sys/vm/vm_meter.c
@@ -96,6 +96,7 @@
     .v_vforkpages = EARLY_COUNTER,
     .v_rforkpages = EARLY_COUNTER,
     .v_kthreadpages = EARLY_COUNTER,
+    .v_wire_count = EARLY_COUNTER,
 };

 static void
@@ -105,7 +106,7 @@

     COUNTER_ARRAY_ALLOC(cnt, VM_METER_NCOUNTERS, M_WAITOK);
 }
-SYSINIT(counter, SI_SUB_CPU, SI_ORDER_FOURTH + 1, vmcounter_startup, NULL);
+SYSINIT(counter, SI_SUB_KMEM, SI_ORDER_FIRST, vmcounter_startup, NULL);

 SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min, CTLFLAG_RW,
     &vm_cnt.v_free_min, 0, "Minimum low-free-pages threshold");
@@ -403,7 +404,7 @@
 VM_STATS_UINT(v_free_target, "Pages desired free");
 VM_STATS_UINT(v_free_min, "Minimum low-free-pages threshold");
 VM_STATS_PROC(v_free_count, "Free pages", vm_free_count);
-VM_STATS_UINT(v_wire_count, "Wired pages");
+VM_STATS_PROC(v_wire_count, "Wired pages", vm_wire_count);
 VM_STATS_PROC(v_active_count, "Active pages", vm_active_count);
 VM_STATS_UINT(v_inactive_target, "Desired inactive pages");
 VM_STATS_PROC(v_inactive_count, "Inactive pages", vm_inactive_count);
Index: head/sys/vm/vm_mmap.c
===================================================================
--- head/sys/vm/vm_mmap.c
+++ head/sys/vm/vm_mmap.c
@@ -1002,7 +1002,7 @@
         return (ENOMEM);
     }
     PROC_UNLOCK(proc);
-    if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
+    if (npages + vm_wire_count() > vm_page_max_wired)
         return (EAGAIN);
 #ifdef RACCT
     if (racct_enable) {
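The two SYSINIT changes move initialization earlier so the new counter is real by the time it is needed: vmcounter_startup() shifts from SI_SUB_CPU to SI_SUB_KMEM/SI_ORDER_FIRST, since v_wire_count is updated as soon as the kernel starts wiring page-table pages, and pcpu_zones_startup() shifts to SI_SUB_VM so the per-CPU UMA zones that back counter(9) allocations exist first. Until the array allocation runs, the EARLY_COUNTER placeholders absorb updates. A small sketch of the dependency, assuming counter(9) storage comes from the per-CPU UMA zones; example_counter and example_startup are illustrative, not from this change:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/counter.h>
    #include <sys/malloc.h>

    static counter_u64_t example_counter;  /* illustrative */

    static void
    example_startup(void *arg __unused)
    {

        /* Needs the per-CPU zones, hence pcpu_zones_startup() at SI_SUB_VM. */
        example_counter = counter_u64_alloc(M_WAITOK);
    }
    SYSINIT(example_counter_init, SI_SUB_KMEM, SI_ORDER_SECOND,
        example_startup, NULL);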
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -1796,7 +1796,7 @@
          * The page lock is not required for wiring a page until that
          * page is inserted into the object.
          */
-        atomic_add_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_add(1);
         m->wire_count = 1;
     }
     m->act_count = 0;
@@ -1805,7 +1805,7 @@
         if (vm_page_insert_after(m, object, pindex, mpred)) {
             pagedaemon_wakeup(domain);
             if (req & VM_ALLOC_WIRED) {
-                atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+                vm_wire_sub(1);
                 m->wire_count = 0;
             }
             KASSERT(m->object == NULL, ("page %p has object", m));
@@ -1989,7 +1989,7 @@
     if ((req & VM_ALLOC_SBUSY) != 0)
         busy_lock = VPB_SHARERS_WORD(1);
     if ((req & VM_ALLOC_WIRED) != 0)
-        atomic_add_int(&vm_cnt.v_wire_count, npages);
+        vm_wire_add(npages);
     if (object != NULL) {
         if (object->memattr != VM_MEMATTR_DEFAULT &&
             memattr == VM_MEMATTR_DEFAULT)
@@ -2007,8 +2007,7 @@
         if (vm_page_insert_after(m, object, pindex, mpred)) {
             pagedaemon_wakeup(domain);
             if ((req & VM_ALLOC_WIRED) != 0)
-                atomic_subtract_int(
-                    &vm_cnt.v_wire_count, npages);
+                vm_wire_sub(npages);
             KASSERT(m->object == NULL, ("page %p has object", m));
             mpred = m;
@@ -2133,7 +2132,7 @@
          * The page lock is not required for wiring a page that does
          * not belong to an object.
          */
-        atomic_add_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_add(1);
         m->wire_count = 1;
     }
     /* Unmanaged pages don't use "act_count". */
@@ -3256,7 +3255,7 @@
         KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
             m->queue == PQ_NONE,
             ("vm_page_wire: unmanaged page %p is queued", m));
-        atomic_add_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_add(1);
     }
     m->wire_count++;
     KASSERT(m->wire_count != 0,
         ("vm_page_wire: wire_count overflow m=%p", m));
@@ -3331,7 +3330,7 @@
         panic("vm_page_unwire: page %p's wire count is zero", m);
     m->wire_count--;
     if (m->wire_count == 0) {
-        atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+        vm_wire_sub(1);
         return (true);
     } else
         return (false);
@@ -4157,7 +4156,7 @@
     db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
     db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
     db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
-    db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count);
+    db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
     db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
     db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
     db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
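The vm_page.c hunks preserve the existing accounting protocol: v_wire_count tracks wired pages, not wirings, so only a page's 0-to-1 transition credits the counter (vm_page_wire()) and only the 1-to-0 transition debits it (vm_page_unwire()). In outline, with assertions, locking, and queue handling elided; sketch_wire and sketch_unwire are illustrative names for the logic in the hunks above, not kernel functions:

    static void
    sketch_wire(vm_page_t m)
    {

        if (m->wire_count == 0)
            vm_wire_add(1);        /* page's first wiring */
        m->wire_count++;
    }

    static bool
    sketch_unwire(vm_page_t m)
    {

        m->wire_count--;
        if (m->wire_count == 0) {
            vm_wire_sub(1);        /* page's last wiring dropped */
            return (true);         /* caller may free or requeue */
        }
        return (false);
    }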