Index: sys/vm/memguard.c
===================================================================
--- sys/vm/memguard.c
+++ sys/vm/memguard.c
@@ -262,7 +262,7 @@
 	if (pa == 0)
 		panic("MemGuard detected double-free of %p", (void *)va);
 	p = PHYS_TO_VM_PAGE(pa);
-	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+	KASSERT(vm_page_wired(p) && p->queue == PQ_NONE,
 	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
 	return (&p->plinks.memguard.p);
 }
@@ -277,7 +277,7 @@
 	if (pa == 0)
 		panic("MemGuard detected double-free of %p", (void *)va);
 	p = PHYS_TO_VM_PAGE(pa);
-	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+	KASSERT(vm_page_wired(p) && p->queue == PQ_NONE,
 	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
 	return (&p->plinks.memguard.v);
 }
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1679,7 +1679,7 @@
 		vm_page_dirty(m);
 #ifdef INVARIANTS
 		vm_page_lock(m);
-		if (m->wire_count == 0 && m->queue == PQ_NONE)
+		if (!vm_page_wired(m) && m->queue == PQ_NONE)
 			panic("page %p is neither wired nor queued", m);
 		vm_page_unlock(m);
 #endif
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1006,7 +1006,7 @@
 		 */
 		if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
 			vm_page_lock(fs.m);
-			if (fs.m->wire_count == 0)
+			if (!vm_page_wired(fs.m))
 				vm_page_free(fs.m);
 			else
 				vm_page_xunbusy_maybelocked(fs.m);
@@ -1029,7 +1029,7 @@
 		 */
 		if (fs.object != fs.first_object) {
 			vm_page_lock(fs.m);
-			if (fs.m->wire_count == 0)
+			if (!vm_page_wired(fs.m))
 				vm_page_free(fs.m);
 			else
 				vm_page_xunbusy_maybelocked(fs.m);
@@ -1818,7 +1818,7 @@
 			vm_page_wire(dst_m);
 			vm_page_unlock(dst_m);
 		} else {
-			KASSERT(dst_m->wire_count > 0,
+			KASSERT(vm_page_wired(dst_m),
 			    ("dst_m %p is not wired", dst_m));
 		}
 	} else {
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -720,7 +720,7 @@
 		 */
 		vm_page_change_lock(p, &mtx);
 		p->object = NULL;
-		if (p->wire_count != 0)
+		if (vm_page_wired(p))
 			continue;
 		VM_CNT_INC(v_pfree);
 		vm_page_free(p);
@@ -1595,7 +1595,7 @@
 			vm_page_lock(p);
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (p->wire_count == 0)
+			if (!vm_page_wired(p))
 				vm_page_free(p);
 			else
 				vm_page_remove(p);
@@ -1639,7 +1639,7 @@
 			vm_page_lock(p);
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (p->wire_count == 0)
+			if (!vm_page_wired(p))
 				vm_page_free(p);
 			else
 				vm_page_remove(p);
@@ -1944,7 +1944,7 @@
 			VM_OBJECT_WLOCK(object);
 			goto again;
 		}
-		if (p->wire_count != 0) {
+		if (vm_page_wired(p)) {
 			if ((options & OBJPR_NOTMAPPED) == 0 &&
 			    object->ref_count != 0)
 				pmap_remove_all(p);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2577,7 +2577,7 @@
 				error = ENOMEM;
 				goto unlock;
 			}
-			KASSERT(m_new->wire_count == 0,
+			KASSERT(!vm_page_wired(m_new),
 			    ("page %p is wired", m_new));

 			/*
@@ -3404,7 +3404,7 @@

 	vm_page_assert_locked(m);

-	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
+	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
 		return;
 	if (vm_page_queue(m) == PQ_ACTIVE) {
 		if (m->act_count < ACT_INIT)
@@ -3479,7 +3479,7 @@
 	m->valid = 0;
 	vm_page_undirty(m);

-	if (m->wire_count != 0)
+	if (vm_page_wired(m))
 		panic("vm_page_free_prep: freeing wired page %p", m);

 	/*
@@ -3573,7 +3573,7 @@
 		    m));
 		return;
 	}
-	if (m->wire_count == 0) {
+	if (!vm_page_wired(m)) {
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
 		    m->queue == PQ_NONE,
 		    ("vm_page_wire: unmanaged page %p is queued", m));
@@ -3651,7 +3651,7 @@
 		    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
 		return (false);
 	}
-	if (m->wire_count == 0)
+	if (!vm_page_wired(m))
 		panic("vm_page_unwire: page %p's wire count is zero", m);
 	m->wire_count--;
 	if (m->wire_count == 0) {
@@ -3673,7 +3673,7 @@

 	vm_page_assert_locked(m);

-	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
+	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
 		return;

 	if (!vm_page_inactive(m)) {
@@ -3697,7 +3697,7 @@

 	vm_page_assert_locked(m);

-	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
+	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
 		return;

 	if (!vm_page_inactive(m)) {
@@ -3719,7 +3719,7 @@
 {
 	vm_page_assert_locked(m);

-	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
+	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
 		return;

 	if (vm_page_in_laundry(m))
@@ -3740,7 +3740,7 @@
 {
 	vm_page_assert_locked(m);

-	KASSERT(m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0,
+	KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
 	    ("page %p already unswappable", m));

 	vm_page_dequeue(m);
Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -750,7 +750,7 @@
 		 * from the queue now to avoid needless revisits during
 		 * future scans.
 		 */
-		if (m->wire_count != 0) {
+		if (vm_page_wired(m)) {
 			vm_page_dequeue_deferred(m);
 			continue;
 		}
@@ -1199,7 +1199,7 @@
 		/*
 		 * Wired pages are dequeued lazily.
 		 */
-		if (m->wire_count != 0) {
+		if (vm_page_wired(m)) {
 			vm_page_dequeue_deferred(m);
 			continue;
 		}
@@ -1418,7 +1418,7 @@
 		 * from the queue now to avoid needless revisits during
 		 * future scans.
 		 */
-		if (m->wire_count != 0) {
+		if (vm_page_wired(m)) {
 			vm_page_dequeue_deferred(m);
 			continue;
 		}
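
Note: every hunk above assumes a vm_page_wired() predicate that is not part of this
diff. A minimal sketch of what such a helper could look like, assuming it is a trivial
inline over the existing wire_count field (the authoritative definition belongs to the
companion change in sys/vm/vm_page.h that introduces the function, not to this patch):

/*
 * Sketch only: treat any nonzero wire count as "wired", matching the mixed
 * "!= 0" / "> 0" / "== 0" tests that the hunks above replace.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

	return (m->wire_count > 0);
}

Centralizing the test behind a predicate keeps callers from depending on how the wire
count is represented, so a later change to its storage only needs to touch the helper.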