Index: head/sys/kern/kern_sendfile.c
===================================================================
--- head/sys/kern/kern_sendfile.c
+++ head/sys/kern/kern_sendfile.c
@@ -163,7 +163,7 @@
 				 */
 				if (nocache)
 					vm_page_deactivate_noreuse(pg);
-				else if (pg->queue == PQ_ACTIVE)
+				else if (vm_page_active(pg))
 					vm_page_reference(pg);
 				else
 					vm_page_deactivate(pg);
Index: head/sys/kern/vfs_bio.c
===================================================================
--- head/sys/kern/vfs_bio.c
+++ head/sys/kern/vfs_bio.c
@@ -2933,7 +2933,7 @@
 			 */
 			if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
 				vm_page_deactivate_noreuse(m);
-			else if (m->queue == PQ_ACTIVE)
+			else if (vm_page_active(m))
 				vm_page_reference(m);
 			else
 				vm_page_deactivate(m);
Index: head/sys/vm/vm_object.c
===================================================================
--- head/sys/vm/vm_object.c
+++ head/sys/vm/vm_object.c
@@ -2379,9 +2379,9 @@
 			 * sysctl is only meant to give an
 			 * approximation of the system anyway.
 			 */
-			if (vm_page_active(m))
+			if (m->queue == PQ_ACTIVE)
 				kvo->kvo_active++;
-			else if (vm_page_inactive(m))
+			else if (m->queue == PQ_INACTIVE)
 				kvo->kvo_inactive++;
 		}
 
Index: head/sys/vm/vm_page.h
===================================================================
--- head/sys/vm/vm_page.h
+++ head/sys/vm/vm_page.h
@@ -785,43 +785,45 @@
 	(void)mret;
 }
 
-static inline bool
-vm_page_active(vm_page_t m)
+/*
+ * vm_page_queue:
+ *
+ *	Return the index of the queue containing m.  This index is guaranteed
+ *	not to change while the page lock is held.
+ */
+static inline uint8_t
+vm_page_queue(vm_page_t m)
 {
 
-	return (m->queue == PQ_ACTIVE);
+	vm_page_assert_locked(m);
+
+	if ((m->aflags & PGA_DEQUEUE) != 0)
+		return (PQ_NONE);
+	atomic_thread_fence_acq();
+	return (m->queue);
 }
 
 static inline bool
-vm_page_inactive(vm_page_t m)
+vm_page_active(vm_page_t m)
 {
 
-	return (m->queue == PQ_INACTIVE);
+	return (vm_page_queue(m) == PQ_ACTIVE);
 }
 
 static inline bool
-vm_page_in_laundry(vm_page_t m)
+vm_page_inactive(vm_page_t m)
 {
 
-	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
+	return (vm_page_queue(m) == PQ_INACTIVE);
 }
 
-/*
- * vm_page_enqueued:
- *
- *	Return true if the page is logically enqueued and no deferred
- *	dequeue is pending.
- */
 static inline bool
-vm_page_enqueued(vm_page_t m)
+vm_page_in_laundry(vm_page_t m)
 {
+	uint8_t queue;
 
-	vm_page_assert_locked(m);
-
-	if ((m->aflags & PGA_DEQUEUE) != 0)
-		return (false);
-	atomic_thread_fence_acq();
-	return (m->queue != PQ_NONE);
+	queue = vm_page_queue(m);
+	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
 }
 
 /*
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -2403,7 +2403,7 @@
 			    vm_reserv_size(level)) - pa);
 #endif
 		} else if (object->memattr == VM_MEMATTR_DEFAULT &&
-		    vm_page_enqueued(m) && !vm_page_busied(m)) {
+		    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
 			/*
 			 * The page is allocated but eligible for
 			 * relocation.  Extend the current run by one
@@ -2554,7 +2554,8 @@
 			error = EINVAL;
 		else if (object->memattr != VM_MEMATTR_DEFAULT)
 			error = EINVAL;
-		else if (vm_page_enqueued(m) && !vm_page_busied(m)) {
+		else if (vm_page_queue(m) != PQ_NONE &&
+		    !vm_page_busied(m)) {
 			KASSERT(pmap_page_get_memattr(m) ==
 			    VM_MEMATTR_DEFAULT,
 			    ("page %p has an unexpected memattr", m));
@@ -3391,9 +3392,9 @@
 {
 	int queue;
 
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 
-	if ((queue = m->queue) == PQ_ACTIVE || m->wire_count > 0 ||
+	if ((queue = vm_page_queue(m)) == PQ_ACTIVE || m->wire_count > 0 ||
 	    (m->oflags & VPO_UNMANAGED) != 0) {
 		if (queue == PQ_ACTIVE && m->act_count < ACT_INIT)
 			m->act_count = ACT_INIT;
@@ -3610,7 +3611,7 @@
 	if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL)
 		return (unwired);
 
-	if (m->queue == queue) {
+	if (vm_page_queue(m) == queue) {
 		if (queue == PQ_ACTIVE)
 			vm_page_reference(m);
 		else if (queue != PQ_NONE)
@@ -3716,7 +3717,7 @@
 
 	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 		return;
-	if (m->queue == PQ_LAUNDRY)
+	if (vm_page_in_laundry(m))
 		vm_page_requeue(m);
 	else {
 		vm_page_remque(m);
Index: head/sys/vm/vm_pageout.c
===================================================================
--- head/sys/vm/vm_pageout.c
+++ head/sys/vm/vm_pageout.c
@@ -384,12 +384,12 @@
 			break;
 		}
 		vm_page_test_dirty(p);
-		if (p->dirty == 0 || !vm_page_in_laundry(p)) {
+		if (p->dirty == 0) {
 			ib = 0;
 			break;
 		}
 		vm_page_lock(p);
-		if (vm_page_held(p)) {
+		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
 			vm_page_unlock(p);
 			ib = 0;
 			break;
@@ -412,10 +412,10 @@
 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
 			break;
 		vm_page_test_dirty(p);
-		if (p->dirty == 0 || !vm_page_in_laundry(p))
+		if (p->dirty == 0)
 			break;
 		vm_page_lock(p);
-		if (vm_page_held(p)) {
+		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
 			vm_page_unlock(p);
 			break;
 		}
@@ -1129,7 +1129,7 @@
 {
 	struct vm_domain *vmd;
 
-	if (!vm_page_inactive(m) || (m->aflags & PGA_ENQUEUED) != 0)
+	if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
 		return (0);
 	vm_page_aflag_set(m, PGA_ENQUEUED);
 	if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
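For reference, the pattern that lock-holding callers such as kern_sendfile.c and vfs_bio.c follow after this change looks roughly like the sketch below.  It is an illustration only: example_requeue_page() is a made-up name and not part of the patch, and the includes are just the usual ones a kernel file needs for the page lock; the vm_page_lock(), vm_page_active(), vm_page_reference(), vm_page_deactivate() and vm_page_deactivate_noreuse() calls are the real interfaces touched by the diff.  The point is that vm_page_active() now goes through vm_page_queue(), which asserts the page lock, reports PQ_NONE while a deferred dequeue (PGA_DEQUEUE) is pending, and issues an acquire fence before reading m->queue.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Illustrative only; not part of the patch.  Mirrors the converted
 * callers: decide how to requeue a page based on its current queue,
 * with the page lock held so that vm_page_queue() is stable.
 */
static void
example_requeue_page(vm_page_t m, bool nocache)
{

	vm_page_lock(m);
	if (nocache)
		vm_page_deactivate_noreuse(m);
	else if (vm_page_active(m))	/* vm_page_queue(m) == PQ_ACTIVE */
		vm_page_reference(m);
	else
		vm_page_deactivate(m);
	vm_page_unlock(m);
}

Callers that do not hold the page lock, such as the vm.objects sysctl in vm_object.c and the PGA_ENQUEUED fast path in vm_pageout.c, keep reading m->queue directly and accept the race, which is why those hunks are converted in the opposite direction.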