Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -167,10 +167,8 @@
 					vm_page_deactivate_noreuse(pg);
 				else if (pg->queue == PQ_ACTIVE)
 					vm_page_reference(pg);
-				else if (pg->queue != PQ_INACTIVE)
-					vm_page_deactivate(pg);
 				else
-					vm_page_requeue(pg);
+					vm_page_deactivate(pg);
 			}
 		}
 	}
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -2789,10 +2789,8 @@
 				vm_page_deactivate_noreuse(m);
 			else if (m->queue == PQ_ACTIVE)
 				vm_page_reference(m);
-			else if (m->queue != PQ_INACTIVE)
-				vm_page_deactivate(m);
 			else
-				vm_page_requeue(m);
+				vm_page_deactivate(m);
 		}
 	}
 	vm_page_unlock(m);
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -682,7 +682,7 @@
 			/*
 			 * Reference the page before unlocking and
 			 * sleeping so that the page daemon is less
-			 * likely to reclaim it. 
+			 * likely to reclaim it.
 			 */
 			vm_page_aflag_set(fs.m, PGA_REFERENCED);
 			if (fs.object != fs.first_object) {
@@ -710,9 +710,6 @@
 				vm_object_deallocate(fs.first_object);
 				goto RetryFault;
 			}
-			vm_page_lock(fs.m);
-			vm_page_remque(fs.m);
-			vm_page_unlock(fs.m);
 
 			/*
 			 * Mark page busy for other processes, and the
@@ -723,7 +720,7 @@
 			vm_page_xbusy(fs.m);
 			if (fs.m->valid != VM_PAGE_BITS_ALL)
 				goto readrest;
-			break;
+			break; /* break to PAGE HAS BEEN FOUND */
 		}
 		KASSERT(fs.m == NULL,
 		    ("fs.m should be NULL, not %p", fs.m));
@@ -1105,6 +1102,7 @@
 			     */
 			    fs.object == fs.first_object->backing_object) {
 				vm_page_lock(fs.m);
+				vm_page_remque(fs.m);
 				vm_page_remove(fs.m);
 				vm_page_unlock(fs.m);
 				vm_page_lock(fs.first_m);
@@ -1379,7 +1377,8 @@
 		 * active queue.
 		 */
 		vm_page_lock(m);
-		vm_page_deactivate(m);
+		if (!vm_page_inactive(m))
+			vm_page_deactivate(m);
 		vm_page_unlock(m);
 	}
 }
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1504,7 +1504,9 @@
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(mnew->object == NULL,
-	    ("vm_page_replace: page already in object"));
+	    ("vm_page_replace: page %p already in object", mnew));
+	KASSERT(mnew->queue == PQ_NONE,
+	    ("vm_page_replace: new page %p is on a paging queue", mnew));
 
 	/*
 	 * This function mostly follows vm_page_insert() and
@@ -1516,7 +1518,7 @@
 	mnew->pindex = pindex;
 	mold = vm_radix_replace(&object->rtree, mnew);
 	KASSERT(mold->queue == PQ_NONE,
-	    ("vm_page_replace: mold is on a paging queue"));
+	    ("vm_page_replace: old page %p is on a paging queue", mold));
 
 	/* Keep the resident page list in sorted order. */
 	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
@@ -3377,7 +3379,8 @@
 }
 
 /*
- * Move the specified page to the inactive queue.
+ * Move the specified page to the inactive queue, or requeue the page if it is
+ * already in the inactive queue.
  *
  * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive
  * queue.  However, setting "noreuse" to TRUE will accelerate the specified
@@ -3395,15 +3398,10 @@
 
 	vm_page_assert_locked(m);
 
-	/*
-	 * Ignore if the page is already inactive, unless it is unlikely to be
-	 * reactivated.
-	 */
-	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
-		return;
 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 		pq = &vm_pagequeue_domain(m)->vmd_pagequeues[PQ_INACTIVE];
 		/* Avoid multiple acquisitions of the inactive queue lock. */
+		queue = m->queue;
 		if (queue == PQ_INACTIVE) {
 			vm_pagequeue_lock(pq);
 			vm_page_dequeue_locked(m);
@@ -3425,7 +3423,8 @@
 }
 
 /*
- * Move the specified page to the inactive queue.
+ * Move the specified page to the inactive queue, or requeue the page if it is
+ * already in the inactive queue.
  *
  * The page must be locked.
  */
@@ -3452,19 +3451,20 @@
 /*
  *	vm_page_launder
  *
- *	Put a page in the laundry.
+ *	Put a page in the laundry, or requeue it if it is already there.
  */
 void
 vm_page_launder(vm_page_t m)
 {
-	int queue;
 
 	vm_page_assert_locked(m);
-	if ((queue = m->queue) != PQ_LAUNDRY && m->wire_count == 0 &&
-	    (m->oflags & VPO_UNMANAGED) == 0) {
-		if (queue != PQ_NONE)
-			vm_page_dequeue(m);
-		vm_page_enqueue(PQ_LAUNDRY, m);
+	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
+		if (m->queue == PQ_LAUNDRY)
+			vm_page_requeue(m);
+		else {
+			vm_page_remque(m);
+			vm_page_enqueue(PQ_LAUNDRY, m);
+		}
 	}
 }
 
@@ -3554,7 +3554,7 @@
 	 */
 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
-	else
+	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
 