Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -128,6 +128,7 @@
 static void
 sendfile_free_page(vm_page_t pg, bool nocache)
 {
+	bool freed;
 
 	vm_page_lock(pg);
 	/*
@@ -136,15 +137,15 @@
 	 * responsible for freeing the page.  In 'noncache' case try to free
 	 * the page, but only if it is cheap to.
 	 */
-	if (vm_page_unwire(pg, nocache ? PQ_NONE : PQ_INACTIVE)) {
+	if (vm_page_unwire_noq(pg)) {
 		vm_object_t obj;
 
 		if ((obj = pg->object) == NULL)
 			vm_page_free(pg);
-		else if (nocache) {
-			if (!vm_page_xbusied(pg) && VM_OBJECT_TRYWLOCK(obj)) {
-				bool freed;
-
+		else {
+			freed = false;
+			if (nocache && !vm_page_xbusied(pg) &&
+			    VM_OBJECT_TRYWLOCK(obj)) {
 				/* Only free unmapped pages. */
 				if (obj->ref_count == 0 ||
 				    !pmap_page_is_mapped(pg))
@@ -153,13 +154,24 @@
 					 * locked cannot be relied upon.
 					 */
 					freed = vm_page_try_to_free(pg);
-				else
-					freed = false;
 				VM_OBJECT_WUNLOCK(obj);
-				if (!freed)
+			}
+			if (!freed) {
+				/*
+				 * If we were asked to not cache the page, place
+				 * it near the head of the inactive queue so
+				 * that it is reclaimed sooner.  Otherwise,
+				 * maintain LRU.
+				 */
+				if (nocache)
 					vm_page_deactivate_noreuse(pg);
-			} else
-				vm_page_deactivate_noreuse(pg);
+				else if (pg->queue == PQ_ACTIVE)
+					vm_page_reference(pg);
+				else if (pg->queue != PQ_INACTIVE)
+					vm_page_deactivate(pg);
+				else
+					vm_page_requeue(pg);
+			}
 		}
 	}
 	vm_page_unlock(pg);
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -2610,7 +2610,7 @@
 	bool freed;
 
 	vm_page_lock(m);
-	if (vm_page_unwire(m, PQ_NONE)) {
+	if (vm_page_unwire_noq(m)) {
 		/*
 		 * Determine if the page should be freed before adding
 		 * it to the inactive queue.
@@ -2626,14 +2626,16 @@
 		if (!freed) {
 			/*
 			 * If the page is unlikely to be reused, let the
-			 * VM know.  Otherwise, maintain LRU page
-			 * ordering and put the page at the tail of the
-			 * inactive queue.
+			 * VM know.  Otherwise, maintain LRU.
 			 */
 			if ((bp->b_flags & B_NOREUSE) != 0)
 				vm_page_deactivate_noreuse(m);
-			else
+			else if (m->queue == PQ_ACTIVE)
+				vm_page_reference(m);
+			else if (m->queue != PQ_INACTIVE)
 				vm_page_deactivate(m);
+			else
+				vm_page_requeue(m);
 		}
 	}
 	vm_page_unlock(m);
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -516,7 +516,8 @@
 int vm_page_trysbusy(vm_page_t m);
 void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unswappable(vm_page_t m);
-boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
+bool vm_page_unwire(vm_page_t m, uint8_t queue);
+bool vm_page_unwire_noq(vm_page_t m);
 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_wire (vm_page_t);
 void vm_page_xunbusy_hard(vm_page_t m);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2723,9 +2723,7 @@
 			if (queue != PQ_NONE)
 				vm_page_dequeue(m);
 			vm_page_enqueue(PQ_ACTIVE, m);
-		} else
-			KASSERT(queue == PQ_NONE,
-			    ("vm_page_activate: wired page %p is queued", m));
+		}
 	} else {
 		if (m->act_count < ACT_INIT)
 			m->act_count = ACT_INIT;
@@ -2897,26 +2895,18 @@
 }
 
 /*
  *	vm_page_wire:
  *
- *	Mark this page as wired down by yet
- *	another map, removing it from paging queues
- *	as necessary.
- *
- *	If the page is fictitious, then its wire count must remain one.
+ *	Mark this page as wired down.  If the page is fictitious, then
+ *	its wire count must remain one.
 *
 *	The page must be locked.
 */
 void
 vm_page_wire(vm_page_t m)
 {
 
-	/*
-	 * Only bump the wire statistics if the page is not already wired,
-	 * and only unqueue the page if it is on some queue (if it is unmanaged
-	 * it is already off the queues).
-	 */
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		KASSERT(m->wire_count == 1,
 		    ("vm_page_wire: fictitious page %p's wire count isn't one",
@@ -2927,7 +2917,6 @@
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
 		    m->queue == PQ_NONE,
 		    ("vm_page_wire: unmanaged page %p is queued", m));
-		vm_page_remque(m);
 		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}
 	m->wire_count++;
@@ -2944,38 +2933,68 @@
 * Only managed pages belonging to an object can be paged out.  If the number
 * of wirings transitions to zero and the page is eligible for page out, then
 * the page is added to the specified paging queue (unless PQ_NONE is
- * specified).
+ * specified, in which case the page is dequeued if it belongs to a paging
+ * queue).
 *
 * If a page is fictitious, then its wire count must always be one.
 *
 * A managed page must be locked.
 */
-boolean_t
+bool
 vm_page_unwire(vm_page_t m, uint8_t queue)
 {
+	bool unwired;
 
 	KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
 	    ("vm_page_unwire: invalid queue %u request for page %p",
 	    queue, m));
+
+	unwired = vm_page_unwire_noq(m);
+	if (unwired && (m->oflags & VPO_UNMANAGED) == 0 && m->object != NULL) {
+		if (m->queue == queue) {
+			if (queue == PQ_ACTIVE)
+				vm_page_reference(m);
+			else if (queue != PQ_NONE)
+				vm_page_requeue(m);
+		} else {
+			vm_page_remque(m);
+			if (queue != PQ_NONE) {
+				vm_page_enqueue(queue, m);
+				if (queue == PQ_ACTIVE)
+					/* Initialize act_count. */
+					vm_page_activate(m);
+			}
+		}
+	}
+	return (unwired);
+}
+
+/*
+ * vm_page_unwire_noq:
+ *
+ *	Unwire a page without (re-)inserting it into a page queue.  It is up
+ *	to the caller to enqueue, requeue, or free the page as appropriate.
+ *	In most cases, vm_page_unwire() should be used instead.
+ */
+bool
+vm_page_unwire_noq(vm_page_t m)
+{
+
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_assert_locked(m);
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		KASSERT(m->wire_count == 1,
 		    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
-		return (FALSE);
-	}
-	if (m->wire_count > 0) {
-		m->wire_count--;
-		if (m->wire_count == 0) {
-			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
-			if ((m->oflags & VPO_UNMANAGED) == 0 &&
-			    m->object != NULL && queue != PQ_NONE)
-				vm_page_enqueue(queue, m);
-			return (TRUE);
-		} else
-			return (FALSE);
-	} else
+		return (false);
+	}
+	if (m->wire_count == 0)
 		panic("vm_page_unwire: page %p's wire count is zero", m);
+	m->wire_count--;
+	if (m->wire_count == 0) {
+		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+		return (true);
+	} else
+		return (false);
 }
 
 /*
Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -352,9 +352,6 @@
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	pindex = m->pindex;
-	/*
-	 * We can't clean the page if it is busy or held.
-	 */
 	vm_page_assert_unbusied(m);
 	KASSERT(m->hold_count == 0, ("page %p is held", m));
 
@@ -396,7 +393,7 @@
 		}
 		vm_page_lock(p);
 		if (!vm_page_in_laundry(p) ||
-		    p->hold_count != 0) {	/* may be undergoing I/O */
+		    p->hold_count != 0 || p->wire_count != 0) {
 			vm_page_unlock(p);
 			ib = 0;
 			break;
@@ -423,7 +420,7 @@
 			break;
 		vm_page_lock(p);
 		if (!vm_page_in_laundry(p) ||
-		    p->hold_count != 0) {	/* may be undergoing I/O */
+		    p->hold_count != 0 || p->wire_count != 0) {
 			vm_page_unlock(p);
 			break;
 		}
@@ -754,11 +751,19 @@
 			vm_page_unlock(m);
 			continue;
 		}
+		if (m->wire_count != 0) {
+			vm_page_dequeue_locked(m);
+			vm_page_unlock(m);
+			continue;
+		}
 		object = m->object;
 		if ((!VM_OBJECT_TRYWLOCK(object) &&
 		    (!vm_pageout_fallback_object_lock(m, &next) ||
-		    m->hold_count != 0)) || vm_page_busied(m)) {
+		    m->hold_count != 0 || m->wire_count != 0)) ||
+		    vm_page_busied(m)) {
 			VM_OBJECT_WUNLOCK(object);
+			if (m->wire_count != 0 && vm_page_pagequeue(m) == pq)
+				vm_page_dequeue_locked(m);
 			vm_page_unlock(m);
 			continue;
 		}
@@ -1194,7 +1199,16 @@
 		 */
 		if (!vm_pageout_page_lock(m, &next))
 			goto unlock_page;
-		else if (m->hold_count != 0) {
+		else if (m->wire_count != 0) {
+			/*
+			 * Wired pages may not be freed, and unwiring a queued
+			 * page will cause it to be requeued.  Thus, remove them
+			 * from the queue now to avoid unnecessary revisits.
+			 */
+			vm_page_dequeue_locked(m);
+			addl_page_shortage++;
+			goto unlock_page;
+		} else if (m->hold_count != 0) {
 			/*
 			 * Held pages are essentially stuck in the
 			 * queue.  So, they ought to be discounted
@@ -1209,7 +1223,11 @@
 		if (!VM_OBJECT_TRYWLOCK(object)) {
 			if (!vm_pageout_fallback_object_lock(m, &next))
 				goto unlock_object;
-			else if (m->hold_count != 0) {
+			else if (m->wire_count != 0) {
+				vm_page_dequeue_locked(m);
+				addl_page_shortage++;
+				goto unlock_object;
+			} else if (m->hold_count != 0) {
 				addl_page_shortage++;
 				goto unlock_object;
 			}
@@ -1231,6 +1249,7 @@
 			continue;
 		}
 		KASSERT(m->hold_count == 0, ("Held page %p", m));
+		KASSERT(m->wire_count == 0, ("Wired page %p", m));
 
 		/*
 		 * Dequeue the inactive page and unlock the inactive page
@@ -1429,6 +1448,15 @@
 		 */
 		VM_CNT_INC(v_pdpages);
 
+		/*
+		 * Wired pages are dequeued lazily.
+		 */
+		if (m->wire_count != 0) {
+			vm_page_dequeue_locked(m);
+			vm_page_unlock(m);
+			continue;
+		}
+
 		/*
 		 * Check to see "how much" the page has been used.
 		 */
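
Note: the central idea of this patch is that vm_page_unwire_noq() adjusts only the wire count and the v_wire_count statistic, leaving queue placement entirely to the caller. The following is a minimal sketch of the resulting calling convention, distilled from the sendfile_free_page() and vfs_bio.c hunks above. It is illustrative only and not part of the patch; the helper name release_page_lru() is hypothetical, but every routine it calls appears in the hunks above.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper: drop one wiring of 'm' and, if that was the last
 * wiring, put the page back on a paging queue ourselves.
 */
static void
release_page_lru(vm_page_t m, bool nocache)
{

	vm_page_lock(m);
	if (vm_page_unwire_noq(m)) {
		if (nocache)
			/* Encourage early reclamation of the page. */
			vm_page_deactivate_noreuse(m);
		else if (m->queue == PQ_ACTIVE)
			/* Leave active pages in place; record the reference. */
			vm_page_reference(m);
		else if (m->queue != PQ_INACTIVE)
			/* Unqueued page: place it on the inactive queue. */
			vm_page_deactivate(m);
		else
			/* Already inactive: move to the queue tail (LRU). */
			vm_page_requeue(m);
	}
	vm_page_unlock(m);
}

Because vm_page_wire() no longer dequeues and vm_page_unwire_noq() never enqueues, wired pages may now be encountered on a paging queue; the vm_pageout.c hunks account for this by dequeuing such pages lazily as the scans find them.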