Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -718,7 +718,8 @@
 	struct mtx *mtx;
 	vm_object_t object;
 	vm_page_t m, marker;
-	int act_delta, error, numpagedout, queue, starting_target;
+	vm_page_astate_t new, old;
+	int act_delta, error, numpagedout, queue, refs, starting_target;
 	int vnodes_skipped;
 	bool pageout_ok;
@@ -832,40 +833,46 @@
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		/*
-		 * If the page has been referenced and the object is not dead,
-		 * reactivate or requeue the page depending on whether the
-		 * object is mapped.
-		 *
-		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
-		 * that a reference from a concurrently destroyed mapping is
-		 * observed here and now.
-		 */
-		if (object->ref_count != 0)
-			act_delta = pmap_ts_referenced(m);
-		else {
-			KASSERT(!pmap_page_is_mapped(m),
-			    ("page %p is mapped", m));
-			act_delta = 0;
-		}
-		if ((m->a.flags & PGA_REFERENCED) != 0) {
-			vm_page_aflag_clear(m, PGA_REFERENCED);
-			act_delta++;
-		}
-		if (act_delta != 0) {
-			if (object->ref_count != 0) {
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
+
+		for (old = vm_page_astate_load(m);;) {
+			/*
+			 * If the page has been dequeued for some reason, leave
+			 * it alone.  Most likely it has been wired via a pmap
+			 * lookup.
+			 */
+			if (old.queue == PQ_NONE ||
+			    (old.flags & PGA_DEQUEUE) != 0) {
 				vm_page_xunbusy(m);
-				VM_CNT_INC(v_reactivated);
-				vm_page_activate(m);
+				goto next_page;
+			}
+			new = old;
+			act_delta = refs;
+			if ((old.flags & PGA_REFERENCED) != 0) {
+				new.flags &= ~PGA_REFERENCED;
+				act_delta++;
+			}
+			if (act_delta == 0) {
+				;
+			} else if (object->ref_count != 0) {
 				/*
-				 * Increase the activation count if the page
-				 * was referenced while in the laundry queue.
-				 * This makes it less likely that the page will
-				 * be returned prematurely to the inactive
-				 * queue.
-				 */
-				m->a.act_count += act_delta + ACT_ADVANCE;
+				 * Increase the activation count if the page was
+				 * referenced while in the laundry queue.  This
+				 * makes it less likely that the page will be
+				 * returned prematurely to the inactive queue.
+				 */
+				new.act_count += ACT_ADVANCE +
+				    act_delta;
+				if (new.act_count > ACT_MAX)
+					new.act_count = ACT_MAX;
+
+				new.flags |= PGA_REQUEUE;
+				new.queue = PQ_ACTIVE;
+				if (!vm_page_pqstate_commit(m, &old, new))
+					continue;
+
+				vm_page_xunbusy(m);
 
 			/*
 			 * If this was a background laundering, count
@@ -877,12 +884,16 @@
 			 */
 			if (!in_shortfall)
 				launder--;
-			continue;
+			VM_CNT_INC(v_reactivated);
+			goto next_page;
 		} else if ((object->flags & OBJ_DEAD) == 0) {
+			new.flags |= PGA_REQUEUE;
+			if (!vm_page_pqstate_commit(m, &old, new))
+				continue;
 			vm_page_xunbusy(m);
-			vm_page_requeue(m);
-			continue;
+			goto next_page;
 		}
+		break;
 	}
 
 	/*
@@ -901,6 +912,22 @@
 			}
 		}
 
+		/*
+		 * Now we are guaranteed that no other threads are
+		 * manipulating the page; check for a last-second reference
+		 * or a pending queue operation before going further.
+		 */
+		old = vm_page_astate_load(m);
+		if (old.queue != queue || (old.flags & PGA_ENQUEUED) == 0) {
+			vm_page_xunbusy(m);
+			goto next_page;
+		}
+		if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
+			vm_page_pqbatch_submit(m, queue);
+			vm_page_xunbusy(m);
+			goto next_page;
+		}
+
 		/*
 		 * Clean pages are freed, and dirty pages are paged out unless
 		 * they belong to a dead object.  Requeueing dirty pages from
@@ -921,7 +948,7 @@
 			pageout_ok = true;
 		if (!pageout_ok) {
 			vm_page_xunbusy(m);
-			vm_page_requeue(m);
+			vm_page_launder(m);
 			continue;
 		}
 
@@ -950,6 +977,7 @@
 			object = NULL;
 		} else
 			vm_page_xunbusy(m);
+next_page:;
 	}
 	if (mtx != NULL) {
 		mtx_unlock(mtx);
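
A note for reviewers unfamiliar with the new machinery: the patch replaces
"lock the queue, inspect the page, move it" with an optimistic
load/compute/commit loop.  vm_page_astate_load() snapshots the page's packed
queue state (queue index, queue-operation flags, act_count), the loop computes
a new state from that snapshot, and vm_page_pqstate_commit() tries to install
it atomically; as the loop's bare "continue" on failure suggests, a failed
commit refreshes the caller's snapshot, so the retry path never needs an
explicit reload.  The standalone userland sketch below illustrates that shape
with C11 atomics.  The page_astate/page_astate_commit names, the struct
layout, and the flag and constant values are illustrative stand-ins for this
example only, not the kernel's actual vm_page_astate_t layout or API.

/*
 * Sketch of the load/compute/commit retry pattern, assuming a 4-byte
 * packed state word updated with C11 compare-exchange.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	PGA_REFERENCED	0x01	/* page was referenced */
#define	PGA_DEQUEUE	0x02	/* page is being dequeued (e.g., wired) */
#define	PQ_NONE		255
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	ACT_ADVANCE	3
#define	ACT_MAX		64

struct page_astate {
	uint8_t	flags;
	uint8_t	queue;
	uint8_t	act_count;
	uint8_t	_pad;
};

struct page {
	_Atomic uint32_t astate;	/* packed struct page_astate */
};

static struct page_astate
page_astate_load(struct page *p)
{
	struct page_astate a;
	uint32_t raw;

	raw = atomic_load(&p->astate);
	memcpy(&a, &raw, sizeof(a));
	return (a);
}

/*
 * Atomically replace *old with new.  On failure, refresh *old with the
 * current state so that the caller can recompute its update and retry,
 * mirroring the contract the patch relies on.
 */
static bool
page_astate_commit(struct page *p, struct page_astate *old,
    struct page_astate new)
{
	uint32_t oraw, nraw;

	memcpy(&oraw, old, sizeof(oraw));
	memcpy(&nraw, &new, sizeof(nraw));
	if (atomic_compare_exchange_strong(&p->astate, &oraw, nraw))
		return (true);
	memcpy(old, &oraw, sizeof(*old));
	return (false);
}

int
main(void)
{
	struct page pg;
	struct page_astate old, new;
	uint32_t raw;

	/* Start with a referenced page sitting in the laundry queue. */
	new = (struct page_astate){ .flags = PGA_REFERENCED,
	    .queue = PQ_LAUNDRY };
	memcpy(&raw, &new, sizeof(raw));
	atomic_init(&pg.astate, raw);

	/* Same shape as the patch's loop: load, compute, commit, retry. */
	for (old = page_astate_load(&pg);;) {
		if (old.queue == PQ_NONE || (old.flags & PGA_DEQUEUE) != 0)
			break;		/* dequeued by another thread */
		new = old;
		new.flags &= ~PGA_REFERENCED;
		new.act_count = old.act_count + ACT_ADVANCE > ACT_MAX ?
		    ACT_MAX : old.act_count + ACT_ADVANCE;
		new.queue = PQ_ACTIVE;
		if (page_astate_commit(&pg, &old, new))
			break;
		/* Commit failed; *old was refreshed, so just retry. */
	}

	old = page_astate_load(&pg);
	printf("queue=%u act_count=%u\n", old.queue, old.act_count);
	return (0);
}

The design point the sketch is meant to surface: because every update to the
packed state goes through one compare-exchange, a concurrent dequeue or
requeue can never be silently overwritten, which is what lets the patch drop
the queue-lock-protected vm_page_activate()/vm_page_requeue() calls.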