diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -202,6 +202,11 @@
     &vm_page_max_user_wired, 0,
     "system-wide limit to user-wired page count");
 
+static u_int vm_pageout_scan_inactive_run_max = 32;
+SYSCTL_UINT(_vm, OID_AUTO, pageout_scan_inactive_run_max, CTLFLAG_RWTUN,
+    &vm_pageout_scan_inactive_run_max, 0,
+    "consecutive inactive pages to scan under a single object lock");
+
 static u_int isqrt(u_int num);
 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
     bool in_shortfall);
@@ -1426,6 +1431,7 @@
 	vm_object_t object;
 	vm_page_astate_t old, new;
 	int act_delta, addl_page_shortage, starting_page_shortage, refs;
+	u_int run;
 
 	object = NULL;
 	vm_batchqueue_init(&rq);
@@ -1475,6 +1481,7 @@
 				/* The page is being freed by another thread. */
 				continue;
 
+			run = 0;
 			/* Depends on type-stability. */
 			VM_OBJECT_WLOCK(object);
 			if (__predict_false(m->object != object)) {
@@ -1483,6 +1490,7 @@
 				goto reinsert;
 			}
 		}
+		run++;
 
 		if (vm_page_tryxbusy(m) == 0) {
 			/*
@@ -1607,6 +1615,20 @@
 			 * without holding the queue lock.
 			 */
 			m->a.queue = PQ_NONE;
+			if (run >= vm_pageout_scan_inactive_run_max) {
+				/*
+				 * Take a break on the object lock if we've
+				 * been at this for a while with the same
+				 * object.  Do a small amount of work outside
+				 * the lock to give someone else a chance to
+				 * acquire it.
+				 */
+				if (!vm_page_remove(m))
+					panic("%s: not the last reference, "
+					    "m=%p, o=%p", __func__, m, object);
+				VM_OBJECT_WUNLOCK(object);
+				object = NULL;
+			}
 			vm_page_free(m);
 			page_shortage--;
 			continue;