Index: head/sys/vm/vm_pageout.c
===================================================================
--- head/sys/vm/vm_pageout.c
+++ head/sys/vm/vm_pageout.c
@@ -1352,7 +1352,7 @@
  * target.
  */
 static int
-vm_pageout_scan_inactive(struct vm_domain *vmd, int pass, int shortage,
+vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
     int *addl_shortage)
 {
 	struct scan_state ss;
@@ -1366,25 +1366,6 @@
 	bool obj_locked;
 
 	/*
-	 * If we need to reclaim memory ask kernel caches to return
-	 * some.  We rate limit to avoid thrashing.
-	 */
-	if (vmd == VM_DOMAIN(0) && pass > 0 &&
-	    (time_uptime - lowmem_uptime) >= lowmem_period) {
-		/*
-		 * Decrease registered cache sizes.
-		 */
-		SDT_PROBE0(vm, , , vm__lowmem_scan);
-		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
-		/*
-		 * We do this explicitly after the caches have been
-		 * drained above.
-		 */
-		uma_reclaim();
-		lowmem_uptime = time_uptime;
-	}
-
-	/*
 	 * The addl_page_shortage is an estimate of the number of temporarily
 	 * stuck pages in the inactive queue.  In other words, the
 	 * number of pages from the inactive count that should be
@@ -1393,16 +1374,13 @@
 	addl_page_shortage = 0;
 
 	/*
-	 * Calculate the number of pages that we want to free.  This number
-	 * can be negative if many pages are freed between the wakeup call to
-	 * the page daemon and this calculation.
+	 * vmd_pageout_deficit counts the number of pages requested in
+	 * allocations that failed because of a free page shortage.  We assume
+	 * that the allocations will be reattempted and thus include the deficit
+	 * in our scan target.
 	 */
-	if (pass > 0) {
-		deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
-		page_shortage = shortage + deficit;
-	} else
-		page_shortage = deficit = 0;
-	starting_page_shortage = page_shortage;
+	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
+	starting_page_shortage = page_shortage = shortage + deficit;
 
 	mtx = NULL;
 	obj_locked = false;
@@ -1638,8 +1616,7 @@
 	/*
 	 * Reclaim pages by swapping out idle processes, if configured to do so.
 	 */
-	if (pass > 0)
-		vm_swapout_run_idle();
+	vm_swapout_run_idle();
 
 	/*
 	 * See the description of addl_page_shortage above.
@@ -1870,15 +1847,35 @@
 }
 
 static void
+vm_pageout_lowmem(struct vm_domain *vmd)
+{
+
+	if (vmd == VM_DOMAIN(0) &&
+	    time_uptime - lowmem_uptime >= lowmem_period) {
+		/*
+		 * Decrease registered cache sizes.
+		 */
+		SDT_PROBE0(vm, , , vm__lowmem_scan);
+		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
+
+		/*
+		 * We do this explicitly after the caches have been
+		 * drained above.
+		 */
+		uma_reclaim();
+		lowmem_uptime = time_uptime;
+	}
+}
+
+static void
 vm_pageout_worker(void *arg)
 {
 	struct vm_domain *vmd;
-	int addl_shortage, domain, pass, shortage;
+	int addl_shortage, domain, shortage;
 	bool target_met;
 
 	domain = (uintptr_t)arg;
 	vmd = VM_DOMAIN(domain);
-	pass = 0;
 	shortage = 0;
 	target_met = true;
 
@@ -1896,6 +1893,7 @@
 	 */
 	while (TRUE) {
 		vm_domain_pageout_lock(vmd);
+
 		/*
 		 * We need to clear wanted before we check the limits.  This
 		 * prevents races with wakers who will check wanted after they
@@ -1908,12 +1906,12 @@
 		 */
 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
 			/*
-			 * Yes, the scan failed to free enough pages.  If
-			 * we have performed a level >= 1 (page reclamation)
-			 * scan, then sleep a bit and try again.
+			 * Yes.  If the scan failed to produce enough free
+			 * pages, sleep uninterruptibly for some time in the
+			 * hope that the laundry thread will clean some pages.
 			 */
 			vm_domain_pageout_unlock(vmd);
-			if (pass > 1)
+			if (!target_met)
				pause("pwait", hz / VM_INACT_SCAN_RATE);
 		} else {
 			/*
@@ -1934,10 +1932,12 @@
 		 * this interval, and scan the inactive queue.
 		 */
 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
-		if (shortage > 0 && pass == 0)
-			pass = 1;
-		target_met = vm_pageout_scan_inactive(vmd, pass, shortage,
-		    &addl_shortage);
+		if (shortage > 0) {
+			vm_pageout_lowmem(vmd);
+			target_met = vm_pageout_scan_inactive(vmd, shortage,
+			    &addl_shortage);
+		} else
+			addl_shortage = 0;
 
 		/*
 		 * Scan the active queue.  A positive value for shortage
@@ -1946,13 +1946,6 @@
 		 * indicates that we must aggressively deactivate pages to avoid
 		 * a shortfall.
 		 */
 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
 		vm_pageout_scan_active(vmd, shortage);
-
-		/*
-		 * If the target was not met we must increase the pass to
-		 * more aggressively reclaim.
-		 */
-		if (!target_met)
-			pass++;
 	}
 }
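For reference, the sketch below is a standalone model (not part of the patch and not kernel code) of the decision logic this change introduces: the low-memory handlers run only for domain 0, are rate limited to one invocation per lowmem_period, and are reached only when the PID controller reports a page shortage. The names domain_id, now_seconds(), drain_caches(), pageout_lowmem(), and pageout_iteration(), and the period value used here, are illustrative stand-ins rather than FreeBSD interfaces.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static const time_t lowmem_period = 10;	/* illustrative period, in seconds */
static time_t lowmem_uptime;		/* last time the caches were drained */

static time_t
now_seconds(void)
{
	return (time(NULL));		/* stand-in for time_uptime */
}

static void
drain_caches(void)
{
	/* Stand-in for EVENTHANDLER_INVOKE(vm_lowmem, ...) plus uma_reclaim(). */
	printf("draining caches\n");
}

static void
pageout_lowmem(int domain_id)
{
	/* Only domain 0 drives the global low-memory handlers, rate limited. */
	if (domain_id == 0 && now_seconds() - lowmem_uptime >= lowmem_period) {
		drain_caches();
		lowmem_uptime = now_seconds();
	}
}

static bool
pageout_iteration(int domain_id, int shortage)
{
	/* The lowmem handlers run only when a shortage is reported. */
	if (shortage > 0) {
		pageout_lowmem(domain_id);
		return (false);		/* pretend the scan missed its target */
	}
	return (true);
}

int
main(void)
{
	/* A shortage triggers the (rate-limited) drain; no shortage skips it. */
	pageout_iteration(0, 128);
	pageout_iteration(0, 0);
	return (0);
}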