Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -159,6 +159,7 @@
 	VM_LAUNDRY_BACKGROUND,
 	VM_LAUNDRY_SHORTFALL
 } vm_laundry_request = VM_LAUNDRY_IDLE;
+static int vm_inactq_scans;
 
 static int vm_pageout_update_period;
 static int disable_swap_pageouts;
@@ -961,7 +962,7 @@
 	struct vm_domain *domain;
 	struct vm_pagequeue *pq;
 	uint64_t nclean, ndirty;
-	u_int last_launder, wakeups;
+	u_int inactq_scans, last_launder;
 	int domidx, last_target, launder, shortfall, shortfall_cycle, target;
 	bool in_shortfall;
 
@@ -975,6 +976,7 @@
 	in_shortfall = false;
 	shortfall_cycle = 0;
 	target = 0;
+	inactq_scans = 0;
 	last_launder = 0;
 
 	/*
@@ -993,7 +995,6 @@
 		KASSERT(shortfall_cycle >= 0,
 		    ("negative cycle %d", shortfall_cycle));
 		launder = 0;
-		wakeups = VM_CNT_FETCH(v_pdwakeups);
 
 		/*
 		 * First determine whether we need to launder pages to meet a
@@ -1017,7 +1018,7 @@
 				target = 0;
 				goto trybackground;
 			}
-			last_launder = wakeups;
+			last_launder = inactq_scans;
 			launder = target / shortfall_cycle--;
 			goto dolaundry;
 
@@ -1034,29 +1035,29 @@
 		 *
 		 * The background laundering threshold is not a constant.
		 * Instead, it is a slowly growing function of the number of
-		 * page daemon wakeups since the last laundering.  Thus, as the
+		 * page daemon scans since the last laundering.  Thus, as the
 		 * ratio of dirty to clean inactive pages grows, the amount of
 		 * memory pressure required to trigger laundering decreases.
 		 */
 trybackground:
 		nclean = vm_cnt.v_inactive_count + vm_cnt.v_free_count;
 		ndirty = vm_cnt.v_laundry_count;
-		if (target == 0 && wakeups != last_launder &&
-		    ndirty * isqrt(wakeups - last_launder) >= nclean) {
+		if (target == 0 && inactq_scans != last_launder &&
+		    ndirty * isqrt(inactq_scans - last_launder) >= nclean) {
 			target = vm_background_launder_target;
 		}
 
 		/*
 		 * We have a non-zero background laundering target.  If we've
 		 * laundered up to our maximum without observing a page daemon
-		 * wakeup, just stop.  This is a safety belt that ensures we
+		 * request, just stop.  This is a safety belt that ensures we
 		 * don't launder an excessive amount if memory pressure is low
 		 * and the ratio of dirty to clean pages is large.  Otherwise,
 		 * proceed at the background laundering rate.
 		 */
 		if (target > 0) {
-			if (wakeups != last_launder) {
-				last_launder = wakeups;
+			if (inactq_scans != last_launder) {
+				last_launder = inactq_scans;
 				last_target = target;
 			} else if (last_target - target >=
 			    vm_background_launder_max * PAGE_SIZE / 1024) {
@@ -1104,6 +1105,7 @@
 
 		if (target == 0)
 			vm_laundry_request = VM_LAUNDRY_IDLE;
+		inactq_scans = vm_inactq_scans;
 		vm_pagequeue_unlock(pq);
 	}
 }
@@ -1349,12 +1351,16 @@
 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
 	 * swap devices are configured, the laundry thread has no work to do, so
 	 * don't bother waking it up.
+	 *
+	 * The laundry thread uses the number of inactive queue scans elapsed
+	 * since the last laundering to determine whether to launder again, so
+	 * keep count.
 	 */
-	if (vm_laundry_request == VM_LAUNDRY_IDLE &&
-	    starting_page_shortage > 0) {
+	if (starting_page_shortage > 0) {
 		pq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY];
 		vm_pagequeue_lock(pq);
-		if (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled)) {
+		if (vm_laundry_request == VM_LAUNDRY_IDLE &&
+		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
 			if (page_shortage > 0) {
 				vm_laundry_request = VM_LAUNDRY_SHORTFALL;
 				VM_CNT_INC(v_pdshortfalls);
@@ -1362,6 +1368,7 @@
 				vm_laundry_request = VM_LAUNDRY_BACKGROUND;
 			wakeup(&vm_laundry_request);
 		}
+		vm_inactq_scans++;
 		vm_pagequeue_unlock(pq);
 	}
 
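
As a quick illustration of the new trigger, ndirty * isqrt(inactq_scans - last_launder) >= nclean: because the threshold scales with the integer square root of the number of inactive queue scans since the last laundering, the dirty-to-clean ratio required to start background laundering shrinks slowly as scans accumulate. The standalone userspace sketch below demonstrates this behavior; it is not part of the patch, its isqrt() is merely a stand-in for the kernel's integer square root, and the page counts are invented for illustration.

/*
 * Userspace sketch of the patch's background-laundering trigger:
 *
 *	ndirty * isqrt(inactq_scans - last_launder) >= nclean
 *
 * isqrt() is a stand-in for the kernel's integer square root, and the
 * page counts in main() are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
isqrt(uint64_t n)
{
	uint64_t lo, hi, mid;

	/* Bisection: find the largest mid such that mid * mid <= n. */
	lo = 0;
	hi = n / 2 + 1;
	while (lo < hi) {
		mid = lo + (hi - lo + 1) / 2;
		if (mid <= n / mid)	/* mid * mid <= n, without overflow */
			lo = mid;
		else
			hi = mid - 1;
	}
	return (lo);
}

int
main(void)
{
	/* Invented counts: 200000 clean pages, 40000 dirty pages. */
	uint64_t nclean = 200000, ndirty = 40000;
	unsigned int scans;

	/*
	 * The threshold grows with the square root of the number of
	 * scans elapsed since the last laundering, so a 5:1
	 * clean-to-dirty ratio triggers once isqrt(scans) reaches 5.
	 */
	for (scans = 1; scans <= 30; scans++) {
		if (ndirty * isqrt(scans) >= nclean) {
			printf("trigger after %u scans\n", scans);
			break;
		}
	}
	return (0);
}

With these numbers (a 5:1 clean-to-dirty ratio) the sketch prints "trigger after 25 scans", since isqrt() first reaches 5 at 25; a dirtier system would trigger after fewer scans, matching the comment that less memory pressure is needed as the dirty ratio grows.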