diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h
--- a/sys/vm/swap_pager.h
+++ b/sys/vm/swap_pager.h
@@ -68,6 +68,7 @@
 #ifdef _KERNEL
+extern bool swap_pager_almost_full;
 extern int swap_pager_avail;
 extern int nsw_cluster_max;
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -386,7 +386,7 @@
 }
 static bool swap_pager_full = true; /* swap space exhaustion (task killing) */
-static bool swap_pager_almost_full = true; /* swap space exhaustion (w/hysteresis) */
+bool swap_pager_almost_full = true; /* swap space exhaustion (w/hysteresis) */
 static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
 static int nsw_wcount_async;	/* limit async write buffers */
 static int nsw_wcount_async_max;/* assigned maximum */
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -80,6 +80,7 @@
 #include <sys/proc.h>
 #include <sys/kthread.h>
 #include <sys/ktr.h>
+#include <sys/limits.h>
 #include <sys/mount.h>
 #include <sys/racct.h>
 #include <sys/resourcevar.h>
@@ -183,26 +184,33 @@
     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
     "back-to-back calls to oom detector to start OOM");
 
-static int act_scan_laundry_weight = 3;
-
 static int
-sysctl_act_scan_laundry_weight(SYSCTL_HANDLER_ARGS)
+sysctl_laundry_weight(SYSCTL_HANDLER_ARGS)
 {
-	int error, newval;
+	int error, val;
 
-	newval = act_scan_laundry_weight;
-	error = sysctl_handle_int(oidp, &newval, 0, req);
-	if (error || req->newptr == NULL)
+	val = *(int *)arg1;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error != 0 || req->newptr == NULL)
 		return (error);
-	if (newval < 1)
+	if (val < arg2 || val > 100)
 		return (EINVAL);
-	act_scan_laundry_weight = newval;
+	*(int *)arg1 = val;
 	return (0);
 }
-SYSCTL_PROC(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN | CTLTYPE_INT,
-    &act_scan_laundry_weight, 0, sysctl_act_scan_laundry_weight, "I",
+
+static int act_scan_laundry_weight = 3;
+SYSCTL_PROC(_vm, OID_AUTO, act_scan_laundry_weight,
+    CTLTYPE_INT | CTLFLAG_RWTUN, &act_scan_laundry_weight, 1,
+    sysctl_laundry_weight, "I",
     "weight given to clean vs. dirty pages in active queue scans");
+static int inact_scan_laundry_weight = 1;
+SYSCTL_PROC(_vm, OID_AUTO, inact_scan_laundry_weight,
+    CTLTYPE_INT | CTLFLAG_RWTUN, &inact_scan_laundry_weight, 0,
+    sysctl_laundry_weight, "I",
+    "weight given to clean vs. dirty pages in inactive queue scans");
+
 static u_int vm_background_launder_rate = 4096;
 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
     &vm_background_launder_rate, 0,
@@ -1417,7 +1425,8 @@
 	struct vm_pagequeue *pq;
 	vm_object_t object;
 	vm_page_astate_t old, new;
-	int act_delta, addl_page_shortage, starting_page_shortage, refs;
+	int act_delta, addl_page_shortage, dirty_count, dirty_thresh;
+	int starting_page_shortage, refs;
 
 	object = NULL;
 	vm_batchqueue_init(&rq);
@@ -1431,6 +1440,18 @@
 	 */
 	addl_page_shortage = 0;
 
+	/*
+	 * dirty_count is the number of pages encountered that require
+	 * laundering before reclamation is possible.  If we encounter a
+	 * large number of dirty pages, we may abort the scan without
+	 * meeting the page shortage in the hope that laundering will allow
+	 * a future scan to meet the target.
+	 */
+	dirty_count = 0;
+	dirty_thresh = inact_scan_laundry_weight * page_shortage;
+	if (dirty_thresh == 0)
+		dirty_thresh = INT_MAX;
+
 	/*
 	 * Start scanning the inactive queue for pages that we can free.  The
 	 * scan will stop when we reach the target or we have scanned the
@@ -1443,7 +1464,7 @@
 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
 	vm_pagequeue_lock(pq);
 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
-	while (page_shortage > 0) {
+	while (page_shortage > 0 && dirty_count < dirty_thresh) {
 		/*
 		 * If we need to refill the scan batch queue, release any
 		 * optimistically held object lock.  This gives someone else a
@@ -1617,8 +1638,20 @@
 			page_shortage--;
 			continue;
 		}
-		if ((object->flags & OBJ_DEAD) == 0)
+		if ((object->flags & OBJ_DEAD) == 0) {
 			vm_page_launder(m);
+
+			/*
+			 * If the page would be paged out to a swap device,
+			 * and no devices are configured or they are all
+			 * nearly full, then don't count it against our
+			 * threshold, since it most likely can't be used to
+			 * meet our target.
+			 */
+			if ((object->flags & OBJ_SWAP) == 0 ||
+			    !atomic_load_bool(&swap_pager_almost_full))
+				dirty_count++;
+		}
skip_page:
		vm_page_xunbusy(m);
		continue;
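
For illustration, a minimal userland sketch (not part of the patch; the helper
program and its structure are assumptions) of how the new
vm.inact_scan_laundry_weight knob could be queried and tuned via
sysctlbyname(3). Per the handler above, values outside [0, 100] are rejected
with EINVAL, and a weight of 0 sets dirty_thresh to INT_MAX, effectively
disabling the early abort of the inactive queue scan:

	/* Hypothetical tuning helper; illustrative only. */
	#include <sys/types.h>
	#include <sys/sysctl.h>

	#include <err.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(int argc, char **argv)
	{
		int val;
		size_t len;

		/* Read the current weight. */
		len = sizeof(val);
		if (sysctlbyname("vm.inact_scan_laundry_weight", &val, &len,
		    NULL, 0) != 0)
			err(1, "sysctlbyname(read)");
		printf("inact_scan_laundry_weight: %d\n", val);

		/* Optionally set a new weight; the kernel enforces [0, 100]. */
		if (argc > 1) {
			val = atoi(argv[1]);
			if (sysctlbyname("vm.inact_scan_laundry_weight", NULL,
			    NULL, &val, sizeof(val)) != 0)
				err(1, "sysctlbyname(write)");
			printf("set to %d\n", val);
		}
		return (0);
	}

Note that the two sysctls share sysctl_laundry_weight but pass different
minimums through arg2: the active-queue weight keeps its floor of 1, while the
inactive-queue weight accepts 0 so the dirty-page abort can be disabled
entirely.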