Index: sys/conf/options
===================================================================
--- sys/conf/options
+++ sys/conf/options
@@ -629,6 +629,7 @@
 VM_KMEM_SIZE_MAX	opt_vm.h
 VM_NRESERVLEVEL	opt_vm.h
 VM_LEVEL_0_ORDER	opt_vm.h
+VM_PAGEOUT_PAGE_COUNT	opt_vm.h
 MALLOC_MAKE_FAILURES	opt_vm.h
 MALLOC_PROFILE	opt_vm.h
 MALLOC_DEBUG_MAXZONES	opt_vm.h
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -1115,7 +1115,7 @@
 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
     int flags, boolean_t *allclean, boolean_t *eio)
 {
-	vm_page_t ma[2 * vm_pageout_page_count - 1], tp;
+	vm_page_t ma[2 * VM_PAGEOUT_PAGE_COUNT - 1], tp;
 	int base, count, runlen;
 
 	vm_page_lock_assert(p, MA_NOTOWNED);
@@ -1123,7 +1123,7 @@
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	base = nitems(ma) / 2;
 	ma[base] = p;
-	for (count = 1, tp = p; count < vm_pageout_page_count; count++) {
+	for (count = 1, tp = p; count < VM_PAGEOUT_PAGE_COUNT; count++) {
 		tp = vm_page_next(tp);
 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
 			break;
@@ -1134,7 +1134,7 @@
 		ma[base + count] = tp;
 	}
 
-	for (tp = p; count < vm_pageout_page_count; count++) {
+	for (tp = p; count < VM_PAGEOUT_PAGE_COUNT; count++) {
 		tp = vm_page_prev(tp);
 		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
 			break;
Index: sys/vm/vm_pageout.h
===================================================================
--- sys/vm/vm_pageout.h
+++ sys/vm/vm_pageout.h
@@ -72,7 +72,14 @@
  */
 extern u_long vm_page_max_user_wired;
-extern int vm_pageout_page_count;
+
+/*
+ * The maximum number of adjacent dirty pages from the same VM object that
+ * are written by a single pageout operation.
+ */
+#ifndef VM_PAGEOUT_PAGE_COUNT
+#define	VM_PAGEOUT_PAGE_COUNT	32
+#endif
 
 #define	VM_OOM_MEM	1
 #define	VM_OOM_MEM_PF	2
Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -145,7 +145,6 @@
 #define	VM_INACT_SCAN_RATE	10
 
 static int swapdev_enabled;
-int vm_pageout_page_count = 32;
 
 static int vm_panic_on_oom = 0;
 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
@@ -348,7 +347,7 @@
 vm_pageout_cluster(vm_page_t m)
 {
 	vm_object_t object;
-	vm_page_t mc[2 * vm_pageout_page_count - 1], p, pb, ps;
+	vm_page_t mc[2 * VM_PAGEOUT_PAGE_COUNT - 1], p, pb, ps;
 	vm_pindex_t pindex;
 	int ib, is, page_base, pageout_count;
@@ -377,7 +376,7 @@
 	 * forward scan if room remains.
 	 */
 more:
-	while (ib != 0 && pageout_count < vm_pageout_page_count) {
+	while (ib != 0 && pageout_count < VM_PAGEOUT_PAGE_COUNT) {
 		if (ib > pindex) {
 			ib = 0;
 			break;
@@ -411,10 +410,10 @@
 		 * We are at an alignment boundary.  Stop here, and switch
		 * directions.  Do not clear ib.
		 */
-		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
+		if ((pindex - (ib - 1)) % VM_PAGEOUT_PAGE_COUNT == 0)
			break;
	}
-	while (pageout_count < vm_pageout_page_count &&
+	while (pageout_count < VM_PAGEOUT_PAGE_COUNT &&
	    pindex + is < object->size) {
		if ((p = vm_page_next(ps)) == NULL ||
		    vm_page_tryxbusy(p) == 0)
@@ -442,7 +441,7 @@
	 * when possible, even past an alignment boundary.  This catches
	 * boundary conditions.
	 */
-	if (ib != 0 && pageout_count < vm_pageout_page_count)
+	if (ib != 0 && pageout_count < VM_PAGEOUT_PAGE_COUNT)
		goto more;

	return (vm_pageout_flush(&mc[page_base], pageout_count,
@@ -2260,7 +2259,7 @@
	 */
	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
	    vmd->vmd_interrupt_free_min;
-	vmd->vmd_free_reserved = vm_pageout_page_count +
+	vmd->vmd_free_reserved = VM_PAGEOUT_PAGE_COUNT +
	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
	vmd->vmd_free_min = vmd->vmd_page_count / 200;
	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
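
With the cluster size now a compile-time constant, the ma[] and mc[] arrays in
vm_object_page_collect_flush() and vm_pageout_cluster() become fixed-size stack
arrays rather than variable-length arrays, and the value is no longer tunable
at run time. Since sys/conf/options routes VM_PAGEOUT_PAGE_COUNT into opt_vm.h,
a custom kernel configuration can override the header default of 32 at build
time. A minimal sketch of such a config fragment, using 16 purely as an
illustrative value, not a tuning recommendation:

	#
	# Hypothetical kernel config fragment: override the pageout
	# cluster size.  The value 16 is an arbitrary example; when the
	# option is omitted, vm_pageout.h falls back to its default of 32.
	#
	options 	VM_PAGEOUT_PAGE_COUNT=16

Because the definition in vm_pageout.h is guarded by #ifndef, a value that
config(8) emits into opt_vm.h takes precedence over the header default in any
translation unit that includes opt_vm.h first.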