Index: head/sys/vm/vm_pageout.c
===================================================================
--- head/sys/vm/vm_pageout.c
+++ head/sys/vm/vm_pageout.c
@@ -1965,12 +1965,20 @@
 	}
 }
 
+/*
+ * Signal a free page shortage to subsystems that have registered an event
+ * handler.  Reclaim memory from UMA in the event of a severe shortage.
+ * Return true if the free page count should be re-evaluated.
+ */
 static bool
 vm_pageout_lowmem(void)
 {
 	static int lowmem_ticks = 0;
 	int last;
+	bool ret;
+
+	ret = false;
 
 	last = atomic_load_int(&lowmem_ticks);
 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
@@ -1984,15 +1992,27 @@
 
 		/*
 		 * We do this explicitly after the caches have been
-		 * drained above.  If we have a severe page shortage on
-		 * our hands, completely drain all UMA zones.  Otherwise,
-		 * just prune the caches.
+		 * drained above.
 		 */
-		uma_reclaim(vm_page_count_min() ? UMA_RECLAIM_DRAIN_CPU :
-		    UMA_RECLAIM_TRIM);
-		return (true);
+		uma_reclaim(UMA_RECLAIM_TRIM);
+		ret = true;
 	}
-	return (false);
+
+	/*
+	 * Kick off an asynchronous reclaim of cached memory if one of the
+	 * page daemons is failing to keep up with demand.  Use the "severe"
+	 * threshold instead of "min" to ensure that we do not blow away the
+	 * caches if a subset of the NUMA domains are depleted by kernel memory
+	 * allocations; the domainset iterators automatically skip domains
+	 * below the "min" threshold on the first pass.
+	 *
+	 * UMA reclaim worker has its own rate-limiting mechanism, so don't
+	 * worry about kicking it too often.
+	 */
+	if (vm_page_count_severe())
+		uma_reclaim_wakeup();
+
+	return (ret);
 }
 
 static void
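
For context on the "event handler" named in the new function comment: the event in
question is the vm_lowmem eventhandler, which vm_pageout_lowmem() fires from inside
the rate-limited loop. Below is a minimal sketch of how a subsystem might hook it.
The "mycache" names, the printf placeholder, and the SYSINIT placement are
hypothetical; EVENTHANDLER_REGISTER(9), the vm_lowmem event, and the VM_LOW_PAGES /
VM_LOW_KMEM flags are existing kernel interfaces.

/*
 * Sketch only: registering a handler for the vm_lowmem event.  A real
 * subsystem would release cached memory here instead of printing.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>

static eventhandler_tag mycache_lowmem_tag;

static void
mycache_lowmem(void *arg __unused, int flags)
{
	/*
	 * Runs under memory pressure: free cached memory and avoid new
	 * allocations.  VM_LOW_PAGES distinguishes a free page shortage
	 * from a kernel address space shortage (VM_LOW_KMEM).
	 */
	if ((flags & VM_LOW_PAGES) != 0)
		printf("mycache: trimming caches for page shortage\n");
}

static void
mycache_init(void *arg __unused)
{
	mycache_lowmem_tag = EVENTHANDLER_REGISTER(vm_lowmem,
	    mycache_lowmem, NULL, EVENTHANDLER_PRI_ANY);
}
SYSINIT(mycache_lowmem, SI_SUB_DRIVERS, SI_ORDER_ANY, mycache_init, NULL);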
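
The loop header's rate limiting also deserves a note: the unsigned subtraction
(u_int)(ticks - last) stays correct across tick counter wraparound, and
atomic_fcmpset_int() both claims the period and, on failure, reloads "last" so the
deadline is re-tested against the newest value. A userland C11 rendering of the same
pattern, with illustrative names (this is not the kernel API), might look like:

/*
 * Sketch of the lock-free rate limiter: only one thread per period wins
 * the compare-exchange race and performs the expensive work.
 * atomic_compare_exchange_weak() plays the role of the kernel's
 * atomic_fcmpset_int(), updating "last" on failure.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned now_ticks;	/* advanced by a clock thread */
static _Atomic unsigned lowmem_ticks;	/* last time the work ran */

static bool
rate_limit_enter(unsigned period_ticks)
{
	unsigned last, now;

	now = atomic_load(&now_ticks);
	last = atomic_load(&lowmem_ticks);
	/* Unsigned subtraction stays correct across counter wraparound. */
	while (now - last >= period_ticks) {
		if (atomic_compare_exchange_weak(&lowmem_ticks, &last, now))
			return (true);	/* We claimed this period. */
	}
	return (false);	/* Another thread ran recently enough. */
}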