Index: vm/vm_page.c =================================================================== --- vm/vm_page.c +++ vm/vm_page.c @@ -1409,9 +1409,7 @@ * * Note: we *always* dirty the page.  It is necessary both for the * fact that we moved it, and because we may be invalidating - *	swap.  If the page is on the cache, we have to deactivate it - *	or vm_page_dirty() will panic.  Dirty pages are not allowed - *	on the cache. + *	swap. * * The objects must be locked. */ @@ -2042,8 +2040,8 @@ } else if (level >= 0) { /* * The page is reserved but not yet allocated.  In - * other words, it is still cached or free.  Extend - * the current run by one page. + * other words, it is still free.  Extend the current + * run by one page. */ run_ext = 1; #endif @@ -2050,10 +2048,10 @@ } else if ((order = m->order) < VM_NFREEORDER) { /* * The page is enqueued in the physical memory - * allocator's cache/free page queues.  Moreover, it - * is the first page in a power-of-two-sized run of - * contiguous cache/free pages.  Add these pages to - * the end of the current run, and jump ahead. + * allocator's free page queues.  Moreover, it is the + * first page in a power-of-two-sized run of + * contiguous free pages.  Add these pages to the end + * of the current run, and jump ahead. */ run_ext = 1 << order; m_inc = 1 << order; @@ -2061,16 +2059,15 @@ /* * Skip the page for one of the following reasons: (1) * It is enqueued in the physical memory allocator's - * cache/free page queues.  However, it is not the - * first page in a run of contiguous cache/free pages. - * (This case rarely occurs because the scan is - * performed in ascending order.) (2) It is not - * reserved, and it is transitioning from free to - * allocated.  (Conversely, the transition from - * allocated to free for managed pages is blocked by - * the page lock.) (3) It is allocated but not - * contained by an object and not wired, e.g., - * allocated by Xen's balloon driver. + * free page queues.
However, it is not the first + * page in a run of contiguous free pages.  (This case + * rarely occurs because the scan is performed in + * ascending order.) (2) It is not reserved, and it is + * transitioning from free to allocated.  (Conversely, + * the transition from allocated to free for managed + * pages is blocked by the page lock.) (3) It is + * allocated but not contained by an object and not + * wired, e.g., allocated by Xen's balloon driver. */ run_ext = 0; } @@ -2282,11 +2279,11 @@ if (order < VM_NFREEORDER) { /* * The page is enqueued in the physical memory - * allocator's cache/free page queues. - * Moreover, it is the first page in a power- - * of-two-sized run of contiguous cache/free - * pages.  Jump ahead to the last page within - * that run, and continue from there. + * allocator's free page queues.  Moreover, it + * is the first page in a power-of-two-sized + * run of contiguous free pages.  Jump ahead + * to the last page within that run, and + * continue from there. */ m += (1 << order) - 1; } @@ -2334,9 +2331,9 @@ * conditions by relocating the virtual pages using that physical memory. * Returns true if reclamation is successful and false otherwise.  Since * relocation requires the allocation of physical pages, reclamation may - * fail due to a shortage of cache/free pages.  When reclamation fails, - * callers are expected to perform VM_WAIT before retrying a failed - * allocation operation, e.g., vm_page_alloc_contig(). + * fail due to a shortage of free pages.  When reclamation fails, callers + * are expected to perform VM_WAIT before retrying a failed allocation + * operation, e.g., vm_page_alloc_contig(). * * The caller must always specify an allocation class through "req". * @@ -2371,8 +2368,8 @@ req_class = VM_ALLOC_SYSTEM; /* - * Return if the number of cached and free pages cannot satisfy the - * requested allocation. + * Return if the number of free pages cannot satisfy the requested + * allocation.
*/ count = vm_cnt.v_free_count; if (count < npages + vm_cnt.v_free_reserved || (count < npages + @@ -2642,9 +2639,8 @@ /* * vm_page_free_wakeup: * - * Helper routine for vm_page_free_toq() and vm_page_cache().  This - * routine is called when a page has been added to the cache or free - * queues. + * Helper routine for vm_page_free_toq().  This routine is called + * when a page is added to the free queues. * * The page queues must be locked. */ @@ -2733,7 +2729,7 @@ /* * Insert the page into the physical memory allocator's - * cache/free page queues. + * free page queues. */ mtx_lock(&vm_page_queue_free_mtx); vm_phys_freecnt_adj(m, 1); @@ -2833,21 +2829,10 @@ /* * Move the specified page to the inactive queue. * - * Many pages placed on the inactive queue should actually go - * into the cache, but it is difficult to figure out which.  What - * we do instead, if the inactive target is well met, is to put - * clean pages at the head of the inactive queue instead of the tail. - * This will cause them to be moved to the cache more quickly and - * if not actively re-referenced, reclaimed more quickly.  If we just - * stick these pages at the end of the inactive queue, heavy filesystem - * meta-data accesses can cause an unnecessary paging load on memory bound - * processes.  This optimization causes one-time-use metadata to be - * reused more quickly. - * - * Normally noreuse is FALSE, resulting in LRU operation.  noreuse is set - * to TRUE if we want this page to be 'as if it were placed in the cache', - * except without unmapping it from the process address space.  In - * practice this is implemented by inserting the page at the head of the + * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive + * queue.  However, setting "noreuse" to TRUE will accelerate the specified + * page's reclamation, but it will not unmap the page from any address space.
+ * This is implemented by inserting the page near the head of the inactive * queue, using a marker page to guide FIFO insertion ordering. * * The page must be locked. @@ -2974,16 +2959,9 @@ if (advice == MADV_FREE) /* * Mark the page clean. This will allow the page to be freed - * up by the system. However, such pages are often reused - * quickly by malloc() so we do not do anything that would - * cause a page fault if we can help it. - * - * Specifically, we do not try to actually free the page now - * nor do we try to put it in the cache (which would cause a - * page fault on reuse). - * - * But we do make the page as freeable as we can without - * actually taking the step of unmapping it. + * without first paging it out. MADV_FREE pages are often + * quickly reused by malloc(3), so we do not do anything that + * would result in a page fault on a later access. */ vm_page_undirty(m); else if (advice != MADV_DONTNEED)