D8752.diff

Index: head/sys/vm/vm_map.c
===================================================================
--- head/sys/vm/vm_map.c
+++ head/sys/vm/vm_map.c
@@ -1858,9 +1858,7 @@
* limited number of page mappings are created at the low-end of the
* specified address range. (For this purpose, a superpage mapping
* counts as one page mapping.) Otherwise, all resident pages within
- * the specified address range are mapped. Because these mappings are
- * being created speculatively, cached pages are not reactivated and
- * mapped.
+ * the specified address range are mapped.
*/
static void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
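A minimal userspace sketch of the capping policy this comment describes: a bounded
number of mappings is entered starting from the low end of the range, and a
superpage counts as a single mapping. The cap MAX_MAPPINGS and the page array are
hypothetical stand-ins (the real function walks the object's resident pages, and
its cap is a tunable along the lines of MAX_INIT_PT).

#include <stdbool.h>
#include <stdio.h>

#define MAX_MAPPINGS 96     /* hypothetical cap, analogous to MAX_INIT_PT */

struct rpage {
    unsigned long pindex;
    bool superpage;         /* a superpage mapping counts as one mapping */
};

/* Enter at most MAX_MAPPINGS mappings from the low end of the range. */
static int
prefault_partial(const struct rpage *pages, int npages)
{
    int i, nmapped = 0;

    for (i = 0; i < npages && nmapped < MAX_MAPPINGS; i++) {
        printf("map pindex %lu%s\n", pages[i].pindex,
            pages[i].superpage ? " (superpage)" : "");
        nmapped++;
    }
    return (nmapped);
}

int
main(void)
{
    struct rpage pages[] = {{0, false}, {1, true}, {513, false}};

    printf("entered %d mappings\n", prefault_partial(pages, 3));
    return (0);
}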
Index: head/sys/vm/vm_object.h
===================================================================
--- head/sys/vm/vm_object.h
+++ head/sys/vm/vm_object.h
@@ -79,17 +79,6 @@
*
* vm_object_t Virtual memory object.
*
- * The root of cached pages pool is protected by both the per-object lock
- * and the free pages queue mutex.
- * On insert in the cache radix trie, the per-object lock is expected
- * to be already held and the free pages queue mutex will be
- * acquired during the operation too.
- * On remove and lookup from the cache radix trie, only the free
- * pages queue mutex is expected to be locked.
- * These rules allow for reliably checking for the presence of cached
- * pages with only the per-object lock held, thereby reducing contention
- * for the free pages queue mutex.
- *
* List of locks
* (c) const until freed
* (o) per-object lock
Index: head/sys/vm/vm_object.c
===================================================================
--- head/sys/vm/vm_object.c
+++ head/sys/vm/vm_object.c
@@ -1356,7 +1356,7 @@
goto retry;
}
- /* vm_page_rename() will handle dirty and cache. */
+ /* vm_page_rename() will dirty the page. */
if (vm_page_rename(m, new_object, idx)) {
VM_OBJECT_WUNLOCK(new_object);
VM_OBJECT_WUNLOCK(orig_object);
@@ -1446,7 +1446,7 @@
/*
* Initial conditions:
*
- * We do not want to have to test for the existence of cache or swap
+ * We do not want to have to test for the existence of swap
* pages in the backing object. XXX but with the new swapper this
* would be pretty easy to do.
*/
@@ -1590,8 +1590,7 @@
* backing object to the main object.
*
* If the page was mapped to a process, it can remain mapped
- * through the rename. vm_page_rename() will handle dirty and
- * cache.
+ * through the rename. vm_page_rename() will dirty the page.
*/
if (vm_page_rename(p, object, new_pindex)) {
next = vm_object_collapse_scan_wait(object, NULL, next,
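The callers changed in this file share one idiom: vm_page_rename() can fail
transiently (its radix-trie insertion may need memory), in which case the caller
drops its locks, waits, and retries the scan. A minimal userspace sketch of that
retry shape; rename_page() and wait_for_memory() are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

static int attempts;

/* Hypothetical stand-in: fails once to model a transient memory shortage. */
static bool
rename_page(void)
{
    return (attempts++ > 0);
}

static void
wait_for_memory(void)
{
    printf("rename failed; dropping locks and waiting\n");
}

int
main(void)
{
retry:
    if (!rename_page()) {
        wait_for_memory();      /* locks would be dropped here */
        goto retry;             /* rescan from a stable point */
    }
    printf("page renamed and dirtied\n");
    return (0);
}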
Index: head/sys/vm/vm_page.h
===================================================================
--- head/sys/vm/vm_page.h
+++ head/sys/vm/vm_page.h
@@ -352,19 +352,16 @@
* free
* Available for allocation now.
*
- * cache
- * Almost available for allocation. Still associated with
- * an object, but clean and immediately freeable.
- *
- * The following lists are LRU sorted:
- *
* inactive
* Low activity, candidates for reclamation.
+ * This list is approximately LRU ordered.
+ *
+ * laundry
* This is the list of pages that should be
* paged out next.
*
* active
- * Pages that are "active" i.e. they have been
+ * Pages that are "active", i.e., they have been
* recently referenced.
*
*/
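The queue taxonomy above maps onto a per-page queue field in the kernel. A
userspace sketch of the classification, with constants modeled on the PQ_* values
in sys/vm/vm_page.h of this era (the numeric values here are illustrative):

#include <stdio.h>

/* Modeled on PQ_* from sys/vm/vm_page.h; values are illustrative. */
enum pqueue { PQ_NONE, PQ_INACTIVE, PQ_ACTIVE, PQ_LAUNDRY };

static const char *
queue_name(enum pqueue q)
{
    switch (q) {
    case PQ_INACTIVE:
        return ("inactive: reclaim candidate, ~LRU ordered");
    case PQ_ACTIVE:
        return ("active: recently referenced");
    case PQ_LAUNDRY:
        return ("laundry: dirty, to be paged out next");
    default:
        return ("none: free or not enqueued");
    }
}

int
main(void)
{
    printf("%s\n", queue_name(PQ_LAUNDRY));
    return (0);
}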
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -1409,9 +1409,7 @@
*
* Note: we *always* dirty the page. It is necessary both for the
* fact that we moved it, and because we may be invalidating
- * swap. If the page is on the cache, we have to deactivate it
- * or vm_page_dirty() will panic. Dirty pages are not allowed
- * on the cache.
+ * swap.
*
* The objects must be locked.
*/
@@ -2042,18 +2040,18 @@
} else if (level >= 0) {
/*
* The page is reserved but not yet allocated. In
- * other words, it is still cached or free. Extend
- * the current run by one page.
+ * other words, it is still free. Extend the current
+ * run by one page.
*/
run_ext = 1;
#endif
} else if ((order = m->order) < VM_NFREEORDER) {
/*
* The page is enqueued in the physical memory
- * allocator's cache/free page queues. Moreover, it
- * is the first page in a power-of-two-sized run of
- * contiguous cache/free pages. Add these pages to
- * the end of the current run, and jump ahead.
+ * allocator's free page queues. Moreover, it is the
+ * first page in a power-of-two-sized run of
+ * contiguous free pages. Add these pages to the end
+ * of the current run, and jump ahead.
*/
run_ext = 1 << order;
m_inc = 1 << order;
@@ -2061,16 +2059,15 @@
/*
* Skip the page for one of the following reasons: (1)
* It is enqueued in the physical memory allocator's
- * cache/free page queues. However, it is not the
- * first page in a run of contiguous cache/free pages.
- * (This case rarely occurs because the scan is
- * performed in ascending order.) (2) It is not
- * reserved, and it is transitioning from free to
- * allocated. (Conversely, the transition from
- * allocated to free for managed pages is blocked by
- * the page lock.) (3) It is allocated but not
- * contained by an object and not wired, e.g.,
- * allocated by Xen's balloon driver.
+ * free page queues. However, it is not the first
+ * page in a run of contiguous free pages. (This case
+ * rarely occurs because the scan is performed in
+ * ascending order.) (2) It is not reserved, and it is
+ * transitioning from free to allocated. (Conversely,
+ * the transition from allocated to free for managed
+ * pages is blocked by the page lock.) (3) It is
+ * allocated but not contained by an object and not
+ * wired, e.g., allocated by Xen's balloon driver.
*/
run_ext = 0;
}
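The scan logic above exploits the buddy allocator's invariant: a page whose order
o is below VM_NFREEORDER heads a run of 1 << o contiguous free pages, so the scan
can extend the run and jump ahead by 1 << o in one step. A self-contained sketch
of that arithmetic (the page array and order encoding are invented for
illustration):

#include <stdio.h>

#define NFREEORDER 13       /* like VM_NFREEORDER: orders 0..12 */
#define NO_ORDER   (-1)     /* page is allocated / not a run head */

/*
 * orders[i] is the buddy order if page i heads a free run, else NO_ORDER.
 * Count the contiguous free pages starting at "start".
 */
static int
free_run(const int *orders, int npages, int start)
{
    int i = start, run = 0;

    while (i < npages && orders[i] >= 0 && orders[i] < NFREEORDER) {
        run += 1 << orders[i];  /* extend by the whole buddy run */
        i += 1 << orders[i];    /* jump past it in one step */
    }
    return (run);
}

int
main(void)
{
    /* An order-2 run (4 pages) followed by an order-1 run (2 pages). */
    int orders[8] = {2, NO_ORDER, NO_ORDER, NO_ORDER, 1, NO_ORDER,
        NO_ORDER, NO_ORDER};

    printf("free run at 0: %d pages\n", free_run(orders, 8, 0));
    return (0);
}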
@@ -2282,11 +2279,11 @@
if (order < VM_NFREEORDER) {
/*
* The page is enqueued in the physical memory
- * allocator's cache/free page queues.
- * Moreover, it is the first page in a power-
- * of-two-sized run of contiguous cache/free
- * pages. Jump ahead to the last page within
- * that run, and continue from there.
+ * allocator's free page queues. Moreover, it
+ * is the first page in a power-of-two-sized
+ * run of contiguous free pages. Jump ahead
+ * to the last page within that run, and
+ * continue from there.
*/
m += (1 << order) - 1;
}
@@ -2334,9 +2331,9 @@
* conditions by relocating the virtual pages using that physical memory.
* Returns true if reclamation is successful and false otherwise. Since
* relocation requires the allocation of physical pages, reclamation may
- * fail due to a shortage of cache/free pages. When reclamation fails,
- * callers are expected to perform VM_WAIT before retrying a failed
- * allocation operation, e.g., vm_page_alloc_contig().
+ * fail due to a shortage of free pages. When reclamation fails, callers
+ * are expected to perform VM_WAIT before retrying a failed allocation
+ * operation, e.g., vm_page_alloc_contig().
*
* The caller must always specify an allocation class through "req".
*
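A hedged sketch of the caller protocol this comment describes: retry a contiguous
allocation, attempting reclamation and then waiting when reclamation fails. The
names mirror the kernel's vm_page_alloc_contig()/vm_page_reclaim_contig()/VM_WAIT,
but the bodies here are stubs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stubs standing in for the kernel primitives named in the comment. */
static void *alloc_contig(size_t npages) { (void)npages; return (NULL); }
static bool reclaim_contig(size_t npages) { (void)npages; return (false); }
static void vm_wait(void) { printf("VM_WAIT: sleeping for free pages\n"); }

static void *
alloc_contig_retry(size_t npages, int tries)
{
    void *p;

    while (tries-- > 0) {
        if ((p = alloc_contig(npages)) != NULL)
            return (p);
        /* Reclamation may fail for lack of free pages ... */
        if (!reclaim_contig(npages))
            vm_wait();      /* ... so wait before retrying. */
    }
    return (NULL);
}

int
main(void)
{
    (void)alloc_contig_retry(4, 2);
    return (0);
}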
@@ -2371,8 +2368,8 @@
req_class = VM_ALLOC_SYSTEM;
/*
- * Return if the number of cached and free pages cannot satisfy the
- * requested allocation.
+ * Return if the number of free pages cannot satisfy the requested
+ * allocation.
*/
count = vm_cnt.v_free_count;
if (count < npages + vm_cnt.v_free_reserved || (count < npages +
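A self-contained sketch of the admission check begun above: the free-page count
must cover the request plus a reserve that shrinks with the allocation class. The
class names and thresholds are modeled on the surrounding code; the values are
illustrative, not the kernel's tuned ones.

#include <stdbool.h>
#include <stdio.h>

enum req_class { ALLOC_NORMAL, ALLOC_SYSTEM, ALLOC_INTERRUPT };

/* Illustrative reserves, in pages; the kernel tunes these at boot. */
static const int free_reserved = 1024;
static const int interrupt_free_min = 64;

/* Modeled on the free-count check in vm_page_reclaim_contig(). */
static bool
can_satisfy(int free_count, int npages, enum req_class class)
{
    switch (class) {
    case ALLOC_NORMAL:      /* must leave the full reserve intact */
        return (free_count >= npages + free_reserved);
    case ALLOC_SYSTEM:      /* may dip into all but the interrupt floor */
        return (free_count >= npages + interrupt_free_min);
    case ALLOC_INTERRUPT:   /* may consume the last free pages */
        return (free_count >= npages);
    }
    return (false);
}

int
main(void)
{
    printf("%d\n", can_satisfy(512, 16, ALLOC_SYSTEM));
    return (0);
}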
@@ -2642,9 +2639,8 @@
/*
* vm_page_free_wakeup:
*
- * Helper routine for vm_page_free_toq() and vm_page_cache(). This
- * routine is called when a page has been added to the cache or free
- * queues.
+ * Helper routine for vm_page_free_toq(). This routine is called
+ * when a page is added to the free queues.
*
* The page queues must be locked.
*/
@@ -2732,8 +2728,8 @@
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
/*
- * Insert the page into the physical memory allocator's
- * cache/free page queues.
+ * Insert the page into the physical memory allocator's free
+ * page queues.
*/
mtx_lock(&vm_page_queue_free_mtx);
vm_phys_freecnt_adj(m, 1);
@@ -2833,21 +2829,10 @@
/*
* Move the specified page to the inactive queue.
*
- * Many pages placed on the inactive queue should actually go
- * into the cache, but it is difficult to figure out which. What
- * we do instead, if the inactive target is well met, is to put
- * clean pages at the head of the inactive queue instead of the tail.
- * This will cause them to be moved to the cache more quickly and
- * if not actively re-referenced, reclaimed more quickly. If we just
- * stick these pages at the end of the inactive queue, heavy filesystem
- * meta-data accesses can cause an unnecessary paging load on memory bound
- * processes. This optimization causes one-time-use metadata to be
- * reused more quickly.
- *
- * Normally noreuse is FALSE, resulting in LRU operation. noreuse is set
- * to TRUE if we want this page to be 'as if it were placed in the cache',
- * except without unmapping it from the process address space. In
- * practice this is implemented by inserting the page at the head of the
+ * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive
+ * queue. However, setting "noreuse" to TRUE will accelerate the specified
+ * page's reclamation, but it will not unmap the page from any address space.
+ * This is implemented by inserting the page near the head of the inactive
* queue, using a marker page to guide FIFO insertion ordering.
*
* The page must be locked.
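A userspace sketch of the marker technique named above: normal deactivation
appends at the tail (LRU), while noreuse insertion places the page just before a
fixed marker near the head, which yields FIFO order among noreuse pages. It uses
the real sys/queue.h TAILQ macros; the structures are invented.

#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
    int id;                 /* -1 marks the queue's noreuse marker */
    TAILQ_ENTRY(page) q;
};
TAILQ_HEAD(pagequeue, page);

static void
deactivate(struct pagequeue *pq, struct page *marker, struct page *m,
    bool noreuse)
{
    if (noreuse)
        TAILQ_INSERT_BEFORE(marker, m, q);  /* near the head, FIFO */
    else
        TAILQ_INSERT_TAIL(pq, m, q);        /* normal LRU */
}

int
main(void)
{
    struct pagequeue pq = TAILQ_HEAD_INITIALIZER(pq);
    struct page marker = { -1 }, a = { 1 }, b = { 2 }, c = { 3 }, *p;

    TAILQ_INSERT_HEAD(&pq, &marker, q);
    deactivate(&pq, &marker, &a, false);    /* tail */
    deactivate(&pq, &marker, &b, true);     /* before the marker */
    deactivate(&pq, &marker, &c, true);     /* after b: FIFO */
    TAILQ_FOREACH(p, &pq, q)
        printf("%d ", p->id);
    printf("\n");   /* prints: 2 3 -1 1 */
    return (0);
}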
@@ -2974,16 +2959,9 @@
if (advice == MADV_FREE)
/*
* Mark the page clean. This will allow the page to be freed
- * up by the system. However, such pages are often reused
- * quickly by malloc() so we do not do anything that would
- * cause a page fault if we can help it.
- *
- * Specifically, we do not try to actually free the page now
- * nor do we try to put it in the cache (which would cause a
- * page fault on reuse).
- *
- * But we do make the page as freeable as we can without
- * actually taking the step of unmapping it.
+ * without first paging it out. MADV_FREE pages are often
+ * quickly reused by malloc(3), so we do not do anything that
+ * would result in a page fault on a later access.
*/
vm_page_undirty(m);
else if (advice != MADV_DONTNEED)
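The userspace-visible effect of this path: after madvise(MADV_FREE), the kernel
may discard the pages instead of laundering them, and a later write simply
revives them with a fault but no I/O. A minimal example using the real
madvise(2) interface (error handling elided for brevity):

#include <sys/mman.h>
#include <string.h>

int
main(void)
{
    size_t len = 16 * 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_PRIVATE, -1, 0);

    if (p == MAP_FAILED)
        return (1);
    memset(p, 0xa5, len);           /* dirty the pages */
    madvise(p, len, MADV_FREE);     /* clean: freeable without pageout */
    p[0] = 1;                       /* reuse faults the page back in */
    munmap(p, len);
    return (0);
}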
Index: head/sys/vm/vm_reserv.c
===================================================================
--- head/sys/vm/vm_reserv.c
+++ head/sys/vm/vm_reserv.c
@@ -62,7 +62,7 @@
/*
* The reservation system supports the speculative allocation of large physical
- * pages ("superpages"). Speculative allocation enables the fully-automatic
+ * pages ("superpages"). Speculative allocation enables the fully automatic
* utilization of superpages by the virtual memory system. In other words, no
* programmatic directives are required to use superpages.
*/
@@ -155,11 +155,11 @@
* physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
* within that object. The reservation's "popcnt" tracks the number of these
* small physical pages that are in use at any given time. When and if the
- * reservation is not fully utilized, it appears in the queue of partially-
+ * reservation is not fully utilized, it appears in the queue of partially
* populated reservations. The reservation always appears on the containing
* object's list of reservations.
*
- * A partially-populated reservation can be broken and reclaimed at any time.
+ * A partially populated reservation can be broken and reclaimed at any time.
*/
struct vm_reserv {
TAILQ_ENTRY(vm_reserv) partpopq;
@@ -196,11 +196,11 @@
static vm_reserv_t vm_reserv_array;
/*
- * The partially-populated reservation queue
+ * The partially populated reservation queue
*
- * This queue enables the fast recovery of an unused cached or free small page
- * from a partially-populated reservation. The reservation at the head of
- * this queue is the least-recently-changed, partially-populated reservation.
+ * This queue enables the fast recovery of an unused free small page from a
+ * partially populated reservation. The reservation at the head of this queue
+ * is the least recently changed, partially populated reservation.
*
* Access to this queue is synchronized by the free page queue lock.
*/
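The "least recently changed at the head" property comes from requeueing: whenever
a reservation's population changes, it moves to the tail, so the head is the best
reclamation candidate. A sketch under that assumption (structure names invented,
after the real partpopq/popcnt fields):

#include <sys/queue.h>
#include <stdio.h>

struct resv {
    int id, popcnt;
    TAILQ_ENTRY(resv) partpopq;
};
TAILQ_HEAD(rqueue, resv);

/* Any population change moves the reservation to the tail. */
static void
resv_changed(struct rqueue *q, struct resv *rv, int delta)
{
    rv->popcnt += delta;
    TAILQ_REMOVE(q, rv, partpopq);
    if (rv->popcnt > 0)
        TAILQ_INSERT_TAIL(q, rv, partpopq);
}

int
main(void)
{
    struct rqueue q = TAILQ_HEAD_INITIALIZER(q);
    struct resv a = {1, 1}, b = {2, 1};

    TAILQ_INSERT_TAIL(&q, &a, partpopq);
    TAILQ_INSERT_TAIL(&q, &b, partpopq);
    resv_changed(&q, &a, 1);    /* a moves behind b */
    printf("head (least recently changed): %d\n", TAILQ_FIRST(&q)->id);
    return (0);
}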
@@ -225,7 +225,7 @@
static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
- sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");
+ sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");
static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
@@ -267,7 +267,7 @@
}
/*
- * Describes the current state of the partially-populated reservation queue.
+ * Describes the current state of the partially populated reservation queue.
*/
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
@@ -301,7 +301,7 @@
/*
* Reduces the given reservation's population count. If the population count
* becomes zero, the reservation is destroyed. Additionally, moves the
- * reservation to the tail of the partially-populated reservation queue if the
+ * reservation to the tail of the partially populated reservation queue if the
* population count is non-zero.
*
* The free page queue lock must be held.
@@ -363,7 +363,7 @@
/*
* Increases the given reservation's population count. Moves the reservation
- * to the tail of the partially-populated reservation queue.
+ * to the tail of the partially populated reservation queue.
*
* The free page queue must be locked.
*/
@@ -597,7 +597,7 @@
}
/*
- * Allocates a page from an existing or newly-created reservation.
+ * Allocates a page from an existing or newly created reservation.
*
* The page "mpred" must immediately precede the offset "pindex" within the
* specified object.
@@ -721,12 +721,12 @@
}
/*
- * Breaks the given reservation. Except for the specified cached or free
- * page, all cached and free pages in the reservation are returned to the
- * physical memory allocator. The reservation's population count and map are
- * reset to their initial state.
+ * Breaks the given reservation. Except for the specified free page, all free
+ * pages in the reservation are returned to the physical memory allocator.
+ * The reservation's population count and map are reset to their initial
+ * state.
*
- * The given reservation must not be in the partially-populated reservation
+ * The given reservation must not be in the partially populated reservation
* queue. The free page queue lock must be held.
*/
static void
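A sketch of the break operation described above: walk the reservation's
population bitmap and hand every free page, except the one being kept, back to
the allocator. The single-word bitmap layout and names are illustrative, loosely
after the real popmap/popcnt fields.

#include <stdint.h>
#include <stdio.h>

#define NPAGES 64   /* one bitmap word, for illustration */

/* Free every page whose popmap bit is clear, except "keep" (-1: none). */
static void
reserv_break(uint64_t popmap, int keep)
{
    int i;

    for (i = 0; i < NPAGES; i++) {
        if ((popmap & ((uint64_t)1 << i)) != 0 || i == keep)
            continue;
        printf("returning page %d to the free lists\n", i);
    }
}

int
main(void)
{
    /* Pages 0 and 3 are in use; keep free page 1 out of the free lists. */
    reserv_break(((uint64_t)1 << 0) | ((uint64_t)1 << 3), 1);
    return (0);
}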
@@ -895,7 +895,7 @@
}
/*
- * Returns a reservation level if the given page belongs to a fully-populated
+ * Returns a reservation level if the given page belongs to a fully populated
* reservation and -1 otherwise.
*/
int
@@ -908,8 +908,8 @@
}
/*
- * Breaks the given partially-populated reservation, releasing its cached and
- * free pages to the physical memory allocator.
+ * Breaks the given partially populated reservation, releasing its free pages
+ * to the physical memory allocator.
*
* The free page queue lock must be held.
*/
@@ -927,9 +927,9 @@
}
/*
- * Breaks the reservation at the head of the partially-populated reservation
- * queue, releasing its cached and free pages to the physical memory
- * allocator. Returns TRUE if a reservation is broken and FALSE otherwise.
+ * Breaks the reservation at the head of the partially populated reservation
+ * queue, releasing its free pages to the physical memory allocator. Returns
+ * TRUE if a reservation is broken and FALSE otherwise.
*
* The free page queue lock must be held.
*/
@@ -947,11 +947,10 @@
}
/*
- * Searches the partially-populated reservation queue for the least recently
- * active reservation with unused pages, i.e., cached or free, that satisfy the
- * given request for contiguous physical memory. If a satisfactory reservation
- * is found, it is broken. Returns TRUE if a reservation is broken and FALSE
- * otherwise.
+ * Searches the partially populated reservation queue for the least recently
+ * changed reservation with free pages that satisfy the given request for
+ * contiguous physical memory. If a satisfactory reservation is found, it is
+ * broken. Returns TRUE if a reservation is broken and FALSE otherwise.
*
* The free page queue lock must be held.
*/
