head/sys/vm/vm_pageout.c
@@ (299 lines elided) @@ if (__predict_true(m != NULL))
         TAILQ_INSERT_BEFORE(m, marker, plinks.q);
     else
         TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
     if (dequeue)
         vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
     vm_pagequeue_unlock(pq);
 }

-/* Return the next page to be scanned, or NULL if the scan is complete. */
+/*
+ * Return the next page to be scanned, or NULL if the scan is complete.
+ */
 static __always_inline vm_page_t
 vm_pageout_next(struct scan_state *ss, const bool dequeue)
 {
     if (ss->bq.bq_cnt == 0)
         vm_pageout_collect_batch(ss, dequeue);
     return (vm_batchqueue_pop(&ss->bq));
 }
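Editor's note: the scan-state machinery above is consumed by the loops shown later in this changeset, e.g. while ((m = vm_pageout_next(&ss, false)) != NULL). The sketch below shows the overall pattern. It is an illustration, not a verbatim excerpt: it assumes the vm_pageout_init_scan()/vm_pageout_end_scan() helpers defined earlier in this file (not visible in these hunks), assumes that init drops the page queue lock, and elides every per-page check.

    struct scan_state ss;
    vm_page_t m;

    vm_pagequeue_lock(pq);
    vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
    /* vm_pageout_init_scan() is assumed to drop the page queue lock. */
    while ((m = vm_pageout_next(&ss, false)) != NULL) {
        /*
         * The queue lock is not held here, so "m" may be dequeued,
         * requeued, or freed at any time; every check in the real
         * scans is written to tolerate that.
         */
    }
    vm_pagequeue_lock(pq);
    vm_pageout_end_scan(&ss);
    vm_pagequeue_unlock(pq);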
 /*
  * Scan for pages at adjacent offsets within the given page's object that are
  * eligible for laundering, form a cluster of these pages and the given page,
  * and launder that cluster.
  */
 static int
 vm_pageout_cluster(vm_page_t m)
 {
     vm_object_t object;
     vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
     vm_pindex_t pindex;
     int ib, is, page_base, pageout_count;

-    vm_page_assert_locked(m);
     object = m->object;
     VM_OBJECT_ASSERT_WLOCKED(object);
     pindex = m->pindex;

     vm_page_assert_unbusied(m);
-    KASSERT(!vm_page_wired(m), ("page %p is wired", m));
-
-    pmap_remove_write(m);
-    vm_page_unlock(m);

     mc[vm_pageout_page_count] = pb = ps = m;
     pageout_count = 1;
     page_base = vm_pageout_page_count;
     ib = 1;
     is = 1;

     /*
      * We can cluster only if the page is not clean, busy, or held, and
      * the page is in the laundry queue.
      *
      * During heavy mmap/modification loads the pageout
      * daemon can really fragment the underlying file
      * due to flushing pages out of order and not trying to
      * align the clusters (which leaves sporadic out-of-order
      * holes).  To solve this problem we do the reverse scan
      * first and attempt to align our cluster, then do a
      * forward scan if room remains.
      */
 more:
     while (ib != 0 && pageout_count < vm_pageout_page_count) {
         if (ib > pindex) {
             ib = 0;
             break;
         }
-        if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
+        if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p) ||
+            vm_page_wired(p)) {
             ib = 0;
             break;
         }
         vm_page_test_dirty(p);
         if (p->dirty == 0) {
             ib = 0;
             break;
         }
         vm_page_lock(p);
-        if (vm_page_wired(p) || !vm_page_in_laundry(p)) {
+        if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
             vm_page_unlock(p);
             ib = 0;
             break;
         }
-        pmap_remove_write(p);
         vm_page_unlock(p);
         mc[--page_base] = pb = p;
         ++pageout_count;
         ++ib;

         /*
          * We are at an alignment boundary.  Stop here, and switch
          * directions.  Do not clear ib.
          */
         if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
             break;
     }
     while (pageout_count < vm_pageout_page_count &&
         pindex + is < object->size) {
-        if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
+        if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p) ||
+            vm_page_wired(p))
             break;
         vm_page_test_dirty(p);
         if (p->dirty == 0)
             break;
         vm_page_lock(p);
-        if (vm_page_wired(p) || !vm_page_in_laundry(p)) {
+        if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
             vm_page_unlock(p);
             break;
         }
-        pmap_remove_write(p);
         vm_page_unlock(p);
         mc[page_base + pageout_count] = ps = p;
         ++pageout_count;
         ++is;
     }

     /*
      * If we exhausted our forward scan, continue with the reverse scan
@@ (228 lines elided) @@ if (object->type == OBJT_VNODE) {
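Editor's note on the alignment rule in the reverse-scan hunk above: vm_pageout_page_count is a tunable; the default of 32 used below is an assumption. With a hypothetical pindex of 37, the reverse scan takes pages 36 down through 32 and then stops, because pindex - (ib - 1) reaches 32, a multiple of the cluster size; the forward scan may then extend the cluster from 38 onward. A minimal standalone C model of just the boundary arithmetic (the busy, dirty, and start-of-object checks are omitted):

    #include <stdio.h>

    /* Assumed default of the kernel's vm_pageout_page_count tunable. */
    #define PAGEOUT_PAGE_COUNT 32

    int
    main(void)
    {
        unsigned long pindex = 37, lowest = pindex; /* hypothetical offset */
        int ib = 1, count = 1;

        while (ib != 0 && count < PAGEOUT_PAGE_COUNT) {
            lowest = pindex - ib;   /* take the previous page */
            count++;
            ib++;
            /* The same boundary test the kernel loop uses. */
            if ((pindex - (ib - 1)) % PAGEOUT_PAGE_COUNT == 0)
                break;
        }
        /* Prints: cluster [32, 37], 6 pages. */
        printf("reverse scan stops with cluster [%lu, %lu], %d pages\n",
            lowest, pindex, count);
        return (0);
    }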
         if (!vm_page_in_laundry(m) || m->object != object ||
             m->pindex != pindex || m->dirty == 0) {
             vm_page_unlock(m);
             error = ENXIO;
             goto unlock_all;
         }

         /*
-         * The page may have been busied or referenced while the object
-         * and page locks were released.
+         * The page may have been busied while the object and page
+         * locks were released.
          */
-        if (vm_page_busied(m) || vm_page_wired(m)) {
+        if (vm_page_busied(m)) {
             vm_page_unlock(m);
             error = EBUSY;
             goto unlock_all;
         }
     }

     /*
+     * Remove all writeable mappings, failing if the page is wired.
+     */
+    if (!vm_page_try_remove_write(m)) {
+        vm_page_unlock(m);
+        error = EBUSY;
+        goto unlock_all;
+    }
+    vm_page_unlock(m);
+
+    /*
      * If a page is dirty, then it is either being washed
      * (but not yet cleaned) or it is still in the
      * laundry.  If it is still in the laundry, then we
      * start the cleaning operation.
      */
     if ((*numpagedout = vm_pageout_cluster(m)) == 0)
         error = EIO;
@@ (57 lines elided) @@ while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
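Editor's note on the hunks above: the unconditional pmap_remove_write() calls are replaced by vm_page_try_remove_write(), which refuses to strip write access from a wired page. Below is a minimal userspace model of the underlying idea as this changeset presents it: fail if any wiring exists, otherwise set a "blocked" bit in the reference count so concurrent wiring attempts back off while mappings are torn down. The bit layout and names are simplified stand-ins, not the kernel's; in particular, the real counter also carries an object-reference bit, and the real wiring path must honor the blocked bit.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCKED   0x40000000u   /* illustrative; not the kernel layout */
    #define WIRE_MASK 0x3fffffffu

    struct page {
        _Atomic unsigned int ref_count; /* wirings live in the low bits */
    };

    static bool
    try_remove_write(struct page *p)
    {
        unsigned int old;

        old = atomic_load(&p->ref_count);
        do {
            /* Fail if any wiring exists; wired pages keep write access. */
            if ((old & WIRE_MASK) != 0)
                return (false);
            /* Otherwise atomically set BLOCKED to hold off new wirings. */
        } while (!atomic_compare_exchange_weak(&p->ref_count, &old,
            old | BLOCKED));

        /* The kernel would call pmap_remove_write(p) at this point. */

        atomic_fetch_and(&p->ref_count, ~BLOCKED);
        return (true);
    }

    int
    main(void)
    {
        struct page p = { 0 };

        printf("unwired page: %d\n", try_remove_write(&p)); /* prints 1 */
        atomic_fetch_add(&p.ref_count, 1);                  /* wire the page */
        printf("wired page:   %d\n", try_remove_write(&p)); /* prints 0 */
        return (0);
    }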
         if (__predict_false((m->flags & PG_MARKER) != 0))
             continue;

         vm_page_change_lock(m, &mtx);
 recheck:
         /*
          * The page may have been disassociated from the queue
-         * while locks were dropped.
+         * or even freed while locks were dropped.  We thus must be
+         * careful whenever modifying page state.  Once the object lock
+         * has been acquired, we have a stable reference to the page.
          */
         if (vm_page_queue(m) != queue)
             continue;

         /*
          * A requeue was requested, so this page gets a second
          * chance.
          */
         if ((m->aflags & PGA_REQUEUE) != 0) {
             vm_page_pqbatch_submit(m, queue);
             continue;
         }

         /*
          * Wired pages may not be freed.  Complete their removal
          * from the queue now to avoid needless revisits during
-         * future scans.
+         * future scans.  This check is racy and must be reverified once
+         * we hold the object lock and have verified that the page
+         * is not busy.
          */
         if (vm_page_wired(m)) {
             vm_page_dequeue_deferred(m);
             continue;
         }

         if (object != m->object) {
             if (object != NULL)
                 VM_OBJECT_WUNLOCK(object);
-            object = m->object;
-            if (!VM_OBJECT_TRYWLOCK(object)) {
+
+            /*
+             * A page's object pointer may be set to NULL before
+             * the object lock is acquired.
+             */
+            object = (vm_object_t)atomic_load_ptr(&m->object);
+            if (object != NULL && !VM_OBJECT_TRYWLOCK(object)) {
                 mtx_unlock(mtx);
                 /* Depends on type-stability. */
                 VM_OBJECT_WLOCK(object);
                 mtx_lock(mtx);
                 goto recheck;
             }
         }
+        if (__predict_false(m->object == NULL))
+            /*
+             * The page has been removed from its object.
+             */
+            continue;
+        KASSERT(m->object == object, ("page %p does not belong to %p",
+            m, object));

         if (vm_page_busied(m))
             continue;

         /*
+         * Re-check for wirings now that we hold the object lock and
+         * have verified that the page is unbusied.  If the page is
+         * mapped, it may still be wired by pmap lookups.  The call to
+         * vm_page_try_remove_all() below atomically checks for such
+         * wirings and removes mappings.  If the page is unmapped, the
+         * wire count is guaranteed not to increase.
+         */
+        if (__predict_false(vm_page_wired(m))) {
+            vm_page_dequeue_deferred(m);
+            continue;
+        }
+
+        /*
          * Invalid pages can be easily freed.  They cannot be
          * mapped; vm_page_free() asserts this.
          */
         if (m->valid == 0)
             goto free_page;

         /*
          * If the page has been referenced and the object is not dead,
@@ (50 lines elided) @@ recheck:
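Editor's note: the hunk above combines three moves that recur throughout this changeset: a racy wiring check under the page lock alone, an atomic snapshot of the object pointer (which may be cleared at any time), and a trylock with a blocking fallback that respects the object-before-page lock order, followed by revalidation. A hedged userspace model of the snapshot-and-trylock part follows, with pthreads standing in for the kernel primitives; all names are hypothetical, and where the kernel keeps the object lock and re-runs every check via "goto recheck", this sketch simply retries from scratch.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct object {
        pthread_mutex_t lock;
    };

    struct page {
        pthread_mutex_t *page_mtx;      /* stand-in for the page lock */
        struct object *_Atomic object;  /* cleared when the page is freed */
    };

    /*
     * Return the page's object, locked, or NULL if the page lost its
     * object while locks were being juggled.  Called with *page_mtx held.
     */
    static struct object *
    lock_object_for_page(struct page *m)
    {
        struct object *obj;

        for (;;) {
            obj = atomic_load(&m->object);
            if (obj == NULL)
                return (NULL);
            if (pthread_mutex_trylock(&obj->lock) == 0) {
                if (atomic_load(&m->object) == obj)
                    return (obj);
                /* The page changed identity; retry. */
                pthread_mutex_unlock(&obj->lock);
                continue;
            }
            /*
             * Lock order is object before page, so we may not block on
             * the object lock while holding the page lock.  Drop the
             * page lock and block; this is safe only because objects
             * are type-stable, i.e. the memory is never unmapped.
             */
            pthread_mutex_unlock(m->page_mtx);
            pthread_mutex_lock(&obj->lock);
            pthread_mutex_lock(m->page_mtx);
            if (atomic_load(&m->object) == obj)
                return (obj);
            pthread_mutex_unlock(&obj->lock);
        }
    }

    int
    main(void)
    {
        pthread_mutex_t page_mtx = PTHREAD_MUTEX_INITIALIZER;
        struct object o = { PTHREAD_MUTEX_INITIALIZER };
        struct page m;
        struct object *locked;

        m.page_mtx = &page_mtx;
        atomic_store(&m.object, &o);

        pthread_mutex_lock(&page_mtx);
        locked = lock_object_for_page(&m);
        if (locked != NULL)
            pthread_mutex_unlock(&locked->lock);
        pthread_mutex_unlock(&page_mtx);
        return (0);
    }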
          * If the page appears to be clean at the machine-independent
          * layer, then remove all of its mappings from the pmap in
          * anticipation of freeing it.  If, however, any of the page's
          * mappings allow write access, then the page may still be
          * modified until the last of those mappings are removed.
          */
         if (object->ref_count != 0) {
             vm_page_test_dirty(m);
-            if (m->dirty == 0)
-                pmap_remove_all(m);
+            if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
+                vm_page_dequeue_deferred(m);
+                continue;
+            }
         }

         /*
          * Clean pages are freed, and dirty pages are paged out unless
          * they belong to a dead object.  Requeueing dirty pages from
          * dead objects is pointless, as they are being paged out and
          * freed by the thread that destroyed the object.
          */
         if (m->dirty == 0) {
@@ (274 lines elided) @@
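Editor's note on the hunk above: previously a page that tested clean had its mappings removed unconditionally with pmap_remove_all(), on the strength of the earlier wiring check, which this changeset demotes to a racy hint. Now vm_page_try_remove_all() performs the definitive check: if the page acquired a wiring after the hint was taken, the call fails and the page is lazily removed from the queue with vm_page_dequeue_deferred() rather than being unmapped and freed while wired.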
 /*
  * Scan the active queue.  If there is no shortage of inactive pages, scan a
  * small portion of the queue in order to maintain quasi-LRU.
  */
 static void
 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
 {
     struct scan_state ss;
     struct mtx *mtx;
+    vm_object_t object;
     vm_page_t m, marker;
     struct vm_pagequeue *pq;
     long min_scan;
     int act_delta, max_scan, scan_tick;

     marker = &vmd->vmd_markers[PQ_ACTIVE];
     pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
     vm_pagequeue_lock(pq);
@@ (44 lines elided) @@ while ((m = vm_pageout_next(&ss, false)) != NULL) {
         }

         if (__predict_false((m->flags & PG_MARKER) != 0))
             continue;

         vm_page_change_lock(m, &mtx);

         /*
          * The page may have been disassociated from the queue
-         * while locks were dropped.
+         * or even freed while locks were dropped.  We thus must be
+         * careful whenever modifying page state.  Once the object lock
+         * has been acquired, we have a stable reference to the page.
          */
         if (vm_page_queue(m) != PQ_ACTIVE)
             continue;

         /*
          * Wired pages are dequeued lazily.
          */
         if (vm_page_wired(m)) {
             vm_page_dequeue_deferred(m);
             continue;
         }

         /*
+         * A page's object pointer may be set to NULL before
+         * the object lock is acquired.
+         */
+        object = (vm_object_t)atomic_load_ptr(&m->object);
+        if (__predict_false(object == NULL))
+            /*
+             * The page has been removed from its object.
+             */
+            continue;
+
+        /*
          * Check to see "how much" the page has been used.
          *
          * Test PGA_REFERENCED after calling pmap_ts_referenced() so
          * that a reference from a concurrently destroyed mapping is
          * observed here and now.
          *
          * Perform an unsynchronized object ref count check.  While
          * the page lock ensures that the page is not reallocated to
          * another object, in particular, one with unmanaged mappings
          * that cannot support pmap_ts_referenced(), two races are,
          * nonetheless, possible:
          * 1) The count was transitioning to zero, but we saw a non-
          *    zero value.  pmap_ts_referenced() will return zero
          *    because the page is not mapped.
          * 2) The count was transitioning to one, but we saw zero.
          *    This race delays the detection of a new reference.  At
          *    worst, we will deactivate and reactivate the page.
          */
-        if (m->object->ref_count != 0)
+        if (object->ref_count != 0)
             act_delta = pmap_ts_referenced(m);
         else
             act_delta = 0;
         if ((m->aflags & PGA_REFERENCED) != 0) {
             vm_page_aflag_clear(m, PGA_REFERENCED);
             act_delta++;
         }
@@ (159 lines elided) @@ while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
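Editor's note on the active-queue hunk above: the new vm_object_t local serves the same purpose as the atomic loads in the other scans. The active scan never takes the object lock, so under the page lock alone m->object may be cleared concurrently; the old expression m->object->ref_count could therefore dereference a NULL pointer between the check and the use. Snapshotting the pointer once with atomic_load_ptr() and testing the local copy closes that window, and the type stability of objects keeps the unsynchronized ref_count read safe, leaving only the two benign races already described in the comment.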
         KASSERT((m->flags & PG_MARKER) == 0,
             ("marker page %p was dequeued", m));

         vm_page_change_lock(m, &mtx);
 recheck:
         /*
          * The page may have been disassociated from the queue
-         * while locks were dropped.
+         * or even freed while locks were dropped.  We thus must be
+         * careful whenever modifying page state.  Once the object lock
+         * has been acquired, we have a stable reference to the page.
          */
         if (vm_page_queue(m) != PQ_INACTIVE) {
             addl_page_shortage++;
             continue;
         }

         /*
          * The page was re-enqueued after the page queue lock was
          * dropped, or a requeue was requested.  This page gets a second
          * chance.
          */
         if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE |
             PGA_REQUEUE_HEAD)) != 0)
             goto reinsert;

         /*
          * Wired pages may not be freed.  Complete their removal
          * from the queue now to avoid needless revisits during
-         * future scans.
+         * future scans.  This check is racy and must be reverified once
+         * we hold the object lock and have verified that the page
+         * is not busy.
          */
         if (vm_page_wired(m)) {
             vm_page_dequeue_deferred(m);
             continue;
         }

         if (object != m->object) {
             if (object != NULL)
                 VM_OBJECT_WUNLOCK(object);
-            object = m->object;
-            if (!VM_OBJECT_TRYWLOCK(object)) {
+
+            /*
+             * A page's object pointer may be set to NULL before
+             * the object lock is acquired.
+             */
+            object = (vm_object_t)atomic_load_ptr(&m->object);
+            if (object != NULL && !VM_OBJECT_TRYWLOCK(object)) {
                 mtx_unlock(mtx);
                 /* Depends on type-stability. */
                 VM_OBJECT_WLOCK(object);
                 mtx_lock(mtx);
                 goto recheck;
             }
         }
+        if (__predict_false(m->object == NULL))
+            /*
+             * The page has been removed from its object.
+             */
+            continue;
+        KASSERT(m->object == object, ("page %p does not belong to %p",
+            m, object));

         if (vm_page_busied(m)) {
             /*
              * Don't mess with busy pages.  Leave them at
              * the front of the queue.  Most likely, they
              * are being paged out and will leave the
              * queue shortly after the scan finishes.  So,
              * they ought to be discounted from the
              * inactive count.
              */
             addl_page_shortage++;
             goto reinsert;
         }

         /*
+         * Re-check for wirings now that we hold the object lock and
+         * have verified that the page is unbusied.  If the page is
+         * mapped, it may still be wired by pmap lookups.  The call to
+         * vm_page_try_remove_all() below atomically checks for such
+         * wirings and removes mappings.  If the page is unmapped, the
+         * wire count is guaranteed not to increase.
+         */
+        if (__predict_false(vm_page_wired(m))) {
+            vm_page_dequeue_deferred(m);
+            continue;
+        }
+
+        /*
          * Invalid pages can be easily freed.  They cannot be
          * mapped, vm_page_free() asserts this.
          */
         if (m->valid == 0)
             goto free_page;

         /*
          * If the page has been referenced and the object is not dead,
@@ (39 lines elided) @@ recheck:
          * If the page appears to be clean at the machine-independent
          * layer, then remove all of its mappings from the pmap in
          * anticipation of freeing it.  If, however, any of the page's
          * mappings allow write access, then the page may still be
          * modified until the last of those mappings are removed.
          */
         if (object->ref_count != 0) {
             vm_page_test_dirty(m);
-            if (m->dirty == 0)
-                pmap_remove_all(m);
+            if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
+                vm_page_dequeue_deferred(m);
+                continue;
+            }
         }

         /*
          * Clean pages can be freed, but dirty pages must be sent back
          * to the laundry, unless they belong to a dead object.
          * Requeueing dirty pages from dead objects is pointless, as
          * they are being paged out and freed by the thread that
          * destroyed the object.
          */
@@ (614 lines elided to end of changeset) @@