Changeset View
Standalone View
head/sys/vm/vm_page.c
Show First 20 Lines • Show All 1,365 Lines • ▼ Show 20 Lines | vm_page_readahead_finish(vm_page_t m) | ||||
KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m)); | KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m)); | ||||
/* | /* | ||||
* Since the page is not the actually needed one, whether it should | * Since the page is not the actually needed one, whether it should | ||||
* be activated or deactivated is not obvious. Empirical results | * be activated or deactivated is not obvious. Empirical results | ||||
* have shown that deactivating the page is usually the best choice, | * have shown that deactivating the page is usually the best choice, | ||||
* unless the page is wanted by another thread. | * unless the page is wanted by another thread. | ||||
*/ | */ | ||||
vm_page_lock(m); | |||||
if ((m->busy_lock & VPB_BIT_WAITERS) != 0) | if ((m->busy_lock & VPB_BIT_WAITERS) != 0) | ||||
vm_page_activate(m); | vm_page_activate(m); | ||||
else | else | ||||
vm_page_deactivate(m); | vm_page_deactivate(m); | ||||
vm_page_unlock(m); | |||||
vm_page_xunbusy_unchecked(m); | vm_page_xunbusy_unchecked(m); | ||||
} | } | ||||
/* | /* | ||||
* vm_page_sleep_if_busy: | * vm_page_sleep_if_busy: | ||||
* | * | ||||
* Sleep and release the object lock if the page is busied. | * Sleep and release the object lock if the page is busied. | ||||
* Returns TRUE if the thread slept. | * Returns TRUE if the thread slept. | ||||
▲ Show 20 Lines • Show All 2,258 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Schedule the given page for insertion into the specified page queue. | * Schedule the given page for insertion into the specified page queue. | ||||
* Physical insertion of the page may be deferred indefinitely. | * Physical insertion of the page may be deferred indefinitely. | ||||
*/ | */ | ||||
static void | static void | ||||
vm_page_enqueue(vm_page_t m, uint8_t queue) | vm_page_enqueue(vm_page_t m, uint8_t queue) | ||||
{ | { | ||||
vm_page_assert_locked(m); | |||||
KASSERT(m->a.queue == PQ_NONE && | KASSERT(m->a.queue == PQ_NONE && | ||||
(m->a.flags & PGA_QUEUE_STATE_MASK) == 0, | (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, | ||||
("%s: page %p is already enqueued", __func__, m)); | ("%s: page %p is already enqueued", __func__, m)); | ||||
KASSERT(m->ref_count > 0, | KASSERT(m->ref_count > 0, | ||||
("%s: page %p does not carry any references", __func__, m)); | ("%s: page %p does not carry any references", __func__, m)); | ||||
m->a.queue = queue; | m->a.queue = queue; | ||||
if ((m->a.flags & PGA_REQUEUE) == 0) | if ((m->a.flags & PGA_REQUEUE) == 0) | ||||
▲ Show 20 Lines • Show All 456 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Put a page in the PQ_UNSWAPPABLE holding queue. | * Put a page in the PQ_UNSWAPPABLE holding queue. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_unswappable(vm_page_t m) | vm_page_unswappable(vm_page_t m) | ||||
{ | { | ||||
vm_page_assert_locked(m); | |||||
KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0, | KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0, | ||||
("page %p already unswappable", m)); | ("page %p already unswappable", m)); | ||||
vm_page_dequeue(m); | vm_page_dequeue(m); | ||||
vm_page_enqueue(m, PQ_UNSWAPPABLE); | vm_page_enqueue(m, PQ_UNSWAPPABLE); | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 81 Lines • ▼ Show 20 Lines | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("vm_page_release_locked: page %p is unmanaged", m)); | ("vm_page_release_locked: page %p is unmanaged", m)); | ||||
if (vm_page_unwire_noq(m)) { | if (vm_page_unwire_noq(m)) { | ||||
if ((flags & VPR_TRYFREE) != 0 && | if ((flags & VPR_TRYFREE) != 0 && | ||||
(m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && | (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && | ||||
m->dirty == 0 && vm_page_tryxbusy(m)) { | m->dirty == 0 && vm_page_tryxbusy(m)) { | ||||
vm_page_free(m); | vm_page_free(m); | ||||
} else { | } else { | ||||
vm_page_lock(m); | |||||
vm_page_release_toq(m, PQ_INACTIVE, flags != 0); | vm_page_release_toq(m, PQ_INACTIVE, flags != 0); | ||||
vm_page_unlock(m); | |||||
} | } | ||||
} | } | ||||
} | } | ||||
static bool | static bool | ||||
vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) | vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) | ||||
{ | { | ||||
u_int old; | u_int old; | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | |||||
* Apply the specified advice to the given page. | * Apply the specified advice to the given page. | ||||
* | * | ||||
* The object and page must be locked. | * The object and page must be locked. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_advise(vm_page_t m, int advice) | vm_page_advise(vm_page_t m, int advice) | ||||
{ | { | ||||
vm_page_assert_locked(m); | |||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | VM_OBJECT_ASSERT_WLOCKED(m->object); | ||||
if (advice == MADV_FREE) | if (advice == MADV_FREE) | ||||
/* | /* | ||||
* Mark the page clean. This will allow the page to be freed | * Mark the page clean. This will allow the page to be freed | ||||
* without first paging it out. MADV_FREE pages are often | * without first paging it out. MADV_FREE pages are often | ||||
* quickly reused by malloc(3), so we do not do anything that | * quickly reused by malloc(3), so we do not do anything that | ||||
* would result in a page fault on a later access. | * would result in a page fault on a later access. | ||||
*/ | */ | ||||
vm_page_undirty(m); | vm_page_undirty(m); | ||||
else if (advice != MADV_DONTNEED) { | else if (advice != MADV_DONTNEED) { | ||||
if (advice == MADV_WILLNEED) | if (advice == MADV_WILLNEED) | ||||
vm_page_activate(m); | vm_page_activate(m); | ||||
return; | return; | ||||
} | } | ||||
if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) | |||||
vm_page_dirty(m); | |||||
/* | /* | ||||
* Clear any references to the page. Otherwise, the page daemon will | * Clear any references to the page. Otherwise, the page daemon will | ||||
* immediately reactivate the page. | * immediately reactivate the page. | ||||
*/ | */ | ||||
vm_page_aflag_clear(m, PGA_REFERENCED); | vm_page_aflag_clear(m, PGA_REFERENCED); | ||||
if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) | |||||
vm_page_dirty(m); | |||||
/* | /* | ||||
* Place clean pages near the head of the inactive queue rather than | * Place clean pages near the head of the inactive queue rather than | ||||
* the tail, thus defeating the queue's LRU operation and ensuring that | * the tail, thus defeating the queue's LRU operation and ensuring that | ||||
* the page will be reused quickly. Dirty pages not already in the | * the page will be reused quickly. Dirty pages not already in the | ||||
* laundry are moved there. | * laundry are moved there. | ||||
*/ | */ | ||||
if (m->dirty == 0) | if (m->dirty == 0) | ||||
▲ Show 20 Lines • Show All 961 Lines • Show Last 20 Lines |